aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.github/pull_request_template.md3
-rw-r--r--Documentation/ABI/stable/sysfs-firmware-zynqmp103
-rw-r--r--Documentation/ABI/testing/sysfs-class-fpga-bridge9
-rw-r--r--Documentation/ABI/testing/sysfs-driver-cortexa53-edac10
-rw-r--r--Documentation/devicetree/bindings/arm/xilinx.yaml42
-rw-r--r--Documentation/devicetree/bindings/arm/zynq/zynq-efuse.txt15
-rw-r--r--Documentation/devicetree/bindings/arm/zynq/zynq-ocmc.txt17
-rw-r--r--Documentation/devicetree/bindings/ata/ahci-ceva.txt3
-rw-r--r--Documentation/devicetree/bindings/clock/idt,idt8t49n24x.txt156
-rw-r--r--Documentation/devicetree/bindings/clock/silabs,si5324.txt78
-rw-r--r--Documentation/devicetree/bindings/clock/xlnx,clocking-wizard.txt (renamed from drivers/staging/clocking-wizard/dt-binding.txt)2
-rw-r--r--Documentation/devicetree/bindings/clock/xlnx,versal-wiz.yaml58
-rw-r--r--Documentation/devicetree/bindings/crypto/xlnx,zynqmp-aes.txt12
-rw-r--r--Documentation/devicetree/bindings/crypto/zynqmp-rsa.txt12
-rw-r--r--Documentation/devicetree/bindings/crypto/zynqmp-sha.txt12
-rw-r--r--Documentation/devicetree/bindings/display/xlnx/bridge.txt29
-rw-r--r--Documentation/devicetree/bindings/display/xlnx/xlnx,dsi.txt74
-rw-r--r--Documentation/devicetree/bindings/display/xlnx/xlnx,mixer.txt166
-rw-r--r--Documentation/devicetree/bindings/display/xlnx/xlnx,pl-disp.txt41
-rw-r--r--Documentation/devicetree/bindings/display/xlnx/xlnx,sdi-tx.txt57
-rw-r--r--Documentation/devicetree/bindings/display/xlnx/xlnx,vpss-csc.txt35
-rw-r--r--Documentation/devicetree/bindings/display/xlnx/xlnx,vpss-scaler.txt50
-rw-r--r--Documentation/devicetree/bindings/display/xlnx/xlnx,vtc.txt32
-rw-r--r--Documentation/devicetree/bindings/display/xlnx/xlnx,zynqmp-dpsub.txt82
-rw-r--r--Documentation/devicetree/bindings/dma/xilinx/axi-dma.txt38
-rw-r--r--Documentation/devicetree/bindings/dma/xilinx/ps-pcie-dma.txt67
-rw-r--r--Documentation/devicetree/bindings/dma/xilinx/vdmatest.txt39
-rw-r--r--Documentation/devicetree/bindings/dma/xilinx/xilinx_dpdma.txt91
-rw-r--r--Documentation/devicetree/bindings/dma/xilinx/xilinx_frmbuf.txt123
-rw-r--r--Documentation/devicetree/bindings/drm/zocl/zocl_drm.txt13
-rw-r--r--Documentation/devicetree/bindings/edac/cortex-arm64-edac.txt15
-rw-r--r--Documentation/devicetree/bindings/edac/pl310_edac_l2.txt19
-rw-r--r--Documentation/devicetree/bindings/edac/zynqmp_ocm_edac.txt18
-rw-r--r--Documentation/devicetree/bindings/fpga/xlnx,afi-fpga.txt61
-rw-r--r--Documentation/devicetree/bindings/fpga/xlnx,versal-fpga.txt10
-rw-r--r--Documentation/devicetree/bindings/fpga/xlnx,zynq-afi-fpga.txt19
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-xilinx.txt21
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-zynq.txt4
-rw-r--r--Documentation/devicetree/bindings/hwmon/tps544.txt14
-rw-r--r--Documentation/devicetree/bindings/iio/adc/xilinx-ams.txt159
-rw-r--r--Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt19
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/xilinx,intc.txt56
-rw-r--r--Documentation/devicetree/bindings/media/xilinx/xlnx,csi2rxss.txt128
-rw-r--r--Documentation/devicetree/bindings/media/xilinx/xlnx,mem2mem.txt25
-rw-r--r--Documentation/devicetree/bindings/media/xilinx/xlnx,sdirxss.txt74
-rw-r--r--Documentation/devicetree/bindings/media/xilinx/xlnx,v-axi4s-switch.txt141
-rw-r--r--Documentation/devicetree/bindings/media/xilinx/xlnx,v-cfa.txt58
-rw-r--r--Documentation/devicetree/bindings/media/xilinx/xlnx,v-cresample.txt54
-rw-r--r--Documentation/devicetree/bindings/media/xilinx/xlnx,v-demosaic.txt62
-rw-r--r--Documentation/devicetree/bindings/media/xilinx/xlnx,v-gamma-lut.txt63
-rw-r--r--Documentation/devicetree/bindings/media/xilinx/xlnx,v-hls.txt64
-rw-r--r--Documentation/devicetree/bindings/media/xilinx/xlnx,v-multi-scaler.txt95
-rw-r--r--Documentation/devicetree/bindings/media/xilinx/xlnx,v-remapper.txt61
-rw-r--r--Documentation/devicetree/bindings/media/xilinx/xlnx,v-rgb2yuv.txt54
-rw-r--r--Documentation/devicetree/bindings/media/xilinx/xlnx,v-scaler.txt75
-rw-r--r--Documentation/devicetree/bindings/media/xilinx/xlnx,v-scd.txt164
-rw-r--r--Documentation/devicetree/bindings/media/xilinx/xlnx,v-switch.txt55
-rw-r--r--Documentation/devicetree/bindings/media/xilinx/xlnx,v-tpg.txt17
-rw-r--r--Documentation/devicetree/bindings/media/xilinx/xlnx,v-vpss-csc.txt66
-rw-r--r--Documentation/devicetree/bindings/media/xilinx/xlnx,v-vpss-scaler.txt93
-rw-r--r--Documentation/devicetree/bindings/misc/jesd-phy.txt24
-rw-r--r--Documentation/devicetree/bindings/misc/jesd204b.txt28
-rw-r--r--Documentation/devicetree/bindings/misc/xlnx,axi-traffic-gen.txt25
-rw-r--r--Documentation/devicetree/bindings/misc/xlnx,fclk.txt12
-rw-r--r--Documentation/devicetree/bindings/mmc/arasan,sdhci.txt2
-rw-r--r--Documentation/devicetree/bindings/mtd/arasan_nand.txt33
-rw-r--r--Documentation/devicetree/bindings/mtd/cadence-quadspi.txt2
-rw-r--r--Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt8
-rw-r--r--Documentation/devicetree/bindings/net/macb.txt7
-rw-r--r--Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt9
-rw-r--r--Documentation/devicetree/bindings/net/xilinx-phy.txt15
-rw-r--r--Documentation/devicetree/bindings/net/xilinx-tsn-ethernet.txt54
-rw-r--r--Documentation/devicetree/bindings/net/xilinx_axienet.txt127
-rw-r--r--Documentation/devicetree/bindings/net/xilinx_emaclite.txt35
-rw-r--r--Documentation/devicetree/bindings/net/xilinx_tsn.txt14
-rw-r--r--Documentation/devicetree/bindings/net/xilinx_tsn_ep.txt35
-rw-r--r--Documentation/devicetree/bindings/net/xilinx_tsn_switch.txt23
-rw-r--r--Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.txt90
-rw-r--r--Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt2
-rw-r--r--Documentation/devicetree/bindings/pci/xilinx-xdma-pl-pcie.txt133
-rw-r--r--Documentation/devicetree/bindings/perf/xilinx-apm.yaml128
-rw-r--r--Documentation/devicetree/bindings/perf/xlnx-flexnoc-pm.yaml45
-rw-r--r--Documentation/devicetree/bindings/phy/phy-zynqmp.txt119
-rw-r--r--Documentation/devicetree/bindings/pinctrl/xlnx,zynqmp-pinctrl.txt275
-rw-r--r--Documentation/devicetree/bindings/remoteproc/xilinx,zynqmp-r5-remoteproc.txt135
-rw-r--r--Documentation/devicetree/bindings/remoteproc/zynq_remoteproc.txt47
-rw-r--r--Documentation/devicetree/bindings/serial/uartlite.c26
-rw-r--r--Documentation/devicetree/bindings/soc/xilinx/xlnx,ai-engine-npi.txt23
-rw-r--r--Documentation/devicetree/bindings/soc/xilinx/xlnx,ai_engine.txt28
-rw-r--r--Documentation/devicetree/bindings/soc/xilinx/xlnx,vcu.txt54
-rw-r--r--Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt23
-rw-r--r--Documentation/devicetree/bindings/sound/xlnx,dp-snd-card.txt17
-rw-r--r--Documentation/devicetree/bindings/sound/xlnx,dp-snd-codec.txt18
-rw-r--r--Documentation/devicetree/bindings/sound/xlnx,dp-snd-pcm.txt18
-rw-r--r--Documentation/devicetree/bindings/sound/xlnx,i2s.txt13
-rw-r--r--Documentation/devicetree/bindings/sound/xlnx,spdif.txt21
-rw-r--r--Documentation/devicetree/bindings/sound/xlnx,v-uhdsdi-audio.txt60
-rw-r--r--Documentation/devicetree/bindings/spi/spi-xilinx.txt15
-rw-r--r--Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt11
-rw-r--r--Documentation/devicetree/bindings/usb/dwc3-xilinx.txt21
-rw-r--r--Documentation/devicetree/bindings/usb/dwc3.txt15
-rw-r--r--Documentation/devicetree/bindings/usb/ehci-xilinx.txt21
-rw-r--r--Documentation/devicetree/bindings/usb/udc-xilinx.txt19
-rw-r--r--Documentation/devicetree/bindings/video/xilinx-fb.txt35
-rw-r--r--Documentation/devicetree/bindings/watchdog/of-xilinx-wdt.txt21
-rw-r--r--Documentation/devicetree/bindings/xilinx.txt1
-rw-r--r--Documentation/devicetree/bindings/xlnx,ctrl-fb.txt22
-rw-r--r--Documentation/devicetree/bindings/xlnx,ctrl-vpss.txt21
-rw-r--r--Documentation/devicetree/configfs-overlays.txt31
-rw-r--r--Documentation/media/uapi/v4l/subdev-formats.rst872
-rw-r--r--Documentation/misc-devices/xilinx_flex.txt66
-rw-r--r--Documentation/misc-devices/xilinx_trafgen.txt97
-rw-r--r--MAINTAINERS301
-rw-r--r--arch/arm/boot/dts/zynq-7000.dtsi69
-rw-r--r--arch/arm/boot/dts/zynq-cc108.dts41
-rw-r--r--arch/arm/boot/dts/zynq-zc702.dts71
-rw-r--r--arch/arm/boot/dts/zynq-zc706.dts54
-rw-r--r--arch/arm/boot/dts/zynq-zc770-xm010.dts36
-rw-r--r--arch/arm/boot/dts/zynq-zc770-xm011.dts42
-rw-r--r--arch/arm/boot/dts/zynq-zc770-xm012.dts42
-rw-r--r--arch/arm/boot/dts/zynq-zc770-xm013.dts37
-rw-r--r--arch/arm/boot/dts/zynq-zed.dts51
-rw-r--r--arch/arm/boot/dts/zynq-zybo.dts12
-rw-r--r--arch/arm/configs/xilinx_zynq_defconfig239
-rw-r--r--arch/arm/include/asm/hardirq.h3
-rw-r--r--arch/arm/include/asm/smp.h3
-rw-r--r--arch/arm/kernel/smp.c168
-rw-r--r--arch/arm/mach-zynq/Kconfig16
-rw-r--r--arch/arm/mach-zynq/Makefile7
-rw-r--r--arch/arm/mach-zynq/common.c9
-rw-r--r--arch/arm/mach-zynq/common.h26
-rw-r--r--arch/arm/mach-zynq/efuse.c75
-rw-r--r--arch/arm/mach-zynq/platsmp.c4
-rw-r--r--arch/arm/mach-zynq/pm.c170
-rw-r--r--arch/arm/mach-zynq/slcr.c47
-rw-r--r--arch/arm/mach-zynq/suspend.S185
-rw-r--r--arch/arm/mach-zynq/zynq_ocm.c245
-rw-r--r--arch/arm64/Kconfig.platforms2
-rw-r--r--arch/arm64/boot/dts/xilinx/Makefile7
-rw-r--r--arch/arm64/boot/dts/xilinx/avnet-ultra96-rev1.dts2
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi97
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-clk.dtsi256
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zc1232-revA.dts35
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zc1254-revA.dts32
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zc1275-revA.dts42
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm015-dc1.dts313
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts382
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm017-dc3.dts62
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm018-dc4.dts47
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm019-dc5.dts331
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts322
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu102-rev1.0.dts2
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts391
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revB.dts3
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts354
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revC.dts551
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts376
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts322
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu1275-revA.dts73
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu1275-revB.dts121
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu1285-revA.dts272
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu208-revA.dts658
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu216-revA.dts662
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp.dtsi573
-rw-r--r--arch/arm64/configs/xilinx_defconfig429
-rw-r--r--arch/arm64/configs/xilinx_versal_defconfig223
-rw-r--r--arch/arm64/configs/xilinx_zynqmp_defconfig388
-rw-r--r--arch/microblaze/Kconfig29
-rw-r--r--arch/microblaze/include/asm/atomic.h265
-rw-r--r--arch/microblaze/include/asm/bitops.h189
-rw-r--r--arch/microblaze/include/asm/cmpxchg.h87
-rw-r--r--arch/microblaze/include/asm/cpuinfo.h2
-rw-r--r--arch/microblaze/include/asm/entry.h12
-rw-r--r--arch/microblaze/include/asm/hardirq.h39
-rw-r--r--arch/microblaze/include/asm/irq.h3
-rw-r--r--arch/microblaze/include/asm/mmu.h5
-rw-r--r--arch/microblaze/include/asm/mmu_context_mm.h82
-rw-r--r--arch/microblaze/include/asm/pgtable.h19
-rw-r--r--arch/microblaze/include/asm/sections.h3
-rw-r--r--arch/microblaze/include/asm/smp.h45
-rw-r--r--arch/microblaze/include/asm/spinlock.h240
-rw-r--r--arch/microblaze/include/asm/spinlock_types.h25
-rw-r--r--arch/microblaze/include/asm/tlbflush.h2
-rw-r--r--arch/microblaze/kernel/Makefile1
-rw-r--r--arch/microblaze/kernel/cpu/cache.c154
-rw-r--r--arch/microblaze/kernel/cpu/cpuinfo.c38
-rw-r--r--arch/microblaze/kernel/cpu/mb.c207
-rw-r--r--arch/microblaze/kernel/entry.S90
-rw-r--r--arch/microblaze/kernel/head.S165
-rw-r--r--arch/microblaze/kernel/hw_exception_handler.S5
-rw-r--r--arch/microblaze/kernel/irq.c21
-rw-r--r--arch/microblaze/kernel/kgdb.c8
-rw-r--r--arch/microblaze/kernel/setup.c20
-rw-r--r--arch/microblaze/kernel/smp.c331
-rw-r--r--arch/microblaze/kernel/syscalls/syscallhdr.sh1
-rw-r--r--arch/microblaze/kernel/timer.c243
-rw-r--r--arch/microblaze/kernel/vmlinux.lds.S17
-rw-r--r--arch/microblaze/mm/consistent.c6
-rw-r--r--arch/microblaze/mm/init.c10
-rw-r--r--arch/microblaze/mm/mmu_context.c257
-rw-r--r--arch/microblaze/pci/pci-common.c32
-rw-r--r--drivers/bluetooth/hci_ll.c1
-rw-r--r--drivers/clk/Kconfig48
-rw-r--r--drivers/clk/Makefile5
-rw-r--r--drivers/clk/clk-fixed-factor.c8
-rw-r--r--drivers/clk/clk-si5324.c1227
-rw-r--r--drivers/clk/clk-si5324.h140
-rw-r--r--drivers/clk/clk-xlnx-clock-wizard-v.c513
-rw-r--r--drivers/clk/clk-xlnx-clock-wizard.c707
-rw-r--r--drivers/clk/idt/Makefile3
-rw-r--r--drivers/clk/idt/clk-idt8t49n24x-core.c933
-rw-r--r--drivers/clk/idt/clk-idt8t49n24x-core.h272
-rw-r--r--drivers/clk/idt/clk-idt8t49n24x-debugfs.c382
-rw-r--r--drivers/clk/idt/clk-idt8t49n24x-debugfs.h21
-rw-r--r--drivers/clk/idt/clk-idt8t49n24x.c641
-rw-r--r--drivers/clk/si5324.h68
-rw-r--r--drivers/clk/si5324drv.c382
-rw-r--r--drivers/clk/si5324drv.h100
-rw-r--r--drivers/clk/zynq/clkc.c43
-rw-r--r--drivers/clk/zynqmp/clk-zynqmp.h2
-rw-r--r--drivers/clk/zynqmp/clkc.c10
-rw-r--r--drivers/clk/zynqmp/divider.c9
-rw-r--r--drivers/crypto/Kconfig21
-rw-r--r--drivers/crypto/Makefile2
-rw-r--r--drivers/crypto/xilinx/Makefile2
-rw-r--r--drivers/crypto/xilinx/zynqmp-rsa.c238
-rw-r--r--drivers/crypto/xilinx/zynqmp-sha.c301
-rw-r--r--drivers/dma/Kconfig28
-rw-r--r--drivers/dma/xilinx/Kconfig47
-rw-r--r--drivers/dma/xilinx/Makefile7
-rw-r--r--drivers/dma/xilinx/axidmatest.c697
-rw-r--r--drivers/dma/xilinx/vdmatest.c663
-rw-r--r--drivers/dma/xilinx/xilinx_dpdma.c2324
-rw-r--r--drivers/dma/xilinx/xilinx_frmbuf.c1709
-rw-r--r--drivers/dma/xilinx/xilinx_ps_pcie.h44
-rw-r--r--drivers/dma/xilinx/xilinx_ps_pcie_dma_client.c1402
-rw-r--r--drivers/dma/xilinx/xilinx_ps_pcie_main.c200
-rw-r--r--drivers/dma/xilinx/xilinx_ps_pcie_platform.c3170
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c1
-rw-r--r--drivers/edac/Kconfig22
-rw-r--r--drivers/edac/Makefile3
-rw-r--r--drivers/edac/cortex_arm64_edac.c470
-rw-r--r--drivers/edac/pl310_edac_l2.c233
-rw-r--r--drivers/edac/zynqmp_ocm_edac.c651
-rw-r--r--drivers/firmware/xilinx/Kconfig8
-rw-r--r--drivers/firmware/xilinx/Makefile3
-rw-r--r--drivers/firmware/xilinx/zynqmp-debug.c261
-rw-r--r--drivers/firmware/xilinx/zynqmp-ggs.c289
-rw-r--r--drivers/firmware/xilinx/zynqmp-secure.c197
-rw-r--r--drivers/firmware/xilinx/zynqmp.c861
-rw-r--r--drivers/fpga/Kconfig37
-rw-r--r--drivers/fpga/Makefile3
-rw-r--r--drivers/fpga/fpga-bridge.c30
-rw-r--r--drivers/fpga/fpga-mgr.c274
-rw-r--r--drivers/fpga/of-fpga-region.c3
-rw-r--r--drivers/fpga/versal-fpga.c153
-rw-r--r--drivers/fpga/xilinx-afi.c92
-rw-r--r--drivers/fpga/zynq-afi.c81
-rw-r--r--drivers/fpga/zynqmp-fpga.c211
-rw-r--r--drivers/gpio/gpio-xilinx.c747
-rw-r--r--drivers/gpio/gpio-zynq.c83
-rw-r--r--drivers/gpu/drm/Kconfig4
-rw-r--r--drivers/gpu/drm/Makefile2
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c3
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c2
-rw-r--r--drivers/gpu/drm/drm_fourcc.c43
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c6
-rw-r--r--drivers/gpu/drm/i2c/Kconfig7
-rw-r--r--drivers/gpu/drm/i2c/Makefile5
-rw-r--r--drivers/gpu/drm/i2c/adv7511.c1025
-rw-r--r--drivers/gpu/drm/i2c/adv7511.h289
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c31
-rw-r--r--drivers/gpu/drm/xlnx/Kconfig106
-rw-r--r--drivers/gpu/drm/xlnx/Makefile21
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_bridge.c563
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_bridge.h178
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_crtc.c208
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_crtc.h76
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_csc.c571
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_drv.c542
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_drv.h33
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_dsi.c1011
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_fb.c350
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_fb.h33
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_gem.c46
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_gem.h26
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_mixer.c3040
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_pl_disp.c648
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_scaler.c1980
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_sdi.c1234
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_sdi_modes.h356
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_sdi_timing.c426
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_sdi_timing.h20
-rw-r--r--drivers/gpu/drm/xlnx/xlnx_vtc.c447
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_disp.c3343
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_disp.h36
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.c1916
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.h38
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.c194
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.h28
-rw-r--r--drivers/gpu/drm/zocl/Kconfig8
-rw-r--r--drivers/gpu/drm/zocl/Makefile4
-rw-r--r--drivers/gpu/drm/zocl/zocl_bo.c271
-rw-r--r--drivers/gpu/drm/zocl/zocl_drv.c217
-rw-r--r--drivers/gpu/drm/zocl/zocl_drv.h59
-rw-r--r--drivers/hwmon/pmbus/Kconfig20
-rw-r--r--drivers/hwmon/pmbus/Makefile1
-rw-r--r--drivers/hwmon/pmbus/tps544.c364
-rw-r--r--drivers/i2c/busses/i2c-cadence.c480
-rw-r--r--drivers/i2c/busses/i2c-xiic.c461
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c7
-rw-r--r--drivers/iio/adc/Kconfig10
-rw-r--r--drivers/iio/adc/Makefile1
-rw-r--r--drivers/iio/adc/xilinx-ams.c1109
-rw-r--r--drivers/iio/adc/xilinx-ams.h278
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c99
-rw-r--r--drivers/irqchip/irq-gic.c17
-rw-r--r--drivers/irqchip/irq-xilinx-intc.c236
-rw-r--r--drivers/media/i2c/adv7511-v4l2.c531
-rw-r--r--drivers/media/i2c/ov5640.c12
-rw-r--r--drivers/media/mc/mc-entity.c31
-rw-r--r--drivers/media/platform/xilinx/Kconfig135
-rw-r--r--drivers/media/platform/xilinx/Makefile19
-rw-r--r--drivers/media/platform/xilinx/xilinx-axis-switch.c610
-rw-r--r--drivers/media/platform/xilinx/xilinx-cfa.c394
-rw-r--r--drivers/media/platform/xilinx/xilinx-cresample.c447
-rw-r--r--drivers/media/platform/xilinx/xilinx-csi2rxss.c2098
-rw-r--r--drivers/media/platform/xilinx/xilinx-demosaic.c418
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.c899
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.h23
-rw-r--r--drivers/media/platform/xilinx/xilinx-gamma-coeff.h5385
-rw-r--r--drivers/media/platform/xilinx/xilinx-gamma.c543
-rw-r--r--drivers/media/platform/xilinx/xilinx-hls-common.h36
-rw-r--r--drivers/media/platform/xilinx/xilinx-hls.c481
-rw-r--r--drivers/media/platform/xilinx/xilinx-m2m.c2108
-rw-r--r--drivers/media/platform/xilinx/xilinx-multi-scaler-coeff.h574
-rw-r--r--drivers/media/platform/xilinx/xilinx-multi-scaler.c2450
-rw-r--r--drivers/media/platform/xilinx/xilinx-remapper.c546
-rw-r--r--drivers/media/platform/xilinx/xilinx-rgb2yuv.c566
-rw-r--r--drivers/media/platform/xilinx/xilinx-scaler.c708
-rw-r--r--drivers/media/platform/xilinx/xilinx-scenechange-channel.c452
-rw-r--r--drivers/media/platform/xilinx/xilinx-scenechange-dma.c554
-rw-r--r--drivers/media/platform/xilinx/xilinx-scenechange.c195
-rw-r--r--drivers/media/platform/xilinx/xilinx-scenechange.h245
-rw-r--r--drivers/media/platform/xilinx/xilinx-sdirxss.c2371
-rw-r--r--drivers/media/platform/xilinx/xilinx-switch.c460
-rw-r--r--drivers/media/platform/xilinx/xilinx-tpg.c627
-rw-r--r--drivers/media/platform/xilinx/xilinx-vip.c177
-rw-r--r--drivers/media/platform/xilinx/xilinx-vip.h14
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.c224
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.h4
-rw-r--r--drivers/media/platform/xilinx/xilinx-vpss-csc.c1170
-rw-r--r--drivers/media/platform/xilinx/xilinx-vpss-scaler.c2108
-rw-r--r--drivers/media/platform/xilinx/xilinx-vtc.c18
-rw-r--r--drivers/media/platform/xilinx/xilinx-vtc.h1
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c13
-rw-r--r--drivers/media/usb/uvc/uvc_video.c62
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h5
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c41
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c28
-rw-r--r--drivers/misc/Kconfig22
-rw-r--r--drivers/misc/Makefile3
-rw-r--r--drivers/misc/jesd204b/Kconfig28
-rw-r--r--drivers/misc/jesd204b/Makefile5
-rw-r--r--drivers/misc/jesd204b/gtx7s_cpll_bands.c88
-rw-r--r--drivers/misc/jesd204b/gtx7s_cpll_bands.h31
-rw-r--r--drivers/misc/jesd204b/gtx7s_qpll_bands.c96
-rw-r--r--drivers/misc/jesd204b/gtx7s_qpll_bands.h30
-rw-r--r--drivers/misc/jesd204b/jesd_phy.c384
-rw-r--r--drivers/misc/jesd204b/jesd_phy.h42
-rw-r--r--drivers/misc/jesd204b/s7_gtxe2_drp.h123
-rw-r--r--drivers/misc/jesd204b/xilinx_jesd204b.c399
-rw-r--r--drivers/misc/jesd204b/xilinx_jesd204b.h135
-rw-r--r--drivers/misc/xilinx_flex_pm.c657
-rw-r--r--drivers/misc/xilinx_trafgen.c1654
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c218
-rw-r--r--drivers/mtd/chips/cfi_probe.c45
-rw-r--r--drivers/mtd/nand/raw/Kconfig14
-rw-r--r--drivers/mtd/nand/raw/Makefile2
-rw-r--r--drivers/mtd/nand/raw/arasan_nand.c1527
-rw-r--r--drivers/mtd/nand/raw/nand_base.c8
-rw-r--r--drivers/mtd/nand/raw/nand_onfi.c2
-rw-r--r--drivers/mtd/nand/raw/pl353_nand.c1234
-rw-r--r--drivers/mtd/spi-nor/controllers/cadence-quadspi.c938
-rw-r--r--drivers/mtd/spi-nor/core.c753
-rw-r--r--drivers/mtd/spi-nor/core.h8
-rw-r--r--drivers/mtd/spi-nor/issi.c32
-rw-r--r--drivers/mtd/spi-nor/macronix.c2
-rw-r--r--drivers/mtd/spi-nor/micron-st.c46
-rw-r--r--drivers/mtd/spi-nor/spansion.c6
-rw-r--r--drivers/mtd/spi-nor/sst.c2
-rw-r--r--drivers/net/can/xilinx_can.c4
-rw-r--r--drivers/net/ethernet/cadence/macb.h32
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c328
-rw-r--r--drivers/net/ethernet/cadence/macb_ptp.c24
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig69
-rw-r--r--drivers/net/ethernet/xilinx/Makefile12
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h761
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_dma.c504
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c3064
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mcdma.c1043
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c47
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_tsn_cb.c177
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_tsn_ep.c161
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_tsn_preemption.c223
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_tsn_preemption.h159
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_tsn_ptp.h88
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_tsn_ptp_clock.c325
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_tsn_ptp_xmit.c369
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_tsn_qci.c151
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_tsn_shaper.c232
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_tsn_shaper.h151
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_tsn_switch.c807
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_tsn_switch.h364
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_tsn_timer.h73
-rw-r--r--drivers/net/phy/Kconfig8
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/dp83867.c45
-rw-r--r--drivers/net/phy/mscc/mscc.h3
-rw-r--r--drivers/net/phy/mscc/mscc_main.c41
-rw-r--r--drivers/net/phy/phy.c3
-rw-r--r--drivers/net/phy/phy_device.c2
-rw-r--r--drivers/net/phy/xilinx_phy.c160
-rw-r--r--drivers/nvmem/zynqmp_nvmem.c175
-rw-r--r--drivers/of/Kconfig11
-rw-r--r--drivers/of/Makefile1
-rw-r--r--drivers/of/configfs.c294
-rw-r--r--drivers/pci/controller/Kconfig8
-rw-r--r--drivers/pci/controller/Makefile1
-rw-r--r--drivers/pci/controller/pcie-xdma-pl.c882
-rw-r--r--drivers/pci/controller/pcie-xilinx-nwl.c12
-rw-r--r--drivers/phy/Kconfig9
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/phy-zynqmp.c1583
-rw-r--r--drivers/pinctrl/Kconfig8
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/pinctrl-zynq.c7
-rw-r--r--drivers/pinctrl/pinctrl-zynqmp.c1071
-rw-r--r--drivers/remoteproc/Kconfig20
-rw-r--r--drivers/remoteproc/Makefile2
-rw-r--r--drivers/remoteproc/remoteproc_internal.h23
-rw-r--r--drivers/remoteproc/remoteproc_sysfs.c107
-rw-r--r--drivers/remoteproc/zynq_remoteproc.c463
-rw-r--r--drivers/remoteproc/zynqmp_r5_remoteproc.c979
-rw-r--r--drivers/rtc/rtc-zynqmp.c78
-rw-r--r--drivers/soc/xilinx/Kconfig3
-rw-r--r--drivers/soc/xilinx/Makefile2
-rw-r--r--drivers/soc/xilinx/xlnx_vcu.c579
-rw-r--r--drivers/soc/xilinx/xlnx_vcu_clk.c916
-rw-r--r--drivers/soc/xilinx/xlnx_vcu_core.c168
-rw-r--r--drivers/spi/Kconfig10
-rw-r--r--drivers/spi/spi-mem.c21
-rw-r--r--drivers/spi/spi-xilinx.c897
-rw-r--r--drivers/spi/spi-zynq-qspi.c408
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c408
-rw-r--r--drivers/spi/spi.c3
-rw-r--r--drivers/staging/Kconfig12
-rw-r--r--drivers/staging/Makefile6
-rw-r--r--drivers/staging/apf/Kconfig20
-rw-r--r--drivers/staging/apf/Makefile9
-rw-r--r--drivers/staging/apf/dt-binding.txt17
-rw-r--r--drivers/staging/apf/xilinx-dma-apf.c1232
-rw-r--r--drivers/staging/apf/xilinx-dma-apf.h234
-rw-r--r--drivers/staging/apf/xlnk-eng.c242
-rw-r--r--drivers/staging/apf/xlnk-eng.h33
-rw-r--r--drivers/staging/apf/xlnk-ioctl.h37
-rw-r--r--drivers/staging/apf/xlnk-sysdef.h34
-rw-r--r--drivers/staging/apf/xlnk.c1580
-rw-r--r--drivers/staging/apf/xlnk.h175
-rw-r--r--drivers/staging/clocking-wizard/Kconfig10
-rw-r--r--drivers/staging/clocking-wizard/Makefile2
-rw-r--r--drivers/staging/clocking-wizard/TODO12
-rw-r--r--drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c333
-rw-r--r--drivers/staging/fclk/Kconfig9
-rw-r--r--drivers/staging/fclk/Makefile1
-rw-r--r--drivers/staging/fclk/TODO2
-rw-r--r--drivers/staging/fclk/dt-binding.txt16
-rw-r--r--drivers/staging/fclk/xilinx_fclk.c115
-rw-r--r--drivers/staging/xlnx_tsmux/Kconfig11
-rw-r--r--drivers/staging/xlnx_tsmux/MAINTAINERS4
-rw-r--r--drivers/staging/xlnx_tsmux/Makefile1
-rw-r--r--drivers/staging/xlnx_tsmux/dt-binding.txt28
-rw-r--r--drivers/staging/xlnx_tsmux/xlnx_mpg2tsmux.c1568
-rw-r--r--drivers/staging/xlnxsync/Kconfig11
-rw-r--r--drivers/staging/xlnxsync/MAINTAINERS4
-rw-r--r--drivers/staging/xlnxsync/Makefile1
-rw-r--r--drivers/staging/xlnxsync/dt-binding.txt34
-rw-r--r--drivers/staging/xlnxsync/xlnxsync.c1301
-rw-r--r--drivers/staging/xroeframer/Kconfig18
-rw-r--r--drivers/staging/xroeframer/Makefile12
-rw-r--r--drivers/staging/xroeframer/README47
-rw-r--r--drivers/staging/xroeframer/dt-binding.txt17
-rw-r--r--drivers/staging/xroeframer/roe_framer_ctrl.h1088
-rw-r--r--drivers/staging/xroeframer/sysfs_xroe.c562
-rw-r--r--drivers/staging/xroeframer/sysfs_xroe_framer_ipv4.c718
-rw-r--r--drivers/staging/xroeframer/sysfs_xroe_framer_ipv6.c571
-rw-r--r--drivers/staging/xroeframer/sysfs_xroe_framer_stats.c401
-rw-r--r--drivers/staging/xroeframer/sysfs_xroe_framer_udp.c181
-rw-r--r--drivers/staging/xroeframer/xroe_framer.c155
-rw-r--r--drivers/staging/xroeframer/xroe_framer.h63
-rw-r--r--drivers/staging/xroetrafficgen/Kconfig14
-rw-r--r--drivers/staging/xroetrafficgen/Makefile8
-rw-r--r--drivers/staging/xroetrafficgen/README19
-rw-r--r--drivers/staging/xroetrafficgen/dt-binding.txt15
-rw-r--r--drivers/staging/xroetrafficgen/roe_radio_ctrl.h183
-rw-r--r--drivers/staging/xroetrafficgen/xroe-traffic-gen-sysfs.c824
-rw-r--r--drivers/staging/xroetrafficgen/xroe-traffic-gen.c124
-rw-r--r--drivers/staging/xroetrafficgen/xroe-traffic-gen.h15
-rw-r--r--drivers/tty/serial/8250/8250_of.c4
-rw-r--r--drivers/tty/serial/amba-pl011.c34
-rw-r--r--drivers/tty/serial/xilinx_uartps.c30
-rw-r--r--drivers/uio/Kconfig24
-rw-r--r--drivers/uio/Makefile4
-rw-r--r--drivers/uio/uio_core.c (renamed from drivers/uio/uio.c)43
-rw-r--r--drivers/uio/uio_dmabuf.c210
-rw-r--r--drivers/uio/uio_dmabuf.h26
-rw-r--r--drivers/uio/uio_xilinx_ai_engine.c296
-rw-r--r--drivers/uio/uio_xilinx_apm.c358
-rw-r--r--drivers/usb/chipidea/ci_hdrc_usb2.c5
-rw-r--r--drivers/usb/chipidea/host.c9
-rw-r--r--drivers/usb/chipidea/otg_fsm.c9
-rw-r--r--drivers/usb/dwc3/Kconfig10
-rw-r--r--drivers/usb/dwc3/Makefile11
-rw-r--r--drivers/usb/dwc3/core.c237
-rw-r--r--drivers/usb/dwc3/core.h101
-rw-r--r--drivers/usb/dwc3/debugfs.c50
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c514
-rw-r--r--drivers/usb/dwc3/ep0.c43
-rw-r--r--drivers/usb/dwc3/gadget.c325
-rw-r--r--drivers/usb/dwc3/gadget.h19
-rw-r--r--drivers/usb/dwc3/gadget_hibernation.c568
-rw-r--r--drivers/usb/dwc3/host.c32
-rw-r--r--drivers/usb/dwc3/otg.c2190
-rw-r--r--drivers/usb/dwc3/otg.h244
-rw-r--r--drivers/usb/dwc3/platform_data.h54
-rw-r--r--drivers/usb/gadget/composite.c11
-rw-r--r--drivers/usb/gadget/function/f_tcm.c303
-rw-r--r--drivers/usb/gadget/function/tcm.h7
-rw-r--r--drivers/usb/gadget/function/uvc_video.c4
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c80
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c12
-rw-r--r--drivers/usb/host/xhci-hub.c7
-rw-r--r--drivers/usb/host/xhci-mem.c14
-rw-r--r--drivers/usb/host/xhci-plat.c60
-rw-r--r--drivers/usb/host/xhci-ring.c107
-rw-r--r--drivers/usb/host/xhci.c29
-rw-r--r--drivers/usb/host/xhci.h5
-rw-r--r--drivers/usb/phy/Kconfig1
-rw-r--r--drivers/usb/phy/phy-ulpi.c100
-rw-r--r--drivers/usb/storage/uas.c310
-rw-r--r--drivers/usb/storage/unusual_uas.h6
-rw-r--r--drivers/watchdog/of_xilinx_wdt.c297
-rw-r--r--include/drm/drm_fourcc.h21
-rw-r--r--include/dt-bindings/drm/mipi-dsi.h11
-rw-r--r--include/dt-bindings/media/xilinx-vip.h6
-rw-r--r--include/dt-bindings/net/mscc-phy-vsc8531.h9
-rw-r--r--include/dt-bindings/phy/phy.h1
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-zynqmp.h36
-rw-r--r--include/dt-bindings/power/xlnx-versal-power.h42
-rw-r--r--include/linux/clk/zynq.h4
-rw-r--r--include/linux/cpuhotplug.h2
-rw-r--r--include/linux/dma/xilinx_frmbuf.h209
-rw-r--r--include/linux/dma/xilinx_ps_pcie_dma.h69
-rw-r--r--include/linux/firmware/xlnx-zynqmp.h329
-rw-r--r--include/linux/fpga/fpga-mgr.h39
-rw-r--r--include/linux/irqchip/arm-gic.h3
-rw-r--r--include/linux/mtd/mtd.h3
-rw-r--r--include/linux/mtd/onfi.h3
-rw-r--r--include/linux/mtd/rawnand.h18
-rw-r--r--include/linux/mtd/spi-nor.h38
-rw-r--r--include/linux/phy/phy-zynqmp.h52
-rw-r--r--include/linux/remoteproc.h6
-rw-r--r--include/linux/soc/xilinx/zynqmp/fw.h37
-rw-r--r--include/linux/spi/spi.h21
-rw-r--r--include/linux/usb/chipidea.h1
-rw-r--r--include/linux/usb/xhci_pdriver.h26
-rw-r--r--include/linux/xilinx_phy.h20
-rw-r--r--include/media/media-entity.h8
-rw-r--r--include/media/v4l2-subdev.h4
-rw-r--r--include/soc/xilinx/xlnx_vcu.h39
-rw-r--r--include/uapi/drm/drm_fourcc.h15
-rw-r--r--include/uapi/drm/drm_mode.h2
-rw-r--r--include/uapi/linux/media-bus-format.h18
-rw-r--r--include/uapi/linux/uio/uio.h65
-rw-r--r--include/uapi/linux/v4l2-mediabus.h3
-rw-r--r--include/uapi/linux/v4l2-subdev.h23
-rw-r--r--include/uapi/linux/videodev2.h25
-rw-r--r--include/uapi/linux/xilinx-csi2rxss.h20
-rw-r--r--include/uapi/linux/xilinx-hls.h23
-rw-r--r--include/uapi/linux/xilinx-sdirxss.h66
-rw-r--r--include/uapi/linux/xilinx-v4l2-controls.h161
-rw-r--r--include/uapi/linux/xilinx-v4l2-events.h25
-rw-r--r--include/uapi/linux/xlnx_mpg2tsmux_interface.h252
-rw-r--r--include/uapi/linux/xlnxsync.h147
-rw-r--r--include/uapi/linux/zocl_ioctl.h125
-rw-r--r--mm/page_alloc.c2
-rw-r--r--samples/xilinx_apm/Makefile71
-rw-r--r--samples/xilinx_apm/main.c134
-rw-r--r--samples/xilinx_apm/xaxipmon.c1269
-rw-r--r--samples/xilinx_apm/xaxipmon.h943
-rw-r--r--sound/soc/xilinx/Kconfig39
-rw-r--r--sound/soc/xilinx/Makefile9
-rw-r--r--sound/soc/xilinx/xilinx-dp-card.c123
-rw-r--r--sound/soc/xilinx/xilinx-dp-codec.c178
-rw-r--r--sound/soc/xilinx/xilinx-dp-pcm.c76
-rw-r--r--sound/soc/xilinx/xlnx_formatter_pcm.c343
-rw-r--r--sound/soc/xilinx/xlnx_i2s.c113
-rw-r--r--sound/soc/xilinx/xlnx_pl_snd_card.c457
-rw-r--r--sound/soc/xilinx/xlnx_sdi_audio.c610
-rw-r--r--sound/soc/xilinx/xlnx_snd_common.h23
-rw-r--r--sound/soc/xilinx/xlnx_spdif.c139
612 files changed, 137716 insertions, 4448 deletions
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
new file mode 100644
index 000000000000..64fabaf494d8
--- /dev/null
+++ b/.github/pull_request_template.md
@@ -0,0 +1,3 @@
+Please do not submit a Pull Request via github. Our project makes use of
+mailing lists for patch submission and review. For more details please see
+https://xilinx-wiki.atlassian.net/wiki/spaces/A/pages/18842172/Create+and+Submit+a+Patch
diff --git a/Documentation/ABI/stable/sysfs-firmware-zynqmp b/Documentation/ABI/stable/sysfs-firmware-zynqmp
new file mode 100644
index 000000000000..eeae291a048c
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-firmware-zynqmp
@@ -0,0 +1,103 @@
+What: /sys/firmware/zynqmp/ggs*
+Date: January 2018
+KernelVersion: 4.15.0
+Contact: "Jolly Shah" <jollys@xilinx.com>
+Description:
+ Read/Write PMU global general storage register value,
+ GLOBAL_GEN_STORAGE{0:3}.
+ Global general storage register that can be used
+ by system to pass information between masters.
+
+ The register is reset during system or power-on
+ resets. Three registers are used by the FSBL and
+ other Xilinx software products: GLOBAL_GEN_STORAGE{4:6}.
+
+ Usage:
+ # cat /sys/firmware/zynqmp/ggs0
+ # echo <mask> <value> > /sys/firmware/zynqmp/ggs0
+
+ Example:
+ # cat /sys/firmware/zynqmp/ggs0
+ # echo 0xFFFFFFFF 0x1234ABCD > /sys/firmware/zynqmp/ggs0
+
+Users: Xilinx
+
+What: /sys/firmware/zynqmp/pggs*
+Date: January 2018
+KernelVersion: 4.15.0
+Contact: "Jolly Shah" <jollys@xilinx.com>
+Description:
+ Read/Write PMU persistent global general storage register
+ value, PERS_GLOB_GEN_STORAGE{0:3}.
+ Persistent global general storage register that
+ can be used by system to pass information between
+ masters.
+
+ This register is only reset by the power-on reset
+ and maintains its value through a system reset.
+ Four registers are used by the FSBL and other Xilinx
+ software products: PERS_GLOB_GEN_STORAGE{4:7}.
+ Register is reset only by a POR reset.
+
+ Usage:
+ # cat /sys/firmware/zynqmp/pggs0
+ # echo <mask> <value> > /sys/firmware/zynqmp/pggs0
+
+ Example:
+ # cat /sys/firmware/zynqmp/pggs0
+ # echo 0xFFFFFFFF 0x1234ABCD > /sys/firmware/zynqmp/pggs0
+
+Users: Xilinx
+
+What: /sys/firmware/zynqmp/shutdown_scope
+Date: February 2018
+KernelVersion: 4.15.6
+Contact: "Jolly Shah" <jollys@xilinx.com>
+Description:
+ This sysfs interface allows to set the shutdown scope for the
+ next shutdown request. When the next shutdown is performed, the
+ platform specific portion of PSCI-system_off can use the chosen
+ shutdown scope.
+
+ Following are available shutdown scopes(subtypes):
+
+ subsystem: Only the APU along with all of its peripherals
+ not used by other processing units will be
+ shut down. This may result in the FPD power
+ domain being shut down provided that no other
+ processing unit uses FPD peripherals or DRAM.
+ ps_only: The complete PS will be shut down, including the
+ RPU, PMU, etc. Only the PL domain (FPGA)
+ remains untouched.
+ system: The complete system/device is shut down.
+
+ Usage:
+ # cat /sys/firmware/zynqmp/shutdown_scope
+ # echo <scope> > /sys/firmware/zynqmp/shutdown_scope
+
+ Example:
+ # cat /sys/firmware/zynqmp/shutdown_scope
+ # echo "subsystem" > /sys/firmware/zynqmp/shutdown_scope
+
+Users: Xilinx
+
+What: /sys/firmware/zynqmp/health_status
+Date: April 2018
+KernelVersion: 4.14.0
+Contact: "Rajan Vaja" <rajanv@xilinx.com>
+Description:
+ This sysfs interface allows to set the health status. If PMUFW
+ is compiled with CHECK_HEALTHY_BOOT, it will check the healthy
+ bit on FPD WDT expiration. If healthy bit is set by a user
+ application running in Linux, PMUFW will do APU only restart. If
+ healthy bit is not set during FPD WDT expiration, PMUFW will do
+ system restart.
+
+ Usage:
+ Set healthy bit
+ # echo 1 > /sys/firmware/zynqmp/health_status
+
+ Unset healthy bit
+ # echo 0 > /sys/firmware/zynqmp/health_status
+
+Users: Xilinx
diff --git a/Documentation/ABI/testing/sysfs-class-fpga-bridge b/Documentation/ABI/testing/sysfs-class-fpga-bridge
index 312ae2c579d8..676700d7a61f 100644
--- a/Documentation/ABI/testing/sysfs-class-fpga-bridge
+++ b/Documentation/ABI/testing/sysfs-class-fpga-bridge
@@ -9,3 +9,12 @@ Date: January 2016
KernelVersion: 4.5
Contact: Alan Tull <atull@opensource.altera.com>
Description: Show bridge state as "enabled" or "disabled"
+
+What: /sys/class/fpga_bridge/<bridge>/set
+Date: January 2017
+KernelVersion: 4.9
+Contact: Michal Simek <michal.simek@xilinx.com>
+Description: Manually set bridge state (0-disable, !0 enable).
+ Enabling this option requires that the module is
+ compiled with #define DEBUG which is enabled by default
+ when CONFIG_DEBUG_KERNEL is setup.
diff --git a/Documentation/ABI/testing/sysfs-driver-cortexa53-edac b/Documentation/ABI/testing/sysfs-driver-cortexa53-edac
new file mode 100644
index 000000000000..87ed5ca3af22
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-cortexa53-edac
@@ -0,0 +1,10 @@
+What: /sys/devices/system/edac/cpu_cache/inject_(L1/L2)_Cache_Error
+Date: June 2016
+Contact: nagasure@xilinx.com
+ punnaia@xilinx.com
+Description: This control file allows to inject cache errors on cortexa53
+ L1 and L2 caches. arm provided error injection for cortexa53
+ caches (L1 and L2). Just echo 1 > /sys/devices/system/edac/
+ cpu_cache/inject_L1_Error for L1 cache error injection and
+ echo 1 > /sys/devices/system/edac/cpu_cache/inject_L2_Error
+ for L2 cache error injection.
diff --git a/Documentation/devicetree/bindings/arm/xilinx.yaml b/Documentation/devicetree/bindings/arm/xilinx.yaml
index c73b1f5c7f49..265020976bfd 100644
--- a/Documentation/devicetree/bindings/arm/xilinx.yaml
+++ b/Documentation/devicetree/bindings/arm/xilinx.yaml
@@ -58,12 +58,6 @@ properties:
- const: xlnx,zynqmp-zc1254
- const: xlnx,zynqmp
- - description: Xilinx internal board zc1275
- items:
- - const: xlnx,zynqmp-zc1275-revA
- - const: xlnx,zynqmp-zc1275
- - const: xlnx,zynqmp
-
- description: Xilinx 96boards compatible board zcu100
items:
- const: xlnx,zynqmp-zcu100-revC
@@ -91,6 +85,7 @@ properties:
items:
- enum:
- xlnx,zynqmp-zcu104-revA
+ - xlnx,zynqmp-zcu104-revC
- xlnx,zynqmp-zcu104-rev1.0
- const: xlnx,zynqmp-zcu104
- const: xlnx,zynqmp
@@ -107,8 +102,41 @@ properties:
items:
- enum:
- xlnx,zynqmp-zcu111-revA
- - xlnx,zynqmp-zcu11-rev1.0
+ - xlnx,zynqmp-zcu111-rev1.0
- const: xlnx,zynqmp-zcu111
- const: xlnx,zynqmp
+ - description: Xilinx evaluation board zcu208
+ items:
+ - enum:
+ - xlnx,zynqmp-zcu208-revA
+ - xlnx,zynqmp-zcu208-rev1.0
+ - const: xlnx,zynqmp-zcu208
+ - const: xlnx,zynqmp
+
+ - description: Xilinx evaluation board zcu216
+ items:
+ - enum:
+ - xlnx,zynqmp-zcu216-revA
+ - xlnx,zynqmp-zcu216-rev1.0
+ - const: xlnx,zynqmp-zcu216
+ - const: xlnx,zynqmp
+
+ - description: Xilinx evaluation board zcu1275
+ items:
+ - enum:
+ - xlnx,zynqmp-zcu1275-revA
+ - xlnx,zynqmp-zcu1275-revB
+ - xlnx,zynqmp-zcu1275-rev1.0
+ - const: xlnx,zynqmp-zcu1275
+ - const: xlnx,zynqmp
+
+ - description: Xilinx evaluation board zcu1285
+ items:
+ - enum:
+ - xlnx,zynqmp-zcu1285-revA
+ - xlnx,zynqmp-zcu1285-rev1.0
+ - const: xlnx,zynqmp-zcu1285
+ - const: xlnx,zynqmp
+
...
diff --git a/Documentation/devicetree/bindings/arm/zynq/zynq-efuse.txt b/Documentation/devicetree/bindings/arm/zynq/zynq-efuse.txt
new file mode 100644
index 000000000000..39817e9750c3
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/zynq/zynq-efuse.txt
@@ -0,0 +1,15 @@
+Device tree bindings for Zynq's eFuse Controller
+
+The Zynq eFuse controller provides the access to the chip efuses which contain
+information about device DNA, security settings and also device status.
+
+Required properties:
+ compatible: Compatibility string. Must be "xlnx,zynq-efuse".
+ reg: Specify the base and size of the EFUSE controller registers
+ in the memory map. E.g.: reg = <0xf800d000 0x20>;
+
+Example:
+efuse: efuse@f800d000 {
+ compatible = "xlnx,zynq-efuse";
+ reg = <0xf800d000 0x20>;
+};
diff --git a/Documentation/devicetree/bindings/arm/zynq/zynq-ocmc.txt b/Documentation/devicetree/bindings/arm/zynq/zynq-ocmc.txt
new file mode 100644
index 000000000000..b6dbf05b4eb5
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/zynq/zynq-ocmc.txt
@@ -0,0 +1,17 @@
+Device tree bindings for Zynq's OCM Controller
+
+The OCM is divided into 4 64kB segments which can be separately configured
+to low or high location. Location is controlled via SLCR.
+
+Required properties:
+ compatible: Compatibility string. Must be "xlnx,zynq-ocmc-1.0".
+ reg: Specify the base and size of the OCM controller registers
+ in the memory map. E.g.: reg = <0xf800c000 0x1000>;
+
+Example:
+ocmc: ocmc@f800c000 {
+ compatible = "xlnx,zynq-ocmc-1.0";
+ interrupt-parent = <&intc>;
+ interrupts = <0 3 4>;
+ reg = <0xf800c000 0x1000>;
+} ;
diff --git a/Documentation/devicetree/bindings/ata/ahci-ceva.txt b/Documentation/devicetree/bindings/ata/ahci-ceva.txt
index 7561cc4de371..d34f11771d5f 100644
--- a/Documentation/devicetree/bindings/ata/ahci-ceva.txt
+++ b/Documentation/devicetree/bindings/ata/ahci-ceva.txt
@@ -38,6 +38,8 @@ Required properties:
Optional properties:
- ceva,broken-gen2: limit to gen1 speed instead of gen2.
+ - dma-coherent: Enable this flag if CCI is enabled in design.
+ Adding this flag configures AXI cache control register.
Examples:
ahci@fd0c0000 {
@@ -56,4 +58,5 @@ Examples:
ceva,p1-burst-params = /bits/ 8 <0x0A 0x08 0x4A 0x06>;
ceva,p1-retry-params = /bits/ 16 <0x0216 0x7F06>;
ceva,broken-gen2;
+ dma-coherent;
};
diff --git a/Documentation/devicetree/bindings/clock/idt,idt8t49n24x.txt b/Documentation/devicetree/bindings/clock/idt,idt8t49n24x.txt
new file mode 100644
index 000000000000..8b52017cf1c3
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/idt,idt8t49n24x.txt
@@ -0,0 +1,156 @@
+Binding for IDT 8T49N24x Universal Frequency Translator
+
+The 8T49N241 has one fractional-feedback PLL that can be used as a
+jitter attenuator and frequency translator. It is equipped with one
+integer and three fractional output dividers, allowing the generation
+of up to four different output frequencies, ranging from 8kHz to 1GHz.
+These frequencies are completely independent of each other, the input
+reference frequencies and the crystal reference frequency. The device
+places virtually no constraints on input to output frequency conversion,
+supporting all FEC rates, including the new revision of ITU-T
+Recommendation G.709 (2009), most with 0ppm conversion error.
+The outputs may select among LVPECL, LVDS, HCSL or LVCMOS output levels.
+
+The driver can read a full register map from the DT, and will use that
+register map to initialize the attached part (via I2C) when the system
+boots. Any configuration not supported by the common clock framework
+must be done via the full register map, including optimized settings.
+
+The 8T49N241 accepts up to two differential or single-ended input clocks
+and a fundamental-mode crystal input. The internal PLL can lock to either
+of the input reference clocks or just to the crystal to behave as a
+frequency synthesizer. The PLL can use the second input for redundant
+backup of the primary input reference, but in this case, both input clock
+references must be related in frequency.
+
+All outputs are currently assumed to be LVDS, unless overridden in the
+full register map in the DT.
+
+==I2C device node==
+
+Required properties:
+- compatible: shall be one of "idt,idt8t49n241"
+- reg: i2c device address, shall be one of 0x7C, 0x6C, 0x7D, 0x6D,
+ 0x7E, 0x6E, 0x7F, 0x6F.
+- #clock-cells: From common clock bindings: Shall be 1.
+
+- clocks: from common clock binding; input clock handle. Required.
+- clock-names: from common clock binding; clock input names, shall be
+ one of "input-clk0", "input-clk1", "input-xtal". Required.
+
+==Mapping between clock specifier and physical pins==
+
+When referencing the provided clock in the DT using phandle and
+clock specifier, the following mapping applies:
+
+8T49N241:
+ 0 -- Q0
+ 1 -- Q1
+ 2 -- Q2
+ 3 -- Q3
+
+==Example==
+
+/* Example1: 25MHz input clock (via CLK0) */
+
+ref25: ref25m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
+};
+
+i2c-master-node {
+
+ /* IDT 8T49N241 i2c universal frequency translator */
+ i2c241: clock-generator@6c {
+ compatible = "idt,idt8t49n241";
+ reg = <0x6c>;
+ #clock-cells = <1>;
+
+ /* Connect input-clk0 to 25MHz reference */
+ clocks = <&ref25m>;
+ clock-names = "input-clk0";
+ };
+};
+
+/* Consumer referencing the 8T49N241 pin Q1 */
+consumer {
+ ...
+ clocks = <&i2c241 1>;
+ ...
+}
+
+/* Example2: 40MHz xtal frequency, specify all settings */
+
+ref40: ref40m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <40000000>;
+};
+
+i2c-master-node {
+
+ /* IDT 8T49N241 i2c universal frequency translator */
+ i2c241: clock-generator@6c {
+ compatible = "idt,idt8t49n241";
+ reg = <0x6c>;
+ #clock-cells = <1>;
+
+ /* Connect input-xtal to 40MHz reference */
+ clocks = <&ref40m>;
+ clock-names = "input-xtal";
+
+ settings=[
+09 50 00 60 67 C5 6C FF 03 00 30 00 00 01 00 00
+01 07 00 00 07 00 00 77 6D 06 00 00 00 00 00 FF
+FF FF FF 00 3F 00 2A 00 16 33 33 00 01 00 00 D0
+00 00 00 00 00 00 00 00 00 04 00 00 00 02 00 00
+00 00 00 00 00 00 00 17 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 D7 0A 2B 20 00 00 00 0B
+00 00 00 00 00 00 00 00 00 00 27 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+C3 00 08 01 00 00 00 00 00 00 00 00 00 30 00 00
+00 0A 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+00 00 00 00 85 00 00 9C 01 D4 02 71 07 00 00 00
+00 83 00 10 02 08 8C
+];
+ };
+};
diff --git a/Documentation/devicetree/bindings/clock/silabs,si5324.txt b/Documentation/devicetree/bindings/clock/silabs,si5324.txt
new file mode 100644
index 000000000000..642af113aa6e
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/silabs,si5324.txt
@@ -0,0 +1,78 @@
+Binding for Silicon Labs si5324, si5328 and si5319 programmable
+I2C clock generator.
+
+Reference
+This binding uses the common clock binding[1].
+The si5324 is programmable i2c low-bandwidth, jitter-attenuating, precision
+clock multiplier with up to 2 output clocks. The internal structure can be
+found in [2].
+The internal pin structure of si5328 and si5319 can be found in [3].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+[2] Si5324 Data Sheet
+ http://www.silabs.com/Support%20Documents/TechnicalDocs/Si5324.pdf
+[3] Si53xx Reference Manual
+ http://www.silabs.com/Support%20Documents/TechnicalDocs/
+ Si53xxReferenceManual.pdf
+
+==I2C device node==
+
+Required properties:
+- compatible: should be one of
+ "silabs,si5324"
+ "silabs,si5319"
+ "silabs,si5328"
+- reg: i2c device address.
+- #clock-cells: from common clock binding; shall be set to 1.
+- clocks: from common clock binding; list of parent clock
+ handles, clock name should be one of
+ "xtal"
+ "clkin1"
+ "clkin2"
+- #address-cells: shall be set to 1.
+- #size-cells: shall be set to 0.
+
+Optional properties:
+- silabs,pll-source: pair of (number, source) for each pll. Allows
+ to overwrite clock source of pll.
+
+==Child nodes==
+
+Each of the clock outputs can be overwritten individually by
+using a child node to the I2C device node. If a child node for a clock
+output is not set, the eeprom configuration is not overwritten.
+
+Required child node properties:
+- reg: number of clock output.
+- clock-frequency: default output frequency at power on
+
+Optional child node properties:
+- silabs,drive-strength: output drive strength in mA, shall be one of {2,4,6,8}.
+
+Example:
+Following example describes the ZCU102 board with hdmi design which
+uses si5319 as clock generator. XTAL is hard-wired on the board to act
+as input clock with a frequency of 114.285MHz.
+
+refhdmi: refhdmi {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <114285000>;
+};
+
+/* Si5319 i2c clock generator */
+si5319: clock-generator@68 {
+ status = "okay";
+ compatible = "silabs,si5319";
+ reg = <0x68>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #clock-cells = <1>;
+ clocks = <&refhdmi>;
+ clock-names = "xtal";
+
+ clk0 {
+ reg = <0>;
+ clock-frequency = <27000000>;
+ };
+};
diff --git a/drivers/staging/clocking-wizard/dt-binding.txt b/Documentation/devicetree/bindings/clock/xlnx,clocking-wizard.txt
index 723271e93316..aedac845d49a 100644
--- a/drivers/staging/clocking-wizard/dt-binding.txt
+++ b/Documentation/devicetree/bindings/clock/xlnx,clocking-wizard.txt
@@ -9,6 +9,7 @@ http://www.xilinx.com/support/documentation/ip_documentation/clk_wiz/v5_1/pg065-
Required properties:
- compatible: Must be 'xlnx,clocking-wizard'
+ - #clock-cells: Number of cells in a clock specifier. Should be 1
- reg: Base and size of the cores register space
- clocks: Handle to input clock
- clock-names: Tuple containing 'clk_in1' and 's_axi_aclk'
@@ -19,6 +20,7 @@ Optional properties:
Example:
clock-generator@40040000 {
+ #clock-cells = <1>;
reg = <0x40040000 0x1000>;
compatible = "xlnx,clocking-wizard";
speed-grade = <1>;
diff --git a/Documentation/devicetree/bindings/clock/xlnx,versal-wiz.yaml b/Documentation/devicetree/bindings/clock/xlnx,versal-wiz.yaml
new file mode 100644
index 000000000000..00d657280833
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/xlnx,versal-wiz.yaml
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/xlnx,versal-wiz.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Xilinx Versal clocking wizard
+
+maintainers:
+ - Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>
+
+description: |
+ The clocking wizard is a soft ip clocking block of Xilinx versal. It
+ reads required input clock frequencies from the devicetree and acts as
+ clock output.
+
+select: false
+
+properties:
+ compatible:
+ const: xlnx,clk-wizard-1.0
+
+ "#clock-cells":
+ const: 1
+
+ clocks:
+ description: List of clock specifiers which are external input
+ clocks to the given clock controller.
+ items:
+ - description: clock input
+ - description: axi clock
+
+ clock-names:
+ items:
+ - const: clk_in1
+ - const: s_axi_aclk
+
+required:
+ - compatible
+ - "#clock-cells"
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-generator@40040000 {
+ #clock-cells = <1>;
+ reg = <0x40040000 0x1000>;
+ compatible = "xlnx,clk-wizard-1.0";
+ speed-grade = <1>;
+ clock-names = "clk_in1", "s_axi_aclk";
+ clocks = <&clkc 15>, <&clkc 15>;clock-output-names = "clk_out1", "clk_out2",
+ "clk_out3", "clk_out4", "clk_out5",
+ "clk_out6", "clk_out7";
+ };
+...
diff --git a/Documentation/devicetree/bindings/crypto/xlnx,zynqmp-aes.txt b/Documentation/devicetree/bindings/crypto/xlnx,zynqmp-aes.txt
new file mode 100644
index 000000000000..226bfb9261d7
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/xlnx,zynqmp-aes.txt
@@ -0,0 +1,12 @@
+Xilinx ZynqMP AES hw acceleration support
+
+The ZynqMP PS-AES hw accelerator is used to encrypt/decrypt
+the given user data.
+
+Required properties:
+- compatible: should contain "xlnx,zynqmp-aes"
+
+Example:
+ zynqmp_aes {
+ compatible = "xlnx,zynqmp-aes";
+ };
diff --git a/Documentation/devicetree/bindings/crypto/zynqmp-rsa.txt b/Documentation/devicetree/bindings/crypto/zynqmp-rsa.txt
new file mode 100644
index 000000000000..6b4c0e0446fc
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/zynqmp-rsa.txt
@@ -0,0 +1,12 @@
+Xilinx ZynqMP RSA hw acceleration support
+
+The zynqmp PS-RSA hw accelerator is used to encrypt/decrypt
+the given user data.
+
+Required properties:
+- compatible: should contain "xlnx,zynqmp-rsa"
+
+Example:
+ xlnx_rsa: zynqmp_rsa {
+ compatible = "xlnx,zynqmp-rsa";
+ };
diff --git a/Documentation/devicetree/bindings/crypto/zynqmp-sha.txt b/Documentation/devicetree/bindings/crypto/zynqmp-sha.txt
new file mode 100644
index 000000000000..c7be6e2ce246
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/zynqmp-sha.txt
@@ -0,0 +1,12 @@
+Xilinx ZynqMP SHA3(keccak-384) hw acceleration support.
+
+The ZynqMp PS-SHA hw accelerator is used to calculate the
+SHA3(keccak-384) hash value on the given user data.
+
+Required properties:
+- compatible: should contain "xlnx,zynqmp-keccak-384"
+
+Example:
+ xlnx_keccak_384: sha384 {
+ compatible = "xlnx,zynqmp-keccak-384";
+ };
diff --git a/Documentation/devicetree/bindings/display/xlnx/bridge.txt b/Documentation/devicetree/bindings/display/xlnx/bridge.txt
new file mode 100644
index 000000000000..c5f7c0a1dea0
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/xlnx/bridge.txt
@@ -0,0 +1,29 @@
+Xilinx DRM bridge
+-----------------
+
+The Xilinx DRM provides the interface layer called Xilinx bridge to bridge
+multiple components with a series of functions. It models a simple
+unidirectional communication, single client -> single bridge. The client
+is not limited to DRM compatible drivers, but can be any subsystem driver,
+but the client driver should call the bridge functions explicitly.
+
+Provider
+--------
+
+The bridge provider should assign a corresponding of_node to struct xlnx_bridge.
+For example, if its own node is used,
+
+ provider_node: provider_node {
+ };
+
+ bridge.of_node = provider_device->of_node;
+
+Client
+------
+
+The bridge client should have a phandle to the bridge device node. The bridge
+device node should be passed to get a bridge instance,
+
+ client_node {
+ xlnx,bridge = <&provider_node>;
+ };
diff --git a/Documentation/devicetree/bindings/display/xlnx/xlnx,dsi.txt b/Documentation/devicetree/bindings/display/xlnx/xlnx,dsi.txt
new file mode 100644
index 000000000000..a545a0d818e6
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/xlnx/xlnx,dsi.txt
@@ -0,0 +1,74 @@
+Device-Tree bindings for Xilinx MIPI DSI Tx IP core
+
+The IP core supports transmission of video data in MIPI DSI protocol.
+
+Required properties:
+ - compatible: Should be "xlnx,dsi".
+
+ - reg: Base address and size of the IP core.
+
+ - xlnx,dsi-data-type: Color format. The value should be one of "MIPI_DSI_FMT_RGB888",
+ "MIPI_DSI_FMT_RGB666", "MIPI_DSI_FMT_RGB666_PACKED" or "MIPI_DSI_FMT_RGB565".
+
+ - simple_panel: The subnode for connected panel. This represents the
+ DSI peripheral connected to the DSI host node. Please refer to
+ Documentation/devicetree/bindings/display/mipi-dsi-bus.txt. The
+ simple-panel driver has auo,b101uan01 panel timing parameters added along
+ with other existing panels. DSI driver derive the required Tx IP controller
+ timing values from the panel timing parameters.
+
+ - port: Logical block can be used / connected independently with
+ external device. In the display controller port nodes, topology
+ for entire pipeline should be described using the DT bindings defined in
+ Documentation/devicetree/bindings/graph.txt.
+
+ - xlnx,dsi-num-lanes: Possible number of DSI lanes for the Tx controller.
+ The values should be 1, 2, 3 or 4. Based on xlnx,dsi-num-lanes and
+ line rate for the MIPI D-PHY core in Mbps, the AXI4-stream received by
+ Xilinx MIPI DSI Tx IP core adds markers as per DSI protocol and the packet
+ thus framed is converted to serial data by MIPI D-PHY core. Please refer
+ Xilinx pg238 for more details. This value should be equal to the number
+ of lanes supported by the connected DSI panel. Panel has to support this
+ value or has to be programmed to the same value that DSI Tx controller is
+ configured to.
+
+ - clocks: List of phandles to Video and 200Mhz DPHY clocks.
+
+ - clock-names: Must contain "s_axis_aclk" and "dphy_clk_200M" in same order as
+ clocks listed in clocks property.
+
+Required simple_panel properties:
+ - compatible: Value should be one of the panel names in
+ Documentation/devicetree/bindings/display/panel/. e.g. "auo,b101uan01".
+ For available panel compatible strings, please refer to bindings in
+ Documentation/devicetree/bindings/display/panel/
+
+Optional properties:
+ - xlnx,vpss: vpss phandle
+ This handle is required only when VPSS is connected to DSI as bridge.
+ - xlnx,dsi-cmd-mode: denotes command mode enable.
+
+Example:
+
+#include <dt-bindings/drm/mipi-dsi.h>
+ mipi_dsi_tx_subsystem@80000000 {
+ compatible = "xlnx,dsi";
+ reg = <0x0 0x80000000 0x0 0x10000>;
+ xlnx,dsi-num-lanes = <4>;
+ xlnx,dsi-data-type = <MIPI_DSI_FMT_RGB888>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ xlnx,vpss = <&v_proc_ss_0>;
+ clock-names = "dphy_clk_200M", "s_axis_aclk";
+ clocks = <&misc_clk_0>, <&misc_clk_1>;
+ encoder_dsi_port: port@0 {
+ reg = <0>;
+ dsi_encoder: endpoint {
+ remote-endpoint = <&xyz_port>;
+ };
+ };
+ simple_panel: simple-panel@0 {
+ compatible = "auo,b101uan01";
+ reg = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/xlnx/xlnx,mixer.txt b/Documentation/devicetree/bindings/display/xlnx/xlnx,mixer.txt
new file mode 100644
index 000000000000..a0b2fc05d5aa
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/xlnx/xlnx,mixer.txt
@@ -0,0 +1,166 @@
+Device-Tree bindings for Xilinx Video Mixer IP core
+
+The IP core provides a flexible video processing block for alpha blending
+and compositing multiple video and/or graphics layers.
+Support for up to sixteen layers based on IP version, with an optional logo
+layer, using a combination of video inputs from either frame buffer or
+streaming video cores (through AXI4-Stream interfaces) is provided.
+The Video Mixer always has one streaming input layer, known as master layer.
+
+Required properties:
+ - compatible: Must contain at least one of
+ "xlnx,mixer-5.0" (MIXER 5.0 version)
+ "xlnx,mixer-4.0" (MIXER 4.0 version)
+ "xlnx,mixer-3.0" (MIXER 3.0 version)
+ - reg: Base address and size of the IP core.
+ - interrupts: Interrupt number.
+ - interrupt-parent: phandle for interrupt controller.
+ - reset-gpio: gpio to reset the mixer IP
+ - xlnx,enable-csc-coefficient-register: denotes colorimetry
+ coefficients can be programmed, valid for mixer 5.0 version only.
+ - xlnx,dma-addr-width: dma address width, valid values are 32 and 64
+ - xlnx,bpc: bits per component for mixer
+ - xlnx,ppc: pixel per clock for mixer
+ - xlnx,num-layers: Total number of layers (excluding logo)
+ Value ranges from 1-9 for compatible string xlnx,mixer-3.0 and
+ Value ranges from 1-17 for compatible string xlnx,mixer-4.0 and above
+ - layer_[x]: node for [x] layer
+ - xlnx,layer-id: layer identifier number
+ - xlnx,vformat: video format for layer. See list of supported formats below.
+ - xlnx,layer-max-width: max layer width, mandatory for master layer
+ for overlay layers if scaling is allowed then this is mandatory otherwise
+ not required for overlay layers
+ - xlnx,layer-max-height: max layer height, mandatory for master layer
+ Not required for overlay layers
+ - xlnx,layer-primary: denotes the primary layer, should be mentioned in node
+ of layer which is expected to be constructing the primary plane
+
+Optional properties:
+ - dmas: dma attach to layer, mandatory for master layer
+ for rest other layers its optional
+ - dma-names: Should be "dma0", for more details on DMA identifier string
+ refer Documentation/devicetree/bindings/dma/dma.txt
+ - xlnx,layer-streaming: denotes layer can be streaming,
+ mandatory for master layer. Streaming layers need external dma, where
+ as non streaming layers read directly from memory.
+ - xlnx,layer-alpha: denotes layer can do alpha compositing
+ - xlnx,layer-scale: denotes layer can be scale to 2x and 4x
+ - xlnx,logo-layer: denotes logo layer is enable
+ - logo: logo layer
+ - xlnx,bridge: phandle to bridge node.
+ This handle is required only when VTC is connected as bridge.
+
+Supported Formats:
+ Mixer IP Format Driver supported Format String
+ BGR888 "RG24"
+ RGB888 "BG24"
+ XBGR2101010 "XB30"
+ XRGB8888 "XR24"
+ RGBA8888 "RA24"
+ ABGR8888 "AB24"
+ ARGB8888 "AR24"
+ XBGR8888 "XB24"
+ YUYV "YUYV"
+ UYVY "UYVY"
+ AYUV "AYUV"
+ NV12 "NV12"
+ NV16 "NV16"
+ Y8 "GREY"
+ Y10 "Y10 " (Note: Space included)
+ XVUY2101010 "XV30"
+ VUY888 "VU24"
+ XVUY8888 "XV24"
+ XV15 "XV15"
+ XV20 "XV20"
+Note : Format strings are case sensitive.
+
+Example:
+ v_mix_0: v_mix@80100000 {
+ compatible = "xlnx,mixer-3.0";
+ interrupt-parent = <&gic>;
+ interrupts = <0 93 4>;
+ reg = <0x0 0x80100000 0x0 0x80000>;
+
+ xlnx,dma-addr-width=<32>;
+ reset-gpios = <&gpio 1 1>;
+
+ xlnx,bpc = <8>;
+ xlnx,ppc = <2>;
+ xlnx,num-layers = <8>;
+ xlnx,logo-layer;
+ xlnx,bridge = <&v_tc_0>;
+
+ mixer_port: mixer_port@0 {
+ reg = <0>;
+ mixer_crtc: endpoint {
+ remote-endpoint = <&sdi_encoder>;
+ };
+ };
+ xv_mix_master: layer_0 {
+ xlnx,layer-id = <0>;
+ xlnx,vformat = "YUYV";
+ xlnx,layer-max-width = <4096>;
+ xlnx,layer-max-height = <2160>;
+ dmas = <&axi_vdma_0 0>;
+ dma-names = "dma0";
+ xlnx,layer-streaming;
+ xlnx,layer-primary;
+ };
+ xv_mix_overlay_1: layer_1 {
+ xlnx,layer-id = <1>;
+ xlnx,vformat = "NV16";
+ xlnx,layer-alpha;
+ xlnx,layer-scale;
+ xlnx,layer-max-width=<1920>;
+ };
+ xv_mix_overlay_2: layer_2 {
+ xlnx,layer-id = <2>;
+ xlnx,vformat = "YUYV";
+ xlnx,layer-alpha;
+ xlnx,layer-scale;
+ xlnx,layer-max-width=<1920>;
+ };
+ xv_mix_overlay_3: layer_3 {
+ xlnx,layer-id = <3>;
+ xlnx,vformat = "AYUV";
+ xlnx,layer-alpha;
+ xlnx,layer-scale;
+ xlnx,layer-max-width=<1920>;
+ };
+ xv_mix_overlay_4: layer_4 {
+ xlnx,layer-id = <4>;
+ xlnx,vformat = "GREY";
+ dmas = <&scaler_v_frmbuf_rd_0 0>;
+ dma-names = "dma0";
+ xlnx,layer-streaming;
+ xlnx,layer-alpha;
+ xlnx,layer-scale;
+ xlnx,layer-max-width=<1920>;
+ };
+ xv_mix_overlay_5: layer_5 {
+ xlnx,layer-id = <5>;
+ xlnx,vformat = "AB24";
+ xlnx,layer-alpha;
+ xlnx,layer-scale;
+ xlnx,layer-max-width=<1920>;
+ };
+ xv_mix_overlay_6: layer_6 {
+ xlnx,layer-id = <6>;
+ xlnx,vformat = "XB24";
+ xlnx,layer-alpha;
+ xlnx,layer-scale;
+ xlnx,layer-max-width=<1920>;
+ };
+ xv_mix_overlay_7: layer_7 {
+ xlnx,layer-id = <7>;
+ xlnx,vformat = "BG24";
+ xlnx,layer-alpha;
+ xlnx,layer-scale;
+ xlnx,layer-max-width=<1920>;
+ };
+ xv_mix_logo: logo {
+ xlnx,layer-id = <8>;
+ xlnx,logo-height = <64>;
+ xlnx,logo-width = <64>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/xlnx/xlnx,pl-disp.txt b/Documentation/devicetree/bindings/display/xlnx/xlnx,pl-disp.txt
new file mode 100644
index 000000000000..c6034bffc64a
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/xlnx/xlnx,pl-disp.txt
@@ -0,0 +1,41 @@
+Xilinx PL Display driver
+------------------------
+
+Pl_Display is a logical device to provide completeness to the Xilinx display
+pipeline. This is a software driver providing the DRM components CRTC
+and plane for various IPs using Xilinx display pipelines.
+
+A linear pipeline with multiple blocks:
+DMA --> PL_Display --> SDI
+
+Required properties:
+
+- compatible: Must be "xlnx,pl-disp"
+- dmas: dma attach to pipeline
+- dma-names: names for dma
+- xlnx,vformat: video format for layer
+- port: Logical block can be used / connected independently with
+ external device. In the display controller port nodes, topology
+ for entire pipeline should be described using the DT bindings defined in
+ Documentation/devicetree/bindings/graph.txt.
+- reg: Base address and size of device
+
+Optional properties:
+ - xlnx,bridge: bridge phandle
+ This handle is required only when VTC is connected as bridge.
+
+Example:
+
+ drm-pl-disp-drv {
+ compatible = "xlnx,pl-disp";
+ dmas = <&axi_vdma_0 0>;
+ dma-names = "dma0";
+ xlnx,vformat = "YUYV";
+ xlnx,bridge = <&v_tc_0>;
+ pl_disp_port@0 {
+ reg = <0>;
+ endpoint {
+ remote-endpoint = <&sdi_port>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/xlnx/xlnx,sdi-tx.txt b/Documentation/devicetree/bindings/display/xlnx/xlnx,sdi-tx.txt
new file mode 100644
index 000000000000..701fef456765
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/xlnx/xlnx,sdi-tx.txt
@@ -0,0 +1,57 @@
+Device-Tree bindings for Xilinx SDI Tx subsystem
+
+The IP core supports transmission of video data in SDI Tx protocol
+
+Required properties:
+ - compatible: Should be "xlnx,sdi-tx".
+ - interrupts: Interrupt number.
+ - interrupt-parent: phandle for interrupt controller.
+ - reg: Base address and size of the IP core.
+ - port: Logical block can be used / connected independently with
+ external device. In the display controller port nodes, topology
+ for entire pipeline should be described using the DT bindings defined in
+ Documentation/devicetree/bindings/graph.txt.
+ Minimum one port is required. At max, 2 ports are present.
+ The reg index for AXI4 stream port is 0 and for ancillary data is 1.
+ - clocks: List of phandles to AXI Lite, Video and SDI Tx Clock
+ - clock-names: Must contain "s_axi_aclk", "video_in_clk" and "sdi_tx_clk"
+ in same order as clocks listed in clocks property.
+
+Optional properties:
+ - xlnx,vpss: vpss phandle
+ This handle is required only when VPSS is connected to SDI as bridge.
+ - xlnx,tx-insert-c-str-st352: Insert ST352 payload in Chroma stream.
+ - interrupt-names: Should be "sdi_tx_irq". This is only required when
+ multiple interrupts are connected in the hardware design.
+
+Example:
+
+ sdi_tx_subsystem@80000000 {
+ compatible = "xlnx,sdi-tx";
+ reg = <0x0 0x80000000 0x0 0x10000>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 90 4>;
+ interrupt-names = "sdi_tx_irq";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ xlnx,vpss = <&v_proc_ss_0>;
+ clock-names = "s_axi_aclk", "video_in_clk", "sdi_tx_clk";
+ clocks = <&misc_clk_0>, <&misc_clk_1>, <&misc_clk_2>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ encoder_sdi_port: port@0 {
+ reg = <0>;
+ sdi_encoder: endpoint {
+ remote-endpoint = <&pl_disp_crtc>;
+ };
+ };
+
+ sdi_audio_port: port@1 {
+ reg = <1>;
+ sdi_audio_sink_port: endpoint {
+ remote-endpoint = <&sditx_audio_embed_src_port>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/xlnx/xlnx,vpss-csc.txt b/Documentation/devicetree/bindings/display/xlnx/xlnx,vpss-csc.txt
new file mode 100644
index 000000000000..cf80d185d429
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/xlnx/xlnx,vpss-csc.txt
@@ -0,0 +1,35 @@
+Xilinx VPSS Color Space Converter
+-----------------------------------------
+The Xilinx VPSS Color Space Converter is a Video IP that supports
+color space conversion from RGB to YUV 444/422/420 and vice versa.
+
+Required properties:
+
+- compatible: Must be "xlnx,vpss-csc".
+
+- reg: Physical base address and length of registers set for the device.
+
+- xlnx,video-width: This property qualifies the video format with sample
+ width expressed as a number of bits per pixel component. Supported video
+ width values are 8/10/12/16.
+
+- reset-gpios: GPIO specifier to assert/de-assert the reset line.
+
+- clocks: phandle to IP clock.
+
+- xlnx,max-width: Maximum number of pixels in a line.
+ Valid range from 64 to 8192.
+
+- xlnx,max-height: Maximum number of lines in a frame.
+ Valid range from 64 to 4320.
+
+Example:
+ csc@a0040000 {
+ compatible = "xlnx,vpss-csc";
+ reg = <0x0 0xa0040000 0x0 0x10000>;
+ reset-gpios = <&gpio 0x0 GPIO_ACTIVE_LOW>;
+ xlnx,video-width = <8>;
+ clocks = <&misc_clk_0>;
+ xlnx,max-width = <3840>;
+ xlnx,max-height = <2160>;
+ };
diff --git a/Documentation/devicetree/bindings/display/xlnx/xlnx,vpss-scaler.txt b/Documentation/devicetree/bindings/display/xlnx/xlnx,vpss-scaler.txt
new file mode 100644
index 000000000000..3927b88be0cf
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/xlnx/xlnx,vpss-scaler.txt
@@ -0,0 +1,50 @@
+Xilinx VPSS Scaler
+------------------
+The Xilinx VPSS Scaler is a Video IP that supports up scaling,
+down scaling and no scaling functionality. This supports custom
+resolution values between 0 and 4096.
+
+Required properties:
+
+- compatible: Must be "xlnx,vpss-scaler-2.2" or "xlnx,vpss-scaler".
+
+- reg: Physical base address and length of registers set for the device.
+
+- xlnx,num-hori-taps: The number of horizontal taps for scaling filter
+ supported tap values are 2/4/6/8/10/12.
+
+- xlnx,num-vert-taps: The number of vertical taps for scaling filter
+ supported tap values are 2/4/6/8/10/12.
+
+ A value of 2 represents bilinear filters. A value of 4 represents bicubic.
+ Values 6, 8, 10, 12 represent polyphase filters.
+
+- xlnx,pix-per-clk : The pixels per clock property of the IP.
+ supported values are 1 and 2.
+
+- reset-gpios: GPIO specifier to assert/de-assert the reset line.
+
+- clocks: List of phandles to AXI Lite and Video clock
+
+- clock-names: Must contain "aclk_ctrl" and "aclk_axis" in same order as clocks
+ listed in clocks property.
+
+- xlnx,max-width: Maximum number of pixels in a line.
+ Valid range from 64 to 8192.
+
+- xlnx,max-height: Maximum number of lines in a frame.
+ Valid range from 64 to 4320.
+
+Example:
+ scaler@a0040000 {
+ compatible = "xlnx,vpss-scaler";
+ reg = <0x0 0xa0000000 0x0 0x40000>;
+ reset-gpios = <&gpio 0x0 GPIO_ACTIVE_LOW>;
+ xlnx,num-hori-taps = <8>;
+ xlnx,num-vert-taps = <8>;
+ xlnx,pix-per-clk = <2>;
+ clock-names = "aclk_ctrl", "aclk_axis";
+ clocks = <&misc_clk_0>, <&misc_clk_1>;
+ xlnx,max-width = <3840>;
+ xlnx,max-height = <2160>;
+ };
diff --git a/Documentation/devicetree/bindings/display/xlnx/xlnx,vtc.txt b/Documentation/devicetree/bindings/display/xlnx/xlnx,vtc.txt
new file mode 100644
index 000000000000..6a4d5bcc5e59
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/xlnx/xlnx,vtc.txt
@@ -0,0 +1,32 @@
+Device-Tree bindings for Xilinx Video Timing Controller(VTC)
+
+Xilinx VTC is a general purpose video timing generator and detector.
+The input side of this core automatically detects horizontal and
+vertical synchronization, pulses, polarity, blanking timing and active pixels.
+While on the output, it generates the horizontal and vertical blanking and
+synchronization pulses used with a standard video system including support
+for programmable pulse polarity.
+
+The core is commonly used with Video in to AXI4-Stream core to detect the
+format and timing of incoming video data or with AXI4-Stream to Video out core
+to generate outgoing video timing for downstream sinks like a video monitor.
+
+For details please refer to
+https://www.xilinx.com/support/documentation/ip_documentation/v_tc/v6_1/pg016_v_tc.pdf
+
+Required properties:
+ - compatible: value should be "xlnx,bridge-v-tc-6.1"
+ - reg: base address and size of the VTC IP
+ - xlnx,pixels-per-clock: Pixels per clock of the stream. Can be 1, 2 or 4.
+ - clocks: List of phandles for AXI Lite and Video Clock
+ - clock-names: Must contain "s_axi_aclk" and "clk" in same order as clocks listed
+ in clocks property.
+
+Example:
+ v_tc_0: v_tc@80030000 {
+ compatible = "xlnx,bridge-v-tc-6.1";
+ reg = <0x0 0x80030000 0x0 0x10000>;
+ xlnx,pixels-per-clock = <2>;
+ clock-names = "s_axi_aclk", "clk";
+ clocks = <&misc_clk_0>, <&misc_clk_1>;
+ };
diff --git a/Documentation/devicetree/bindings/display/xlnx/xlnx,zynqmp-dpsub.txt b/Documentation/devicetree/bindings/display/xlnx/xlnx,zynqmp-dpsub.txt
new file mode 100644
index 000000000000..46d0c7671ee5
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/xlnx/xlnx,zynqmp-dpsub.txt
@@ -0,0 +1,82 @@
+Xilinx ZynqMP DisplayPort subsystem
+-----------------------------------
+
+Required properties:
+
+- compatible: Must be "xlnx,zynqmp-dpsub-1.7".
+
+- reg: Physical base address and length of the registers set for the device.
+- reg-names: Must be "dp", "blend", "av_buf", and "aud" to map logical register
+ partitions.
+
+- interrupts: Interrupt number.
+- interrupt-parent: phandle for interrupt controller.
+
+- clocks: phandles for axi, audio, non-live video, and live video clocks.
+ axi clock is required. Audio clock is optional. If not present, audio will
+ be disabled. One of non-live or live video clock should be present.
+- clock-names: The identification strings are required. "aclk" for axi clock.
+ "dp_aud_clk" for audio clock. "dp_vtc_pixel_clk_in" for non-live video clock.
+ "dp_live_video_in_clk" for live video clock (clock from programmable logic).
+
+- phys: phandles for phy specifier. The number of lanes is configurable
+ between 1 and 2. The number of phandles should be 1 or 2.
+- phy-names: The identifier strings. "dp-phy" followed by index, 0 or 1.
+ For single lane, only "dp-phy0" is required. For dual lane, both "dp-phy0"
+ and "dp-phy1" are required where "dp-phy0" is the primary lane.
+
+- power-domains: phandle for the corresponding power domain
+
+- vid-layer, gfx-layer: Required to represent available layers
+
+Required layer properties
+
+- dmas: phandles for DMA channels as defined in
+ Documentation/devicetree/bindings/dma/dma.txt.
+- dma-names: The identifier strings are required. "gfx0" for graphics layer
+ dma channel. "vid" followed by index (0 - 2) for video layer dma channels.
+
+Optional child node
+
+- The driver populates any child device node in this node. This can be used,
+ for example, to populate the sound device from the DisplayPort subsystem
+ driver.
+
+Example:
+ zynqmp-display-subsystem@fd4a0000 {
+ compatible = "xlnx,zynqmp-dpsub-1.7";
+ reg = <0x0 0xfd4a0000 0x0 0x1000>,
+ <0x0 0xfd4aa000 0x0 0x1000>,
+ <0x0 0xfd4ab000 0x0 0x1000>,
+ <0x0 0xfd4ac000 0x0 0x1000>;
+ reg-names = "dp", "blend", "av_buf", "aud";
+ interrupts = <0 119 4>;
+ interrupt-parent = <&gic>;
+
+ clock-names = "dp_apb_clk", "dp_aud_clk", "dp_live_video_in_clk";
+ clocks = <&dp_aclk>, <&clkc 17>, <&si570_1>;
+
+ phys = <&lane1>, <&lane0>;
+ phy-names = "dp-phy0", "dp-phy1";
+
+ power-domains = <&pd_dp>;
+
+ vid-layer {
+ dma-names = "vid0", "vid1", "vid2";
+ dmas = <&xlnx_dpdma 0>,
+ <&xlnx_dpdma 1>,
+ <&xlnx_dpdma 2>;
+ };
+
+ gfx-layer {
+ dma-names = "gfx0";
+ dmas = <&xlnx_dpdma 3>;
+ };
+
+ };
diff --git a/Documentation/devicetree/bindings/dma/xilinx/axi-dma.txt b/Documentation/devicetree/bindings/dma/xilinx/axi-dma.txt
new file mode 100644
index 000000000000..f4f5b018dfa5
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/xilinx/axi-dma.txt
@@ -0,0 +1,38 @@
+* Xilinx AXI DMA Test client
+
+Required properties:
+- compatible: Should be "xlnx,axi-dma-test-1.00.a"
+- dmas: a list of <[DMA device phandle] [Channel ID]> pairs,
+ where Channel ID is '0' for write/tx and '1' for read/rx
+ channel.
+- dma-names: a list of DMA channel names, one per "dmas" entry
+
+Example:
+++++++++
+
+dmatest_0: dmatest@0 {
+ compatible ="xlnx,axi-dma-test-1.00.a";
+ dmas = <&axi_dma_0 0
+ &axi_dma_0 1>;
+ dma-names = "axidma0", "axidma1";
+} ;
+
+
+Xilinx AXI DMA Device Node Example
+++++++++++++++++++++++++++++++++++++
+
+axi_dma_0: axidma@40400000 {
+ compatible = "xlnx,axi-dma-1.00.a";
+ #dma-cells = <1>;
+ reg = < 0x40400000 0x10000 >;
+ dma-channel@40400000 {
+ compatible = "xlnx,axi-dma-mm2s-channel";
+ interrupts = < 0 59 4 >;
+ xlnx,datawidth = <0x40>;
+ } ;
+ dma-channel@40400030 {
+ compatible = "xlnx,axi-dma-s2mm-channel";
+ interrupts = < 0 58 4 >;
+ xlnx,datawidth = <0x40>;
+ } ;
+} ;
diff --git a/Documentation/devicetree/bindings/dma/xilinx/ps-pcie-dma.txt b/Documentation/devicetree/bindings/dma/xilinx/ps-pcie-dma.txt
new file mode 100644
index 000000000000..acdcc445f01b
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/xilinx/ps-pcie-dma.txt
@@ -0,0 +1,67 @@
+* Xilinx PS PCIe Root DMA
+
+Required properties:
+- compatible: Should be "xlnx,ps_pcie_dma-1.00.a"
+- reg: Register offset for Root DMA channels
+- reg-names: Name for the register. Should be "ps_pcie_regbase"
+- interrupts: Interrupt pin for Root DMA
+- interrupt-names: Name for the interrupt. Should be "ps_pcie_rootdma_intr"
+- interrupt-parent: Should be gic in case of zynqmp
+- rootdma: Indicates this platform device is root dma.
+ This is required as the same platform driver will be invoked by pcie end points too
+- dma_vendorid: 16 bit PCIe device vendor id.
+ This can be later used by dma client for matching while using dma_request_channel
+- dma_deviceid: 16 bit PCIe device id
+ This can be later used by dma client for matching while using dma_request_channel
+- numchannels: Indicates number of channels to be enabled for the device.
+ Valid values are from 1 to 4 for zynqmp
+- ps_pcie_channel : One for each channel to be enabled.
+ This array contains channel specific properties.
+ Index 0: Direction of channel
+ Direction of channel can be either PCIe Memory to AXI memory i.e., Host to Card or
+ AXI Memory to PCIe memory i.e., Card to Host
+ PCIe to AXI Channel Direction is represented as 0x1
+ AXI to PCIe Channel Direction is represented as 0x0
+ Index 1: Number of Buffer Descriptors
+ This number describes number of buffer descriptors to be allocated for a channel
+ Index 2: Number of Queues
+ Each Channel has four DMA Buffer Descriptor Queues.
+ By default All four Queues will be managed by Root DMA driver.
+ User may choose to have only two queues either Source and it's Status Queue or
+ Destination and it's Status Queue to be handled by Driver.
+ The other two queues need to be handled by user logic which will not be part of this driver.
+ All Queues on Host is represented by 0x4
+ Two Queues on Host is represented by 0x2
+ Index 3: Coalesce Count
+ This number indicates the number of transfers after which interrupt needs to
+ be raised for the particular channel. The allowed range is from 0 to 255
+ Index 4: Coalesce Count Timer frequency
+ This property is used to control the frequency of poll timer. Poll timer is
+ created for a channel whenever coalesce count value (>= 1) is programmed for the particular
+ channel. This timer is helpful in draining out completed transactions even though interrupt is
+ not generated.
+
+Client Usage:
+ DMA clients can request for these channels using dma_request_channel API
+
+
+Xilinx PS PCIe Root DMA node Example
+++++++++++++++++++++++++++++++++++++
+
+ pci_rootdma: rootdma@fd0f0000 {
+ compatible = "xlnx,ps_pcie_dma-1.00.a";
+ reg = <0x0 0xfd0f0000 0x0 0x1000>;
+ reg-names = "ps_pcie_regbase";
+ interrupts = <0 117 4>;
+ interrupt-names = "ps_pcie_rootdma_intr";
+ interrupt-parent = <&gic>;
+ rootdma;
+ dma_vendorid = /bits/ 16 <0x10EE>;
+ dma_deviceid = /bits/ 16 <0xD021>;
+ numchannels = <0x4>;
+ #size-cells = <0x5>;
+ ps_pcie_channel0 = <0x1 0x7CF 0x4 0x0 0x3E8>;
+ ps_pcie_channel1 = <0x0 0x7CF 0x4 0x0 0x3E8>;
+ ps_pcie_channel2 = <0x1 0x7CF 0x4 0x0 0x3E8>;
+ ps_pcie_channel3 = <0x0 0x7CF 0x4 0x0 0x3E8>;
+ };
diff --git a/Documentation/devicetree/bindings/dma/xilinx/vdmatest.txt b/Documentation/devicetree/bindings/dma/xilinx/vdmatest.txt
new file mode 100644
index 000000000000..5821fdc3e5e7
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/xilinx/vdmatest.txt
@@ -0,0 +1,39 @@
+* Xilinx Video DMA Test client
+
+Required properties:
+- compatible: Should be "xlnx,axi-vdma-test-1.00.a"
+- dmas: a list of <[Video DMA device phandle] [Channel ID]> pairs,
+ where Channel ID is '0' for write/tx and '1' for read/rx
+ channel.
+- dma-names: a list of DMA channel names, one per "dmas" entry
+- xlnx,num-fstores: Should be the number of framebuffers as configured in
+ VDMA device node.
+
+Example:
+++++++++
+
+vdmatest_0: vdmatest@0 {
+ compatible ="xlnx,axi-vdma-test-1.00.a";
+ dmas = <&axi_vdma_0 0
+ &axi_vdma_0 1>;
+ dma-names = "vdma0", "vdma1";
+ xlnx,num-fstores = <0x8>;
+} ;
+
+
+Xilinx Video DMA Device Node Example
+++++++++++++++++++++++++++++++++++++
+axi_vdma_0: axivdma@44A40000 {
+ compatible = "xlnx,axi-vdma-1.00.a";
+ ...
+ dma-channel@44A40000 {
+ ...
+ xlnx,num-fstores = <0x8>;
+ ...
+ } ;
+ dma-channel@44A40030 {
+ ...
+ xlnx,num-fstores = <0x8>;
+ ...
+ } ;
+} ;
diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dpdma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dpdma.txt
new file mode 100644
index 000000000000..5f1e680ffcc2
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dpdma.txt
@@ -0,0 +1,91 @@
+Device-Tree bindings for Xilinx ZynqMP DisplayPort Subsystem
+
+The ZynqMP DisplayPort subsystem handles DMA channel buffer management,
+blending, and audio mixing. The DisplayPort subsystem receives display
+and audio frames from DPDMA and transmits output to the DisplayPort IP core.
+
+Required properties:
+ - compatible: Should be "xlnx,dpdma".
+ - reg: Base address and size of the IP core.
+ - interrupts: Interrupt number.
+ - interrupt-parent: phandle for interrupt controller.
+ - clocks: phandle for AXI clock
+ - clock-names: The identification string, "axi_clk", is always required.
+
+Required child node properties:
+- compatible: Should be one of "xlnx,video0", "xlnx,video1", "xlnx,video2",
+ "xlnx,graphics", "xlnx,audio0", or "xlnx,audio1".
+
+Example:
+
+ xlnx_dpdma: axidpdma@43c10000 {
+ compatible = "xlnx,dpdma";
+ reg = <0x43c10000 0x1000>;
+ interrupts = <0 54 4>;
+ interrupt-parent = <&intc>;
+ clocks = <&clkc 16>;
+ clock-names = "axi_clk";
+ xlnx,axi-clock-freq = <200000000>;
+
+ dma-channels = <6>;
+
+ #dma-cells = <1>;
+ dma-video0channel@43c10000 {
+ compatible = "xlnx,video0";
+ };
+ dma-video1channel@43c10000 {
+ compatible = "xlnx,video1";
+ };
+ dma-video2channel@43c10000 {
+ compatible = "xlnx,video2";
+ };
+ dma-graphicschannel@43c10000 {
+ compatible = "xlnx,graphics";
+ };
+ dma-audio0channel@43c10000 {
+ compatible = "xlnx,audio0";
+ };
+ dma-audio1channel@43c10000 {
+ compatible = "xlnx,audio1";
+ };
+ };
+
+* DMA client
+
+Required properties:
+- dmas: a list of <[DPDMA device phandle] [Channel ID]> pairs. "Channel ID"
+ is defined as video0 = 0, video1 = 1, video2 = 2, graphics = 3, audio0 = 4,
+ and audio1 = 5.
+
+Example:
+
+ xilinx_drm {
+ compatible = "xlnx,drm";
+ xlnx,encoder-slave = <&xlnx_dp>;
+ clocks = <&si570 0>;
+ xlnx,connector-type = "DisplayPort";
+ xlnx,dp-sub = <&xlnx_dp_sub>;
+ planes {
+ xlnx,pixel-format = "rgb565";
+ plane0 {
+ dmas = <&xlnx_dpdma 3>;
+ dma-names = "dma";
+ };
+ plane1 {
+ dmas = <&xlnx_dpdma 0>;
+ dma-names = "dma";
+ };
+ };
+ };
+
+ xlnx_dp_snd_pcm0: dp_snd_pcm0 {
+ compatible = "xlnx,dp-snd-pcm";
+ dmas = <&xlnx_dpdma 4>;
+ dma-names = "tx";
+ };
+
+ xlnx_dp_snd_pcm1: dp_snd_pcm1 {
+ compatible = "xlnx,dp-snd-pcm";
+ dmas = <&xlnx_dpdma 5>;
+ dma-names = "tx";
+ };
diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_frmbuf.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_frmbuf.txt
new file mode 100644
index 000000000000..484389930dca
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_frmbuf.txt
@@ -0,0 +1,123 @@
+The Xilinx framebuffer DMA engine supports two soft IP blocks: one IP
+block is used for reading video frame data from memory (FB Read) to the device
+and the other IP block is used for writing video frame data from the device
+to memory (FB Write). Both the FB Read/Write IP blocks are aware of the
+format of the data being written to or read from memory including RGB and
+YUV in packed, planar, and semi-planar formats. Because the FB Read/Write
+is format aware, only one buffer pointer is needed by the IP blocks even
+when planar or semi-planar format are used.
+
+FB Read Required propertie(s):
+- compatible : Should be "xlnx,axi-frmbuf-rd-v2.1". Older string
+ "xlnx,axi-frmbuf-rd-v2" is now deprecated.
+
+Note: Compatible string "xlnx,axi-frmbuf-rd" and the hardware it
+represented is no longer supported.
+
+FB Write Required propertie(s):
+- compatible : Should be "xlnx,axi-frmbuf-wr-v2.1". Older string
+ "xlnx,axi-frmbuf-wr-v2" is now deprecated.
+
+Note: Compatible string "xlnx,axi-frmbuf-wr" and the hardware it
+represented is no longer supported.
+
+Required Properties Common to both FB Read and FB Write:
+- #dma-cells : should be 1
+- interrupt-parent : Interrupt controller the interrupt is routed through
+- interrupts : Should contain DMA channel interrupt
+- reset-gpios : Should contain GPIO reset phandle
+- reg : Memory map for module access
+- xlnx,dma-addr-width : Size of dma address pointer in IP (either 32 or 64)
+- xlnx,vid-formats : A list of strings indicating what video memory
+ formats the IP has been configured to support.
+ See VIDEO FORMATS table below and examples.
+
+Required Properties Common to both FB Read and FB Write for v2.1:
+- xlnx,pixels-per-clock : Pixels per clock set in IP (1, 2, 4 or 8)
+- clocks: Reference to the AXI Streaming clock feeding the AP_CLK
+- clock-names: Must have "ap_clk"
+- xlnx,max-height : Maximum number of lines.
+ Valid range from 64 to 4320.
+- xlnx,max-width : Maximum number of pixels in a line.
+ Valid range from 64 to 8192.
+
+Optional Properties Common to both FB Read and FB Write for v2.1:
+- xlnx,dma-align : DMA alignment required in bytes.
+ If absent then dma alignment is calculated as
+ pixels per clock * 8.
+ If present it should be power of 2 and at least
+ pixels per clock * 8.
+ Minimum is 8, 16, 32 when pixels-per-clock is
+ 1, 2 or 4.
+- xlnx,fid : Field ID enabled for interlaced video support.
+ Can be absent for progressive video.
+
+VIDEO FORMATS
+The following table describes the legal string values to be used for
+the xlnx,vid-formats property. To the left is the string value and the
+two columns to the right describe how this is mapped to an equivalent V4L2
+and DRM fourcc code---respectively---by the driver.
+
+IP FORMAT DTS String V4L2 Fourcc DRM Fourcc
+-------------|----------------|----------------------|---------------------
+RGB8 bgr888 V4L2_PIX_FMT_RGB24 DRM_FORMAT_BGR888
+BGR8 rgb888 V4L2_PIX_FMT_BGR24 DRM_FORMAT_RGB888
+RGBX8 xbgr8888 V4L2_PIX_FMT_BGRX32 DRM_FORMAT_XBGR8888
+RGBA8 abgr8888 <not supported> DRM_FORMAT_ABGR8888
+BGRA8 argb8888 <not supported> DRM_FORMAT_ARGB8888
+BGRX8 xrgb8888 V4L2_PIX_FMT_XBGR32 DRM_FORMAT_XRGB8888
+RGBX10 xbgr2101010 V4L2_PIX_FMT_XBGR30 DRM_FORMAT_XBGR2101010
+RGBX12 xbgr2121212 V4L2_PIX_FMT_XBGR40 <not supported>
+RGBX16 rgb16 V4L2_PIX_FMT_BGR40 <not supported>
+YUV8 vuy888 V4L2_PIX_FMT_VUY24 DRM_FORMAT_VUY888
+YUVX8 xvuy8888 V4L2_PIX_FMT_XVUY32 DRM_FORMAT_XVUY8888
+YUYV8 yuyv V4L2_PIX_FMT_YUYV DRM_FORMAT_YUYV
+UYVY8 uyvy V4L2_PIX_FMT_UYVY DRM_FORMAT_UYVY
+YUVA8 avuy8888 <not supported> DRM_FORMAT_AVUY
+YUVX10 yuvx2101010 V4L2_PIX_FMT_XVUY10 DRM_FORMAT_XVUY2101010
+Y8 y8 V4L2_PIX_FMT_GREY DRM_FORMAT_Y8
+Y10 y10 V4L2_PIX_FMT_Y10 DRM_FORMAT_Y10
+Y_UV8 nv16 V4L2_PIX_FMT_NV16 DRM_FORMAT_NV16
+Y_UV8 nv16 V4L2_PIX_FMT_NV16M DRM_FORMAT_NV16
+Y_UV8_420 nv12 V4L2_PIX_FMT_NV12 DRM_FORMAT_NV12
+Y_UV8_420 nv12 V4L2_PIX_FMT_NV12M DRM_FORMAT_NV12
+Y_UV10 xv20 V4L2_PIX_FMT_XV20M DRM_FORMAT_XV20
+Y_UV10 xv20 V4L2_PIX_FMT_XV20 <not supported>
+Y_UV10_420 xv15 V4L2_PIX_FMT_XV15M DRM_FORMAT_XV15
+Y_UV10_420 xv15 V4L2_PIX_FMT_XV15 <not supported>
+
+Examples:
+
+FB Read Example:
+++++++++
+v_frmbuf_rd_0: v_frmbuf_rd@80000000 {
+ #dma-cells = <1>;
+ compatible = "xlnx,axi-frmbuf-rd-v2.1";
+ interrupt-parent = <&gic>;
+ interrupts = <0 92 4>;
+ reset-gpios = <&gpio 80 1>;
+ reg = <0x0 0x80000000 0x0 0x10000>;
+ xlnx,dma-addr-width = <32>;
+ xlnx,vid-formats = "bgr888","xbgr8888";
+ xlnx,pixels-per-clock = <1>;
+ xlnx,dma-align = <8>;
+ clocks = <&vid_stream_clk>;
+ clock-names = "ap_clk";
+};
+
+FB Write Example:
+++++++++
+v_frmbuf_wr_0: v_frmbuf_wr@80000000 {
+ #dma-cells = <1>;
+ compatible = "xlnx,axi-frmbuf-wr-v2.1";
+ interrupt-parent = <&gic>;
+ interrupts = <0 92 4>;
+ reset-gpios = <&gpio 80 1>;
+ reg = <0x0 0x80000000 0x0 0x10000>;
+ xlnx,dma-addr-width = <64>;
+ xlnx,vid-formats = "bgr888","yuyv","nv16","nv12";
+ xlnx,pixels-per-clock = <2>;
+ xlnx,dma-align = <16>;
+ clocks = <&vid_stream_clk>;
+ clock-names = "ap_clk";
+};
diff --git a/Documentation/devicetree/bindings/drm/zocl/zocl_drm.txt b/Documentation/devicetree/bindings/drm/zocl/zocl_drm.txt
new file mode 100644
index 000000000000..bb9e30af4afc
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/zocl/zocl_drm.txt
@@ -0,0 +1,13 @@
+Binding for ZynQ OpenCL DRM driver
+
+Required properties:
+- compatible: should contain "xlnx,zocl"
+- reg: base address and size for memory mapped control port for opencl kernel
+
+Example:
+
+ zocl_drm {
+ compatible = "xlnx,zocl";
+ status = "okay";
+ reg = <0x80000000 0x10000>;
+ };
diff --git a/Documentation/devicetree/bindings/edac/cortex-arm64-edac.txt b/Documentation/devicetree/bindings/edac/cortex-arm64-edac.txt
new file mode 100644
index 000000000000..552f0c7774b1
--- /dev/null
+++ b/Documentation/devicetree/bindings/edac/cortex-arm64-edac.txt
@@ -0,0 +1,15 @@
+* ARM Cortex A57 and A53 L1/L2 cache error reporting
+
+CPU Memory Error Syndrome and L2 Memory Error Syndrome registers can be used
+for checking L1 and L2 memory errors.
+
+The following section describes the Cortex A57/A53 EDAC DT node binding.
+
+Required properties:
+- compatible: Should be "arm,cortex-a57-edac" or "arm,cortex-a53-edac"
+
+Example:
+ edac {
+ compatible = "arm,cortex-a57-edac";
+ };
+
diff --git a/Documentation/devicetree/bindings/edac/pl310_edac_l2.txt b/Documentation/devicetree/bindings/edac/pl310_edac_l2.txt
new file mode 100644
index 000000000000..94fbb8da2d1b
--- /dev/null
+++ b/Documentation/devicetree/bindings/edac/pl310_edac_l2.txt
@@ -0,0 +1,19 @@
+PL310 L2 Cache EDAC driver. It reports the data and tag RAM parity errors.
+
+Required properties:
+- compatible: Should be "arm,pl310-cache".
+- interrupts: Interrupt number to the CPU.
+- reg: Physical base address and size of cache controller's memory mapped
+ registers
+
+Example:
+++++++++
+
+ L2: cache-controller {
+ compatible = "arm,pl310-cache";
+ interrupts = <0 2 4>;
+ reg = <0xf8f02000 0x1000>;
+ };
+
+PL310 L2 Cache EDAC driver detects the Parity enable state by reading the
+appropriate control register.
diff --git a/Documentation/devicetree/bindings/edac/zynqmp_ocm_edac.txt b/Documentation/devicetree/bindings/edac/zynqmp_ocm_edac.txt
new file mode 100644
index 000000000000..252bb96bee90
--- /dev/null
+++ b/Documentation/devicetree/bindings/edac/zynqmp_ocm_edac.txt
@@ -0,0 +1,18 @@
+Xilinx ZynqMP OCM EDAC driver. It reports the OCM ECC single bit errors
+that are corrected and double bit ECC errors that are detected by the OCM
+ECC controller.
+
+Required properties:
+- compatible: Should be "xlnx,zynqmp-ocmc-1.0".
+- reg: Should contain OCM controller registers location and length.
+- interrupt-parent: Should be core interrupt controller.
+- interrupts: Property with a value describing the interrupt number.
+
+Example:
+++++++++
+ocm: memory-controller@ff960000 {
+ compatible = "xlnx,zynqmp-ocmc-1.0";
+ reg = <0x0 0xff960000 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 10 4>;
+};
diff --git a/Documentation/devicetree/bindings/fpga/xlnx,afi-fpga.txt b/Documentation/devicetree/bindings/fpga/xlnx,afi-fpga.txt
new file mode 100644
index 000000000000..85f8970010b9
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/xlnx,afi-fpga.txt
@@ -0,0 +1,61 @@
+Xilinx ZynqMP AFI interface Manager
+
+The Zynq UltraScale+ MPSoC Processing System core provides access from PL
+masters to PS internal peripherals, and memory through AXI FIFO interface
+(AFI) interfaces.
+
+Required properties:
+-compatible: Should contain "xlnx,afi-fpga"
+-config-afi: Pairs of <regid value>
+
+The possible values of regid and values are
+ regid: Regids of the register to be written possible values
+ 0- AFIFM0_RDCTRL
+ 1- AFIFM0_WRCTRL
+ 2- AFIFM1_RDCTRL
+ 3- AFIFM1_WRCTRL
+ 4- AFIFM2_RDCTRL
+ 5- AFIFM2_WRCTRL
+ 6- AFIFM3_RDCTRL
+ 7- AFIFM3_WRCTRL
+ 8- AFIFM4_RDCTRL
+ 9- AFIFM4_WRCTRL
+ 10- AFIFM5_RDCTRL
+ 11- AFIFM5_WRCTRL
+ 12- AFIFM6_RDCTRL
+ 13- AFIFM6_WRCTRL
+ 14- AFIFS
+ 15- AFIFS_SS2
+- value: Array of values to be written.
+ for FM0_RDCTRL(0) the valid values-fabric width 2: 32-bit,1 : 64-bit ,0: 128-bit enabled
+ for FM0_WRCTRL(1) the valid values-fabric width 2: 32-bit,1 : 64-bit ,0: 128-bit enabled
+ for FM1_RDCTRL(2) the valid values-fabric width 2: 32-bit,1 : 64-bit ,0: 128-bit enabled
+ for FM1_WRCTRL(3) the valid values-fabric width 2: 32-bit,1 : 64-bit ,0: 128-bit enabled
+ for FM2_RDCTRL(4) the valid values-fabric width 2: 32-bit,1 : 64-bit ,0: 128-bit enabled
+ for FM2_WRCTRL(5) the valid values-fabric width 2: 32-bit,1 : 64-bit ,0: 128-bit enabled
+ for FM3_RDCTRL(6) the valid values-fabric width 2: 32-bit,1 : 64-bit ,0: 128-bit enabled
+ for FM3_WRCTRL(7) the valid values-fabric width 2: 32-bit,1 : 64-bit ,0: 128-bit enabled
+ for FM4_RDCTRL(8) the valid values-fabric width 2: 32-bit,1 : 64-bit ,0: 128-bit enabled
+ for FM4_WRCTRL(9) the valid values-fabric width 2: 32-bit,1 : 64-bit ,0: 128-bit enabled
+ for FM5_RDCTRL(10) the valid values-fabric width 2: 32-bit,1 : 64-bit ,0: 128-bit enabled
+ for FM5_WRCTRL(11) the valid values-fabric width 2: 32-bit,1 : 64-bit ,0: 128-bit enabled
+ for FM6_RDCTRL(12) the valid values-fabric width 2: 32-bit,1 : 64-bit ,0: 128-bit enabled
+ for FM6_WRCTRL(13) the valid values-fabric width 2: 32-bit,1 : 64-bit ,0: 128-bit enabled
+ for AFIFS(14)
+ dw_ss1_sel bits (11:10)
+ dw_ss0_sel bits (9:8)
+ 0x0: 32-bit AXI data width,
+ 0x1: 64-bit AXI data width,
+ 0x2: 128-bit AXI data width
+ All other bits are 0, writes are ignored.
+
+ for AFIFS_SS2(15) selects the SS2 AXI data width, valid values
+ 0x000: 32-bit AXI data width,
+ 0x100: 64-bit AXI data width,
+ 0x200: 128-bit AXI data width
+
+Example:
+afi0: afi0 {
+ compatible = "xlnx,afi-fpga";
+ config-afi = <0 2>, <1 1>, <2 1>;
+};
diff --git a/Documentation/devicetree/bindings/fpga/xlnx,versal-fpga.txt b/Documentation/devicetree/bindings/fpga/xlnx,versal-fpga.txt
new file mode 100644
index 000000000000..acca970cd341
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/xlnx,versal-fpga.txt
@@ -0,0 +1,10 @@
+Device Tree versal-fpga bindings for the Versal SoC, controlled
+using the Versal SoC firmware interface.
+
+Required properties:
+- compatible: should contain "xlnx,versal-fpga"
+
+Example:
+ versal_fpga: fpga {
+ compatible = "xlnx,versal-fpga";
+ };
diff --git a/Documentation/devicetree/bindings/fpga/xlnx,zynq-afi-fpga.txt b/Documentation/devicetree/bindings/fpga/xlnx,zynq-afi-fpga.txt
new file mode 100644
index 000000000000..e00942cf3091
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/xlnx,zynq-afi-fpga.txt
@@ -0,0 +1,19 @@
+Xilinx Zynq AFI interface Manager
+
+The Zynq Processing System core provides access from PL masters to PS
+internal peripherals, and memory through AXI FIFO interface
+(AFI) interfaces.
+
+Required properties:
+-compatible: Should contain "xlnx,zynq-afi-fpga"
+-reg: Physical base address and size of the controller's register area.
+-xlnx,afi-buswidth : Size of the afi bus width.
+ 0: 64-bit AXI data width,
+ 1: 32-bit AXI data width,
+
+Example:
+afi0: afi0 {
+ compatible = "xlnx,zynq-afi-fpga";
+ reg = <0xf8008000 0x1000>;
+ xlnx,afi-buswidth = <1>;
+};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-xilinx.txt b/Documentation/devicetree/bindings/gpio/gpio-xilinx.txt
index 08eed2335db0..516f4f50b124 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-xilinx.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-xilinx.txt
@@ -8,11 +8,17 @@ local interrupts can be enabled on channel basis.
Required properties:
- compatible : Should be "xlnx,xps-gpio-1.00.a"
- reg : Address and length of the register set for the device
-- #gpio-cells : Should be two. The first cell is the pin number and the
- second cell is used to specify optional parameters (currently unused).
+- #gpio-cells : Should be two or three. The first cell is the pin number,
+ The second cell is used to specify channel offset:
+ 0 - first channel
+ 8 - second channel
+ The third cell is optional and used to specify flags. Use the macros
+ defined in include/dt-bindings/gpio/gpio.h
- gpio-controller : Marks the device node as a GPIO controller.
Optional properties:
+- clock-names : Should be "s_axi_aclk"
+- clocks: Input clock specifier. Refer to common clock bindings.
- interrupts : Interrupt mapping for GPIO IRQ.
- xlnx,all-inputs : if n-th bit is setup, GPIO-n is input
- xlnx,dout-default : if n-th bit is 1, GPIO-n default value is 1
@@ -23,6 +29,7 @@ Optional properties:
- xlnx,dout-default-2 : as above but the second channel
- xlnx,gpio2-width : as above but for the second channel
- xlnx,tri-default-2 : as above but for the second channel
+- xlnx,no-init : No initialisation at probe
Example:
@@ -30,6 +37,8 @@ gpio: gpio@40000000 {
#gpio-cells = <2>;
compatible = "xlnx,xps-gpio-1.00.a";
gpio-controller ;
+ clock-names = "s_axi_aclk";
+ clocks = <&clkc 71>;
interrupt-parent = <&microblaze_0_intc>;
interrupts = < 6 2 >;
reg = < 0x40000000 0x10000 >;
@@ -44,3 +53,11 @@ gpio: gpio@40000000 {
xlnx,tri-default = <0xffffffff>;
xlnx,tri-default-2 = <0xffffffff>;
} ;
+
+Example to demonstrate how reset-gpios property is used in drivers:
+
+driver: driver@80000000 {
+ compatible = "xlnx,driver";
+ reset-gpios = <&gpio 0 0 GPIO_ACTIVE_LOW>; /* gpio phandle, gpio pin-number, channel offset, flag state */
+ reg = <0x0 0x80000000 0x0 0x10000>;
+};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-zynq.txt b/Documentation/devicetree/bindings/gpio/gpio-zynq.txt
index 4fa4eb5507cd..f693e82b4c0f 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-zynq.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-zynq.txt
@@ -6,7 +6,9 @@ Required properties:
- First cell is the GPIO line number
- Second cell is used to specify optional
parameters (unused)
-- compatible : Should be "xlnx,zynq-gpio-1.0" or "xlnx,zynqmp-gpio-1.0"
+- compatible : Should be "xlnx,zynq-gpio-1.0" or
+		"xlnx,zynqmp-gpio-1.0" or "xlnx,versal-gpio-1.0"
+		or "xlnx,pmc-gpio-1.0"
- clocks : Clock specifier (see clock bindings for details)
- gpio-controller : Marks the device node as a GPIO controller.
- interrupts : Interrupt specifier (see interrupt bindings for
diff --git a/Documentation/devicetree/bindings/hwmon/tps544.txt b/Documentation/devicetree/bindings/hwmon/tps544.txt
new file mode 100644
index 000000000000..bb71fd287ca2
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/tps544.txt
@@ -0,0 +1,14 @@
+TPS544B25 power regulator
+
+This power regulator driver supports voltage read/write and
+current calibration and readback.
+
+Required properties:
+- compatible: should be "ti,tps544"
+- reg: I2C slave address
+
+Example:
+tps544@24 {
+ compatible = "ti,tps544";
+ reg = <0x24>;
+};
diff --git a/Documentation/devicetree/bindings/iio/adc/xilinx-ams.txt b/Documentation/devicetree/bindings/iio/adc/xilinx-ams.txt
new file mode 100644
index 000000000000..3d1e77014865
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/xilinx-ams.txt
@@ -0,0 +1,159 @@
+Xilinx AMS device driver
+
+The AMS includes an ADC as well as on-chip sensors that can be used to
+sample external voltages and monitor on-die operating conditions, such as
+temperature and supply voltage levels. The AMS has two SYSMON blocks.
+PL-SYSMON block is capable of monitoring off chip voltage and temperature.
+PL-SYSMON block has DRP, JTAG and I2C interface to enable monitoring from
+external master. Of these interfaces, currently only DRP is supported.
+The other block, PS-SYSMON, is memory mapped to the PS. Both blocks have
+built-in alarm generation logic that is used to interrupt the processor
+based on the conditions set.
+
+All designs should have AMS register, but PS and PL are optional depending on
+the design. The driver can work with only PS, only PL and both PS and PL
+configurations. Please specify registers according to your design. DTS file
+should always have AMS module property. Providing PS & PL module is optional.
+
+Required properties:
+ - compatible: Should be "xlnx,zynqmp-ams"
+ - reg: Should specify AMS register space
+ - interrupts: Interrupt number for the AMS control interface
+ - interrupt-names: Interrupt name, must be "ams-irq"
+ - clocks: Should contain a clock specifier for the device
+ - ranges: keep the property empty to map child address space
+ (for PS and/or PL) nodes 1:1 onto the parent address
+ space
+
+AMS device tree subnode:
+ - compatible: Should be "xlnx,zynqmp-ams-ps" or "xlnx,zynqmp-ams-pl"
+ - reg: Register space for PS or PL
+
+Optional properties:
+
+Following optional property only valid for PL.
+ - xlnx,ext-channels: List of external channels that are connected to the
+ AMS PL module.
+
+ The child nodes of this node represent the external channels which are
+ connected to the AMS Module. If the property is not present
+ no external channels will be assumed to be connected.
+
+ Each child node represents one channel and has the following
+ properties:
+ Required properties:
+ * reg: Pair of pins the channel is connected to.
+ 0: VP/VN
+ 1: VUSER0
+ 2: VUSER1
+ 3: VUSER3
+ 4: VUSER4
+ 5: VAUXP[0]/VAUXN[0]
+ 6: VAUXP[1]/VAUXN[1]
+ ...
+ 20: VAUXP[15]/VAUXN[15]
+ Note each channel number should only be used at most
+ once.
+ Optional properties:
+ * xlnx,bipolar: If set the channel is used in bipolar
+ mode.
+
+
+Example:
+ xilinx_ams: ams@ffa50000 {
+ compatible = "xlnx,zynqmp-ams";
+ interrupt-parent = <&gic>;
+ interrupts = <0 56 4>;
+ interrupt-names = "ams-irq";
+ clocks = <&clkc 70>;
+ reg = <0x0 0xffa50000 0x0 0x800>;
+ reg-names = "ams-base";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ ams_ps: ams_ps@ffa50800 {
+ compatible = "xlnx,zynqmp-ams-ps";
+ reg = <0x0 0xffa50800 0x0 0x400>;
+ };
+
+ ams_pl: ams_pl@ffa50c00 {
+ compatible = "xlnx,zynqmp-ams-pl";
+ reg = <0x0 0xffa50c00 0x0 0x400>;
+ xlnx,ext-channels {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ channel@0 {
+ reg = <0>;
+ xlnx,bipolar;
+ };
+ channel@1 {
+ reg = <1>;
+ };
+ channel@8 {
+ reg = <8>;
+ xlnx,bipolar;
+ };
+ };
+ };
+ };
+
+AMS Channels Details:
+
+Sysmon Block |Channel| Details |Measurement
+ Number Type
+---------------------------------------------------------------------------------------------------------
+AMS CTRL |0 |System PLLs voltage measurement, VCC_PSPLL. |Voltage
+ |1 |Battery voltage measurement, VCC_PSBATT. |Voltage
+ |2 |PL Internal voltage measurement, VCCINT. |Voltage
+ |3 |Block RAM voltage measurement, VCCBRAM. |Voltage
+ |4 |PL Aux voltage measurement, VCCAUX. |Voltage
+ |5 |Voltage measurement for six DDR I/O PLLs, VCC_PSDDR_PLL. |Voltage
+ |6 |VCC_PSINTFP_DDR voltage measurement. |Voltage
+---------------------------------------------------------------------------------------------------------
+PS Sysmon |7 |LPD temperature measurement. |Temperature
+          |8     |FPD Temperature Measurement (REMOTE).                     |Temperature
+ |9 |VCC PS LPD voltage measurement (supply1). |Voltage
+ |10 |VCC PS FPD voltage measurement (supply2). |Voltage
+ |11 |PS Aux voltage reference (supply3). |Voltage
+ |12 |DDR I/O VCC voltage measurement. |Voltage
+ |13 |PS IO Bank 503 voltage measurement (supply5). |Voltage
+ |14 |PS IO Bank 500 voltage measurement (supply6). |Voltage
+ |15 |VCCO_PSIO1 voltage measurement. |Voltage
+ |16 |VCCO_PSIO2 voltage measurement. |Voltage
+ |17 |VCC_PS_GTR voltage measurement (VPS_MGTRAVCC). |Voltage
+ |18 |VTT_PS_GTR voltage measurement (VPS_MGTRAVTT). |Voltage
+ |19 |VCC_PSADC voltage measurement. |Voltage
+---------------------------------------------------------------------------------------------------------
+PL Sysmon |20 |PL Temperature measurement. |Temperature
+          |21    |PL Internal Voltage measurement, VCCINT.                  |Voltage
+ |22 |PL Auxiliary Voltage measurement, VCCAUX. |Voltage
+ |23 |ADC Reference P+ Voltage measurement. |Voltage
+ |24 |ADC Reference N- Voltage measurement. |Voltage
+ |25 |PL Block RAM Voltage measurement, VCCBRAM. |Voltage
+ |26 |LPD Internal Voltage measurement, VCC_PSINTLP (supply4). |Voltage
+ |27 |FPD Internal Voltage measurement, VCC_PSINTFP (supply5). |Voltage
+ |28 |PS Auxiliary Voltage measurement (supply6). |Voltage
+ |29 |PL VCCADC Voltage measurement (vccams). |Voltage
+          |30    |Differential analog input signal Voltage measurement.     |Voltage
+ |31 |VUser0 Voltage measurement (supply7). |Voltage
+ |32 |VUser1 Voltage measurement (supply8). |Voltage
+ |33 |VUser2 Voltage measurement (supply9). |Voltage
+ |34 |VUser3 Voltage measurement (supply10). |Voltage
+ |35 |Auxiliary ch 0 Voltage measurement (VAux0). |Voltage
+ |36 |Auxiliary ch 1 Voltage measurement (VAux1). |Voltage
+ |37 |Auxiliary ch 2 Voltage measurement (VAux2). |Voltage
+ |38 |Auxiliary ch 3 Voltage measurement (VAux3). |Voltage
+ |39 |Auxiliary ch 4 Voltage measurement (VAux4). |Voltage
+ |40 |Auxiliary ch 5 Voltage measurement (VAux5). |Voltage
+ |41 |Auxiliary ch 6 Voltage measurement (VAux6). |Voltage
+ |42 |Auxiliary ch 7 Voltage measurement (VAux7). |Voltage
+ |43 |Auxiliary ch 8 Voltage measurement (VAux8). |Voltage
+ |44 |Auxiliary ch 9 Voltage measurement (VAux9). |Voltage
+ |45 |Auxiliary ch 10 Voltage measurement (VAux10). |Voltage
+ |46 |Auxiliary ch 11 Voltage measurement (VAux11). |Voltage
+ |47 |Auxiliary ch 12 Voltage measurement (VAux12). |Voltage
+ |48 |Auxiliary ch 13 Voltage measurement (VAux13). |Voltage
+ |49 |Auxiliary ch 14 Voltage measurement (VAux14). |Voltage
+ |50 |Auxiliary ch 15 Voltage measurement (VAux15). |Voltage
+---------------------------------------------------------------------------------------------------------
diff --git a/Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt b/Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt
index e0e0755cabd8..fecb1afdd8c1 100644
--- a/Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/xilinx-xadc.txt
@@ -15,6 +15,8 @@ Required properties:
configuration interface to interface to the XADC hardmacro.
* "xlnx,axi-xadc-1.00.a": When using the axi-xadc pcore to
interface to the XADC hardmacro.
+ * "xlnx,axi-sysmon-1.3": When using the axi-sysmon pcore to
+ interface to the sysmon hardmacro.
- reg: Address and length of the register set for the device
- interrupts: Interrupt for the XADC control interface.
- clocks: When using the ZYNQ this must be the ZYNQ PCAP clock,
@@ -110,3 +112,20 @@ Examples:
};
};
};
+
+ xadc@44a00000 {
+ compatible = "xlnx,axi-sysmon-1.3";
+ interrupt-parent = <&axi_intc_0>;
+ interrupts = <2 2>;
+ clocks = <&clk_bus_0>;
+ reg = <0x44a00000 0x10000>;
+
+ xlnx,channels {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ channel@0 {
+ reg = <0>;
+ xlnx,bipolar;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/xilinx,intc.txt b/Documentation/devicetree/bindings/interrupt-controller/xilinx,intc.txt
new file mode 100644
index 000000000000..03b39f4b1625
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/xilinx,intc.txt
@@ -0,0 +1,56 @@
+Xilinx Interrupt Controller
+
+The controller is a soft IP core that is configured at build time for the
+number of interrupts and the type of each interrupt. These details cannot
+be changed at run time.
+
+Required properties:
+
+- compatible : should be "xlnx,xps-intc-1.00.a"
+- reg : Specifies base physical address and size of the registers.
+- interrupt-controller : Identifies the node as an interrupt controller
+- #interrupt-cells : Specifies the number of cells needed to encode an
+ interrupt source. The value shall be a minimum of 1.
+ The Xilinx device trees typically use 2 but the 2nd value
+ is not used.
+- xlnx,kind-of-intr : A 32 bit value specifying the interrupt type for each
+ possible interrupt (1 = edge, 0 = level). The interrupt
+ type typically comes in thru the device tree node of
+ the interrupt generating device, but in this case
+ the interrupt type is determined by the interrupt
+ controller based on how it was implemented.
+- xlnx,num-intr-inputs: Specifies the number of interrupts supported
+ by the specific implementation of the controller (1-32).
+
+Optional properties:
+- interrupt-parent : Specifies an interrupt controller from which it is
+ chained (cascaded).
+- interrupts : Specifies the interrupt of the parent controller from which
+ it is chained.
+
+Example:
+
+axi_intc_0: interrupt-controller@41800000 {
+ #interrupt-cells = <2>;
+ compatible = "xlnx,xps-intc-1.00.a";
+ interrupt-controller;
+ reg = <0x41800000 0x10000>;
+ xlnx,kind-of-intr = <0x1>;
+ xlnx,num-intr-inputs = <0x1>;
+};
+
+Chained Example:
+
+The interrupt is chained to hardware interrupt 61 (29 + 32) of the GIC
+for Zynq.
+
+axi_intc_0: interrupt-controller@41800000 {
+ #interrupt-cells = <2>;
+ compatible = "xlnx,xps-intc-1.00.a";
+ interrupt-controller;
+ interrupt-parent = <&ps7_scugic_0>;
+ interrupts = <0 29 4>;
+ reg = <0x41800000 0x10000>;
+ xlnx,kind-of-intr = <0x1>;
+ xlnx,num-intr-inputs = <0x1>;
+};
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,csi2rxss.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,csi2rxss.txt
new file mode 100644
index 000000000000..0993ad622008
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,csi2rxss.txt
@@ -0,0 +1,128 @@
+
+Xilinx MIPI CSI2 Receiver Subsystem (CSI2RxSS)
+----------------------------------------------
+
+The Xilinx MIPI CSI2 Receiver Subsystem is used to capture MIPI CSI2 traffic
+from compliant camera sensors and send the output as AXI4 Stream video data
+for image processing. The subsystem consists of a MIPI DPHY in slave mode
+which captures the data packets. This is passed along the MIPI CSI2 IP which
+extracts the packet data. This data is taken in by the Video Format Bridge
+(VFB) if selected and converted into AXI4 Stream video data at selected
+pixels per clock as per AXI4-Stream Video IP and System Design UG934.
+
+For more details, please refer to PG232 MIPI CSI-2 Receiver Subsystem v4.1
+
+Required properties:
+
+- compatible: Must contain "xlnx,mipi-csi2-rx-subsystem-5.0" or
+ "xlnx,mipi-csi2-rx-subsystem-4.1" or "xlnx,mipi-csi2-rx-subsystem-4.0".
+ The older strings "xlnx,mipi-csi2-rx-subsystem-2.0" and
+ "xlnx,mipi-csi2-rx-subsystem-3.0" are deprecated.
+
+- reg: Physical base address and length of the registers set for the device.
+
+- xlnx,max-lanes: Maximum active lanes in the design.
+
+- xlnx,en-active-lanes: Enable Active lanes configuration in Protocol
+ Configuration Register.
+
+- xlnx,vc: Virtual Channel, specifies virtual channel number to be filtered.
+ If this is 4 then all virtual channels are allowed.
+
+- xlnx,csi-pxl-format: This denotes the CSI Data type selected in hw design.
+ Packets other than this data type (except for RAW8 and User defined data
+ types) will be filtered out. Possible values are RAW6, RAW7, RAW8, RAW10,
+ RAW12, RAW14, RAW16, RAW20, RGB444, RGB555, RGB565, RGB666, RGB888 and YUV4228bit.
+
+- xlnx,vfb: Video Format Bridge, Denotes if Video Format Bridge is selected
+ so that output is as per AXI stream documented in UG934.
+
+- xlnx,ppc: Pixels per clock, Number of pixels to be transferred per pixel
+ clock. This is valid only if xlnx,vfb property is set to 1.
+
+- xlnx,axis-tdata-width: AXI Stream width, This denotes the AXI Stream width.
+ It depends on Data type chosen, Video Format Bridge enabled/disabled and
+ pixels per clock. If VFB is disabled then its value is either 0x20 (32 bit)
+ or 0x40(64 bit) width.
+
+- xlnx,video-format, xlnx,video-width: Video format and width, as defined in
+ video.txt.
+
+- port: Video port, using the DT bindings defined in ../video-interfaces.txt.
+  The CSI 2 Rx Subsystem has two ports: one input port for connecting to a
+ camera sensor and other is output port.
+
+- data-lanes: The number of data lanes through which CSI2 Rx Subsystem is
+ connected to the camera sensor as per video-interfaces.txt
+
+- clocks: List of phandles to AXI Lite, Video and 200 MHz DPHY clocks.
+
+- clock-names: Must contain "lite_aclk", "video_aclk" and "dphy_clk_200M" in
+ the same order as clocks listed in clocks property.
+
+Optional Properties
+
+- xlnx,en-vcx: When present, the max number of virtual channels can be 16 else 4.
+
+- reset-gpios: Optional specifier for a GPIO that asserts video_aresetn.
+
+- xlnx,dphy-present: Boolean to indicate whether DPHY register interface is
+ enabled or not. When this is present and compatible string is
+ xlnx,mipi-csi2-rx-subsystem-5.0 then DPHY offset is 0x1000 (4K) else
+ it is 0x1_0000 (64K).
+
+- xlnx,iic-present: Boolean to show whether subsystem's IIC is present or not.
+ This affects the base address of the DPHY. This can't be present when the
+ compatible string is "xlnx,mipi-csi2-rx-subsystem-4.1" or later.
+
+Example:
+
+ csiss_1: csiss@a0020000 {
+ compatible = "xlnx,mipi-csi2-rx-subsystem-5.0";
+ reg = <0x0 0xa0020000 0x0 0x20000>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 95 4>;
+
+ reset-gpios = <&gpio 81 1>;
+ xlnx,max-lanes = <0x4>;
+ xlnx,en-active-lanes;
+ xlnx,dphy-present;
+ xlnx,iic-present;
+ xlnx,vc = <0x4>;
+ xlnx,csi-pxl-format = "RAW8";
+ xlnx,vfb;
+ xlnx,ppc = <0x4>;
+ xlnx,axis-tdata-width = <0x20>;
+
+ clock-names = "lite_aclk", "dphy_clk_200M", "video_aclk";
+ clocks = <&misc_clk_0>, <&misc_clk_1>, <&misc_clk_2>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ xlnx,video-format = <XVIP_VF_YUV_422>;
+ xlnx,video-width = <8>;
+ csiss_out: endpoint {
+ remote-endpoint = <&vcap_csiss_in>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+
+ xlnx,video-format = <XVIP_VF_YUV_422>;
+ xlnx,video-width = <8>;
+
+ csiss_in: endpoint {
+ data-lanes = <1 2 3 4>;
+ /* MIPI CSI2 Camera handle */
+ remote-endpoint = <&vs2016_out>;
+ };
+
+ };
+
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,mem2mem.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,mem2mem.txt
new file mode 100644
index 000000000000..73af77faeb20
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,mem2mem.txt
@@ -0,0 +1,25 @@
+Xilinx Video IP MEM2MEM Pipeline (XVIM2M)
+----------------------------------------
+
+Xilinx video IP mem2mem pipeline processes DMA transfers to achieve memory
+copy from one physical memory to other. The data is copied by employing two
+DMA transfers memory to device and device to memory transactions one after
+the other. The DT node of the XVIM2M represents as a top level node of the
+pipeline and defines mappings between DMAs.
+
+Required properties:
+
+- compatible: Must be "xlnx,mem2mem".
+
+- dmas, dma-names: List of two DMA specifier and identifier strings (as
+ defined in Documentation/devicetree/bindings/dma/dma.txt) per port.
+ Identifier string of one DMA channel should be "tx" and other should be
+ "rx".
+
+Example:
+
+ video_m2m {
+ compatible = "xlnx,mem2mem";
+ dmas = <&dma_1 0>, <&dma_2 0>;
+ dma-names = "tx", "rx";
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,sdirxss.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,sdirxss.txt
new file mode 100644
index 000000000000..169338ed086d
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,sdirxss.txt
@@ -0,0 +1,74 @@
+
+Xilinx SDI Receiver Subsystem
+------------------------------
+
+The Xilinx SDI Rx Subsystem is used to capture SDI Video in up to 12G mode.
+It outputs the video as an AXI4 Stream video data in YUV 422 10bpc mode.
+The subsystem consists of the SDI Rx IP whose SDI native output is connected
+to a SDI to Native conversion Bridge. The output of the Native bridge is
+connected to a Native to AXI4S Bridge which generates the AXI4 Stream of
+YUV422 or YUV420 10 bpc in dual pixel per clock.
+
+Required properties:
+
+- compatible: Must contain "xlnx,v-smpte-uhdsdi-rx-ss"
+
+- reg: Physical base address and length of the registers set for the device.
+
+- interrupts: Contains the interrupt line number.
+
+- interrupt-parent: phandle to interrupt controller.
+
+- xlnx,include-edh: Whether the EDH processor is enabled in design or not.
+
+- xlnx,line-rate: The maximum mode supported by the design.
+
+- clocks: Input clock specifier. Refer to common clock bindings.
+
+- clock-names: List of input clocks.
+ Required elements: "s_axi_aclk", "sdi_rx_clk", "video_out_clk"
+
+- port: Video port, using the DT bindings defined in ../video-interfaces.txt.
+ The SDI Rx subsystem has one port configured as output port.
+
+- xlnx,video-format, xlnx,video-width: Video format and width, as defined in
+ video.txt. Please note that the video format is fixed to either YUV422 or YUV420
+ and the video-width is 10.
+
+Optional properties:
+
+- reset_gt-gpios: contains GPIO reset phandle for FMC init done pin in GT.
+ This pin is active low.
+- xlnx,bpp: This denotes the bit depth as 10 or 12 based on IP configuration.
+ The default value is 10 for backward compatibility.
+
+Example:
+ v_smpte_uhdsdi_rx_ss: v_smpte_uhdsdi_rx_ss@80000000 {
+ compatible = "xlnx,v-smpte-uhdsdi-rx-ss";
+ interrupt-parent = <&gic>;
+ interrupts = <0 89 4>;
+ reg = <0x0 0x80000000 0x0 0x10000>;
+ xlnx,include-axilite = "true";
+ xlnx,include-edh = "true";
+ xlnx,include-vid-over-axi = "true";
+ xlnx,line-rate = "12G_SDI_8DS";
+ clocks = <&clk_1>, <&si570_1>, <&clk_2>;
+ clock-names = "s_axi_aclk", "sdi_rx_clk", "video_out_clk";
+ reset_gt-gpios = <&axi_gpio_0 0 0 GPIO_ACTIVE_LOW>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ xlnx,video-format = <XVIP_VF_YUV_422>;
+ xlnx,video-width = <10>;
+
+ sdirx_out: endpoint {
+ remote-endpoint = <&vcap_sdirx_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-axi4s-switch.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-axi4s-switch.txt
new file mode 100644
index 000000000000..fb5ed47d959a
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-axi4s-switch.txt
@@ -0,0 +1,141 @@
+Xilinx AXI4-Stream Switch
+-------------------------------
+
+The AXI4-Stream Switch provides configurable routing between masters and slaves.
+It supports up to 16 masters/sources and 16 slaves/sinks and two routing options.
+There is at least one slave/sink port and two master/source ports.
+
+The two routing options available are TDEST routing and control register routing.
+The TDEST based routing uses design parameters and hence there is no software control.
+Each port is mapped as a pad and has its own format specified.
+
+Control register routing introduces an AXI4-Lite interface to configure the
+routing table. There is one register for each of the master interfaces to
+control each of the selectors. This routing mode requires that there is
+precisely only one path between master and slave. When attempting to map the
+same slave interface to multiple master interfaces, only the lowest master
+interface is able to access the slave interface.
+Here only the slave/sink ports have formats as master/source ports will inherit
+the corresponding slave ports formats. A routing table is maintained in this case.
+
+Please refer to PG085 AXI4-Stream Infrastructure IP Suite v2.2 for more details.
+
+Required properties:
+
+ - compatible: Must be "xlnx,axis-switch-1.1".
+ - xlnx,routing-mode: Can be 0 (TDEST routing) or 1 (Control reg routing)
+ - xlnx,num-si-slots: Number of slave / input ports. Min 1 Max 16 .
+ - xlnx,num-mi-slots: Number of master / output ports. Min 1 Max 16.
+ - ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+ - clocks: Reference to the AXI Streaming clock feeding the ACLK and
+ AXI4 Lite control interface clock when control routing is enabled.
+ - clock-names: Must have "aclk".
+
+Optional properties:
+ - reg: Physical base address and length of the registers set for the device.
+ This is required only if xlnx,routing-mode is 1.
+ - clocks: Reference to AXI4 Lite control interface clock when routing-mode is 1.
+ - clock-names: "s_axi_ctl_clk" clock for AXI4 Lite interface when routing-mode is 1.
+
+Example:
+
+For TDEST routing, from 1 slave port to 4 master ports
+
+ axis_switch_0: axis_switch@0 {
+ compatible = "xlnx,axis-switch-1.1";
+ xlnx,routing-mode = <0x0>;
+ xlnx,num-si-slots = <0x1>;
+ xlnx,num-mi-slots = <0x4>;
+ clocks = <&vid_stream_clk>;
+ clock-names = "aclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ switch_in0: endpoint {
+ remote-endpoint = <&csirxss_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ switch_out0: endpoint {
+ remote-endpoint = <&vcap_csirxss0_in>;
+ };
+ };
+ port@2 {
+ reg = <2>;
+ switch_out1: endpoint {
+ remote-endpoint = <&vcap_csirxss1_in>;
+ };
+ };
+ port@3 {
+ reg = <3>;
+ switch_out2: endpoint {
+ remote-endpoint = <&vcap_csirxss2_in>;
+ };
+ };
+ port@4 {
+ reg = <4>;
+ switch_out3: endpoint {
+ remote-endpoint = <&vcap_csirxss3_in>;
+ };
+ };
+ };
+
+ };
+
+For Control reg based routing, from 2 slave ports to 4 master ports
+
+ axis_switch_0: axis_switch@a0050000 {
+ compatible = "xlnx,axis-switch-1.1";
+ reg = <0x0 0xa0050000 0x0 0x1000>;
+ xlnx,routing-mode = <0x1>;
+ xlnx,num-si-slots = <0x2>;
+ xlnx,num-mi-slots = <0x4>;
+ clocks = <&vid_stream_clk>, <&misc_clk_0>;
+		clock-names = "aclk", "s_axi_ctl_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ switch_in0: endpoint {
+ remote-endpoint = <&csirxss_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ switch_in1: endpoint {
+ remote-endpoint = <&tpg_out>;
+ };
+ };
+ port@2 {
+ reg = <2>;
+ switch_out0: endpoint {
+ remote-endpoint = <&vcap_csirxss0_in>;
+ };
+ };
+ port@3 {
+ reg = <3>;
+ switch_out1: endpoint {
+ remote-endpoint = <&vcap_csirxss1_in>;
+ };
+ };
+ port@4 {
+ reg = <4>;
+ switch_out2: endpoint {
+ remote-endpoint = <&vcap_csirxss2_in>;
+ };
+ };
+ port@5 {
+ reg = <5>;
+ switch_out3: endpoint {
+ remote-endpoint = <&vcap_csirxss3_in>;
+ };
+ };
+ };
+
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-cfa.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-cfa.txt
new file mode 100644
index 000000000000..cdb0886cf975
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-cfa.txt
@@ -0,0 +1,58 @@
+Xilinx Color Filter Array (CFA)
+-------------------------------
+
+Required properties:
+
+- compatible: Must be "xlnx,v-cfa-7.0".
+
+- reg: Physical base address and length of the registers set for the device.
+
+- clocks: Reference to the video core clock.
+
+- ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+ The cfa has an input port (0) and an output port (1).
+
+Required port properties:
+
+- xlnx,video-format: Must be SENSOR_MONO for the input port (0), and RBG for
+ the output port (1).
+
+- xlnx,video-width: Video width as defined in video.txt
+
+- xlnx,cfa-pattern: Must be one of "rggb", "grbg", "gbrg", and "bggr" for the
+ input port (0). Must not be specified for the output port (1).
+
+Example:
+
+ cfa_0: cfa@400b0000 {
+ compatible = "xlnx,v-cfa-7.0";
+ reg = <0x400b0000 0x10000>;
+ clocks = <&clkc 15>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ xlnx,video-format = <XVIP_VF_SENSOR_MONO>;
+ xlnx,video-width = <8>;
+ xlnx,cfa-pattern = "rggb";
+
+ cfa0_in: endpoint {
+ remote-endpoint = <&spc0_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+
+ xlnx,video-format = <XVIP_VF_RBG>;
+ xlnx,video-width = <8>;
+
+ cfa0_out: endpoint {
+ remote-endpoint = <&ccm0_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-cresample.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-cresample.txt
new file mode 100644
index 000000000000..f404ee301272
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-cresample.txt
@@ -0,0 +1,54 @@
+Xilinx Chroma Resampler (CRESAMPLE)
+-----------------------------------
+
+Required properties:
+
+- compatible: Must be "xlnx,v-cresample-4.0".
+
+- reg: Physical base address and length of the registers set for the device.
+
+- clocks: Reference to the video core clock.
+
+- ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+  The cresample has an input port (0) and an output port (1).
+
+Required port properties:
+
+- xlnx,video-format: Must be one of YUV_444, YUV_422 or YUV_420 for the input
+ port (0), and one of YUV_422 or YUV_420 for the output port (1).
+
+- xlnx,video-width: Video width as defined in video.txt
+
+Example:
+
+ cresample_0: cresample@40120000 {
+ compatible = "xlnx,v-cresample-4.0";
+ reg = <0x40120000 0x10000>;
+ clocks = <&clkc 15>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ xlnx,video-format = <XVIP_VF_YUV_444>;
+ xlnx,video-width = <8>;
+
+ cresample0_in: endpoint {
+ remote-endpoint = <&rgb2yuv0_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+
+ xlnx,video-format = <XVIP_VF_YUV_422>;
+ xlnx,video-width = <8>;
+
+ cresample0_out: endpoint {
+ remote-endpoint = <&scaler0_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-demosaic.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-demosaic.txt
new file mode 100644
index 000000000000..9b3aff413e0e
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-demosaic.txt
@@ -0,0 +1,62 @@
+Xilinx Video Demosaic IP
+-----------------------------
+The Xilinx Video Demosaic IP is used to interface to a Bayer video source.
+
+The driver set default Sink Pad media bus format to RGGB.
+The IP and driver only support RGB as its Source Pad media format.
+
+Required properties:
+
+- compatible: Must be "xlnx,v-demosaic".
+
+- reg: Physical base address and length of the registers set for the device.
+
+- clocks: Reference to the AXI Streaming clock feeding the Demosaic ap_clk.
+
+- xlnx,max-height: Maximum number of lines. Valid range is 64 to 4320.
+
+- xlnx,max-width: Maximum number of pixels in a line. Valid range is 64 to 8192.
+
+- reset-gpios: Specifier for GPIO that asserts Demosaic IP (AP_RST_N) reset.
+
+- ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+
+Required port properties:
+
+- reg: This value represents the media pad of the V4L2 sub-device.
+ A Sink Pad is represented by reg = <0>
+ A Source Pad is represented by reg = <1>
+
+- xlnx,video-width: Video width as defined in video.txt
+
+Example:
+ demosaic_1: demosaic@a00b0000 {
+ compatible = "xlnx,v-demosaic";
+ reg = <0x0 0xa00b0000 0x0 0x10000>;
+ clocks = <&vid_stream_clk>;
+ reset-gpios = <&gpio 87 1>;
+ xlnx,max-width = <3840>;
+ xlnx,max-height = <2160>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ xlnx,video-width = <8>;
+
+ demosaic_in: endpoint {
+ remote-endpoint = <&tpg_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ xlnx,video-width = <8>;
+
+ demosaic_out: endpoint {
+ remote-endpoint = <&gamma_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-gamma-lut.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-gamma-lut.txt
new file mode 100644
index 000000000000..7bd750f009b4
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-gamma-lut.txt
@@ -0,0 +1,63 @@
+Xilinx Video Gamma Correction IP
+-----------------------------------
+The Xilinx Video Gamma Correction IP is used to provide RGB gamma correction.
+The IP provides a look up table for each R,G and B components.
+
+Required properties:
+
+- compatible: Must be "xlnx,v-gamma-lut".
+
+- reg: Physical base address and length of the registers set for the device.
+
+- clocks: Reference to the clock that drives the ap_clk
+ signal of Video Gamma Lookup.
+
+- xlnx,max-height: Maximum number of lines. Valid range is 64 to 4320.
+
+- xlnx,max-width: Maximum number of pixels in a line. Valid range is 64 to 8192.
+
+- reset-gpios: Specifier for a GPIO that asserts Gamma IP (AP_RST_N) reset
+
+- ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+ The Gamma LUT IP has an input port (0) and an output port (1).
+
+
+Required port properties:
+- reg: This value represents the media pad of the V4L2 sub-device.
+ A Sink Pad is represented by reg = <0>
+ A Source Pad is represented by reg = <1>
+
+- xlnx,video-width: Video width as defined in video.txt. Can be either 8 or 10.
+
+Example:
+
+	gamma_lut_1: gamma_lut@a0080000 {
+ compatible = "xlnx,v-gamma-lut";
+ reg = <0x0 0xa0080000 0x0 0x10000>;
+ clocks = <&vid_stream_clk>;
+ reset-gpios = <&gpio 83 1>;
+ xlnx,max-height = <2160>;
+ xlnx,max-width = <3840>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ xlnx,video-width = <8>;
+
+ gamma_in: endpoint {
+ remote-endpoint = <&demosaic_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ xlnx,video-width = <8>;
+
+ gamma_out: endpoint {
+ remote-endpoint = <&csc_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-hls.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-hls.txt
new file mode 100644
index 000000000000..a6db3040565a
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-hls.txt
@@ -0,0 +1,64 @@
+Xilinx High-Level Synthesis Core (HLS)
+--------------------------------------
+
+High-Level Synthesis cores are synthesized from a high-level function
+description developed by the user. As such their functions vary widely, but
+they all share a set of common characteristics that allow them to be described
+by common bindings.
+
+
+Required properties:
+
+- compatible: This property must contain "xlnx,v-hls" to indicate that the
+ core is compatible with the generic Xilinx HLS DT bindings. It can also
+ contain a more specific string to identify the HLS core implementation. The
+ value of those implementation-specific strings is out of scope for these DT
+ bindings.
+
+- reg: Physical base address and length of the registers sets for the device.
+ The HLS core has two registers sets, the first one contains the core
+ standard registers and the second one contains the custom user registers.
+
+- clocks: Reference to the video core clock.
+
+- ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+ The HLS core has one input port (0) and one output port (1).
+
+Required port properties:
+
+- xlnx,video-format: Video format as defined in video.txt.
+- xlnx,video-width: Video width as defined in video.txt.
+
+Example:
+
+ hls_0: hls@43c00000 {
+ compatible = "xlnx,v-hls-sobel", "xlnx,v-hls";
+ reg = <0x43c00000 0x24>, <0x43c00024 0xa0>;
+ clocks = <&clkc 15>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ xlnx,video-format = <XVIP_VF_YUV_422>;
+ xlnx,video-width = <8>;
+
+ hls0_in: endpoint {
+ remote-endpoint = <&vdma_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+
+ xlnx,video-format = <XVIP_VF_YUV_422>;
+ xlnx,video-width = <8>;
+
+ hls0_out: endpoint {
+ remote-endpoint = <&vdma_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-multi-scaler.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-multi-scaler.txt
new file mode 100644
index 000000000000..3aea1f36a6ce
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-multi-scaler.txt
@@ -0,0 +1,95 @@
+Xilinx mem2mem Multi Video Scaler (XM2MSC)
+-----------------------------------------
+
+Required properties:
+- compatible : Should be "xlnx,v-multi-scaler-v1.0"
+- clocks : Input clock specifier. Refer to common clk bindings.
+- interrupt-parent : Interrupt controller the interrupt is routed through
+- interrupts : Should contain MultiScaler interrupt
+- reset-gpios : Should contain GPIO reset phandle
+- reg : Physical base address and
+ length of the registers set for the device.
+- xlnx,max-chan : Maximum number of supported scaling channels (1 - 8)
+- xlnx,max-width : Maximum number of supported column/width (64 - 3840)
+- xlnx,max-height : Maximum number of supported row/height (64 - 2160)
+- xlnx,dma-addr-width : dma address width (either 32 or 64)
+- xlnx,pixels-per-clock : pixels per clock set in IP (1, 2 or 4)
+- xlnx,vid-formats : A list of strings indicating what video memory
+ formats the IP has been configured to support.
+ See VIDEO FORMATS table below and examples.
+- xlnx,num-taps : The number of filter taps for scaling (6, 8, 10, 12)
+
+VIDEO FORMATS
+The following table describes the legal string values to be used for
+the xlnx,vid-formats property. To the left is the string value and the
+column to the right describes the format.
+
+IP FORMAT DTS String Description
+-------------|----------------|---------------------
+RGB8 bgr888 Packed RGB, 8 bits per component.
+ Every RGB pixel in memory is represented with
+ 24 bits.
+RGBX8 xbgr8888 Packed RGB, 8 bits per component. Every RGB
+ pixel in memory is represented with 32 bits.
+ Bits[31:24] do not contain pixel information.
+BGRX8 xrgb8888 Packed BGR, 8 bits per component. Every BGR
+ pixel in memory is represented with 32 bits.
+ Bits[31:24] do not contain pixel information.
+RGBX10 xbgr2101010 Packed RGB, 10 bits per component. Every RGB
+ pixel is represented with 32 bits. Bits[31:30]
+ do not contain any pixel information.
+YUV8 vuy888 Packed YUV 4:4:4, 8 bits per component. Every
+ YUV 4:4:4 pixel in memory is represented with
+ 24 bits.
+YUVX8 xvuy8888 Packed YUV 4:4:4, 8 bits per component.
+ Every YUV 4:4:4 pixel in memory is represented
+ with 32 bits. Bits[31:24] do not contain pixel
+ information.
+YUYV8 yuyv Packed YUV 4:2:2, 8 bits per component. Every
+ two YUV 4:2:2 pixels in memory are represented
+ with 32 bits.
+UYVY8 uyvy Packed YUV 4:2:2, 8 bits per component.
+ Every two YUV 4:2:2 pixels in memory are
+ represented with 32 bits.
+YUVX10 yuvx2101010 Packed YUV 4:4:4, 10 bits per component.
+ Every YUV 4:4:4 pixel is represented with 32 bits.
+ Bits[31:30] do not contain any pixel information.
+Y8 y8 Packed Luma-Only, 8 bits per component. Every
+ luma-only pixel in memory is represented with
+ 8 bits. Y8 is presented as YUV 4:4:4 on the
+ AXI4-Stream interface.
+Y10 y10 Packed Luma-Only, 10 bits per component. Every
+ three luma-only pixels in memory is represented
+ with 32 bits. Y10 is presented as YUV 4:4:4 on
+ the AXI4-Stream interface.
+Y_UV8 nv16 Semi-planar YUV 4:2:2 with 8 bits per component.
+ Y and UV stored in separate planes.
+Y_UV8_420 nv12 Semi-planar YUV 4:2:0 with 8 bits per component.
+ Y and UV stored in separate planes.
+Y_UV10 xv20 Semi-planar YUV 4:2:2 with 10 bits per component.
+ Every 3 pixels is represented with 32 bits.
+ Bits[31:30] do not contain any pixel information.
+ Y and UV stored in separate planes.
+Y_UV10_420 xv15 Semi-planar YUV 4:2:0 with 10 bits per component.
+ Every 3 pixels is represented with 32 bits.
+ Bits[31:30] do not contain any pixel information.
+ Y and UV stored in separate planes.
+
+Example
+
+v_multi_scaler_0: v_multi_scaler@a0000000 {
+ clocks = <&clk 71>;
+ compatible = "xlnx,v-multi-scaler-v1.0";
+ interrupt-names = "interrupt";
+ interrupt-parent = <&gic>;
+ interrupts = <0 89 4>;
+ reg = <0x0 0xa0000000 0x0 0x10000>;
+ xlnx,vid-formats = "bgr888","vuy888";
+ reset-gpios = <&gpio 78 1>;
+ xlnx,max-chan = <0x01>;
+ xlnx,dma-addr-width = <0x20>;
+ xlnx,pixels-per-clock = /bits/ 8 <2>;
+ xlnx,max-width = <3840>;
+ xlnx,max-height = <2160>;
+ xlnx,num-taps = <6>;
+};
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-remapper.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-remapper.txt
new file mode 100644
index 000000000000..cda02cb97a21
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-remapper.txt
@@ -0,0 +1,61 @@
+Xilinx Video Remapper
+---------------------
+
+The IP core remaps input pixel components to produce an output pixel with
+less, more or the same number of components as the input pixel.
+
+Required properties:
+
+- compatible: Must be "xlnx,v-remapper".
+
+- clocks: Reference to the video core clock.
+
+- xlnx,video-width: Video pixel component width, as defined in video.txt.
+
+- #xlnx,s-components: Number of components per pixel at the input port
+ (between 1 and 4 inclusive).
+
+- #xlnx,m-components: Number of components per pixel at the output port
+ (between 1 and 4 inclusive).
+
+- xlnx,component-maps: Remapping configuration represented as an array of
+ integers. The array contains one entry per output component, in the low to
+ high order. Each entry corresponds to the zero-based position of the
+ corresponding input component, or the value 4 to drive a constant value on
+ the output component. For example, to remap RGB to BGR use <2 1 0>, and to
+ remap RBG to xRGB use <1 0 2 4>.
+
+- ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+  The remapper has an input port (0) and an output port (1).
+
+Example: RBG to xRGB remapper
+
+ remapper_0: remapper {
+ compatible = "xlnx,v-remapper";
+
+ clocks = <&clkc 15>;
+
+ xlnx,video-width = <8>;
+
+ #xlnx,s-components = <3>;
+ #xlnx,m-components = <4>;
+ xlnx,component-maps = <1 0 2 4>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ remap0_in: endpoint {
+ remote-endpoint = <&tpg0_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ remap0_out: endpoint {
+ remote-endpoint = <&sobel0_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-rgb2yuv.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-rgb2yuv.txt
new file mode 100644
index 000000000000..ecd10fb31ac1
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-rgb2yuv.txt
@@ -0,0 +1,54 @@
+Xilinx RGB to YUV (RGB2YUV)
+---------------------------
+
+Required properties:
+
+- compatible: Must be "xlnx,v-rgb2yuv-7.1".
+
+- reg: Physical base address and length of the registers set for the device.
+
+- clocks: Reference to the video core clock.
+
+- ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+ The rgb2yuv has an input port (0) and an output port (1).
+
+Required port properties:
+
+- xlnx,video-format: Must be RBG for the input port (0) and YUV_444 for the
+ output port (1).
+
+- xlnx,video-width: Video width as defined in video.txt
+
+Example:
+
+ rgb2yuv_0: rgb2yuv@40100000 {
+ compatible = "xlnx,v-rgb2yuv-7.1";
+ reg = <0x40100000 0x10000>;
+ clocks = <&clkc 15>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ xlnx,video-format = <XVIP_VF_RBG>;
+ xlnx,video-width = <8>;
+
+ rgb2yuv0_in: endpoint {
+ remote-endpoint = <&gamma0_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+
+ xlnx,video-format = <XVIP_VF_YUV_444>;
+ xlnx,video-width = <8>;
+
+ rgb2yuv0_out: endpoint {
+ remote-endpoint = <&cresample0_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-scaler.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-scaler.txt
new file mode 100644
index 000000000000..0bb9c405f5ae
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-scaler.txt
@@ -0,0 +1,75 @@
+Xilinx Scaler (SCALER)
+------------------------------------
+
+Required properties:
+
+- compatible: Must be "xlnx,v-scaler-8.1".
+
+- reg: Physical base address and length of the registers set for the device.
+
+- clocks: Reference to the video core clock.
+
+- xlnx,num-hori-taps, xlnx,num-vert-taps: The number of horizontal and vertical
+ taps for scaling filter(range: 2 - 12).
+
+- xlnx,max-num-phases: The maximum number of phases for scaling filter
+ (range: 2 - 64).
+
+- ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+ The scaler has an input port (0) and an output port (1).
+
+Optional properties:
+
+- xlnx,separate-yc-coef: When set, this boolean property specifies that
+ the hardware uses separate coefficients for the luma and chroma filters.
+ Otherwise a single set of coefficients is shared for both.
+
+- xlnx,separate-hv-coef: When set, this boolean property specifies that
+ the hardware uses separate coefficients for the horizontal and vertical
+ filters. Otherwise a single set of coefficients is shared for both.
+
+Required port properties:
+
+- xlnx,video-format: Must be one of RBG, YUV_444, YUV_422 or YUV_420 for
+ both input port (0) and output port (1). The two formats must be identical.
+
+- xlnx,video-width: Video width as defined in video.txt
+
+Example:
+
+ scaler_0: scaler@43c30000 {
+ compatible = "xlnx,v-scaler-8.1";
+ reg = <0x43c30000 0x10000>;
+ clocks = <&clkc 15>;
+
+ xlnx,num-hori-taps = <12>;
+ xlnx,num-vert-taps = <12>;
+ xlnx,max-num-phases = <4>;
+ xlnx,separate-hv-coef;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ xlnx,video-format = <XVIP_VF_YUV_422>;
+ xlnx,video-width = <8>;
+
+ scaler0_in: endpoint {
+ remote-endpoint = <&cresample0_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+
+ xlnx,video-format = <XVIP_VF_YUV_422>;
+ xlnx,video-width = <8>;
+
+ scaler0_out: endpoint {
+ remote-endpoint = <&vcap0_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-scd.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-scd.txt
new file mode 100644
index 000000000000..a05e9712c833
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-scd.txt
@@ -0,0 +1,164 @@
+Xilinx Scene Change Detection IP (SCD)
+--------------------------------------
+
+The Xilinx Scene Change Detection IP contains two blocks: one IP block is used
+for reading video frame data from memory to the device and the other IP block
+is used for determining whether there is a scene change between current and the
+previous frame. The IP supports YUV planar and semi-planar formats. IP only
+needs luma frame to determine the scene change event. The IP supports memory
+based model, which means that it will accept a dma buffer address and perform
+MEM2DEV transfer followed by statistical based image processing and give the
+data back to application if scene change detection is present or not.
+
+Another version of scene change detection IP which supports streaming model,
+which means that IP can be inserted in a capture pipeline. For example,
+"hdmirx -> streaming-scd -> fb_wr" is a typical capture pipeline where
+streaming SCD can be embedded. The IP accespts the AXI video data and perform
+histogram based statistical analysis to detect scene change. The IP supports
+single channel.
+
+Required properties:
+
+- compatible: Should be "xlnx,v-scd"
+
+- reg: Physical base address and length of the registers set for the device
+
+- clocks: Reference to the video core clock.
+
+- reset-gpios: Specifier for a GPIO that asserts SCD (AP_RST_N) reset.
+
+- xlnx,memory-based: This is to differentiate between memory based and
+ streaming based IP. The value is 1 for memory based and 0 for streaming
+ based IPs.
+
+- xlnx,numstreams: Maximum active streams IP can support is 8 and this is based
+ on the design.
+
+- xlnx,addrwidth: Size of dma address pointer in IP (either 32 or 64)
+
+- subdev: Each channel will have its own subdev node. Each subdev will have its
+ sink port.
+
+- port: Video port, using the DT bindings defined in ../video-interfaces.txt.
+
+Example:
+
+1. Memory based device tree
+
+The following example shows how the device tree would look like for a memory
+based design where 8 streams are enabled.
+
+ scd: scenechange@a0100000 {
+ compatible = "xlnx,v-scd";
+ reg = <0x0 0xa0100000 0x0 0x1fff>;
+ clocks = <&misc_clk_0>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 90 4>;
+ reset-gpios = <&gpio 94 1>;
+
+ xlnx,memory-based;
+ xlnx,numstreams = <8>;
+ xlnx,addrwidth = <0x20>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #dma-cells = <1>;
+
+ subdev@0 {
+ port@0 {
+ reg = <0>;
+ scd_in0: endpoint {
+ remote-endpoint = <&vcap0_out0>;
+ };
+ };
+ };
+ subdev@1 {
+ port@0 {
+ reg = <0>;
+ scd_in1: endpoint {
+ remote-endpoint = <&vcap0_out1>;
+ };
+ };
+ };
+ subdev@2 {
+ port@0 {
+ reg = <0>;
+ scd_in2: endpoint {
+ remote-endpoint = <&vcap0_out2>;
+ };
+ };
+ };
+ subdev@3 {
+ port@0 {
+ reg = <0>;
+ scd_in3: endpoint {
+ remote-endpoint = <&vcap0_out3>;
+ };
+ };
+ };
+ subdev@4 {
+ port@0 {
+ reg = <0>;
+ scd_in4: endpoint {
+ remote-endpoint = <&vcap0_out4>;
+ };
+ };
+ };
+ subdev@5 {
+ port@0 {
+ reg = <0>;
+ scd_in5: endpoint {
+ remote-endpoint = <&vcap0_out5>;
+ };
+ };
+ };
+ subdev@6 {
+ port@0 {
+ reg = <0>;
+ scd_in6: endpoint {
+ remote-endpoint = <&vcap0_out6>;
+ };
+ };
+ };
+ subdev@7 {
+ port@0 {
+ reg = <0>;
+ scd_in7: endpoint {
+ remote-endpoint = <&vcap0_out7>;
+ };
+ };
+ };
+ };
+
+2. Streaming based device tree
+
+The following example shows how the device tree would look like for a streaming
+based design.
+
+ scd: scenechange@a0280000 {
+ compatible = "xlnx,v-scd";
+ reg = <0x0 0xa0280000 0x0 0x1fff>;
+ clocks = <&clk 72>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 111 4>;
+ reset-gpios = <&gpio 100 1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ xlnx,numstreams = <1>;
+
+ scd {
+ port@0 {
+ reg = <0x0>;
+ scd_in0: endpoint {
+ remote-endpoint = <&vpss_scaler_out>;
+ };
+ };
+
+ port@1 {
+ reg = <0x1>;
+ scd_out0: endpoint {
+ remote-endpoint = <&vcap_hdmi_in_1>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-switch.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-switch.txt
new file mode 100644
index 000000000000..91dc3af4a2b6
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-switch.txt
@@ -0,0 +1,55 @@
+Xilinx Video Switch
+-------------------
+
+Required properties:
+
+ - compatible: Must be "xlnx,v-switch-1.0".
+
+ - reg: Physical base address and length of the registers set for the device.
+
+ - clocks: Reference to the video core clock.
+
+ - #xlnx,inputs: Number of input ports
+ - #xlnx,outputs: Number of outputs ports
+
+ - ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+
+Example:
+
+ switch: switch@43c10000 {
+ compatible = "xlnx,v-switch-1.0";
+ reg = <0x43c10000 0x10000>;
+ clocks = <&clkc 15>;
+
+ #xlnx,inputs = <2>;
+ #xlnx,outputs = <2>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ switch_in0: endpoint {
+ remote-endpoint = <&tpg_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ switch_in1: endpoint {
+ remote-endpoint = <&cresample0_out>;
+ };
+ };
+ port@2 {
+ reg = <2>;
+ switch_out0: endpoint {
+ remote-endpoint = <&scaler0_in>;
+ };
+ };
+ port@3 {
+ reg = <3>;
+ switch_out1: endpoint {
+ remote-endpoint = <&vcap0_in1>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-tpg.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-tpg.txt
index 439351ab2a79..4b2126a78a3f 100644
--- a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-tpg.txt
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-tpg.txt
@@ -6,7 +6,8 @@ Required properties:
- compatible: Must contain at least one of
"xlnx,v-tpg-5.0" (TPG version 5.0)
- "xlnx,v-tpg-6.0" (TPG version 6.0)
+ "xlnx,v-tpg-7.0" (TPG version 7.0)
+ "xlnx,v-tpg-8.0" (TPG version 8.0)
TPG versions backward-compatible with previous versions should list all
compatible versions in the newer to older order.
@@ -23,6 +24,8 @@ Required properties:
Optional properties:
+- xlnx,ppc: Pixels per clock. Valid values are 1, 2, 4 or 8.
+
- xlnx,vtc: A phandle referencing the Video Timing Controller that generates
video timings for the TPG test patterns.
@@ -30,16 +33,26 @@ Optional properties:
input. The GPIO active level corresponds to the selection of VTC-generated
video timings.
+- reset-gpios: Specifier for a GPIO that asserts TPG (AP_RST_N) reset.
+ This property is mandatory for TPG v7.0 and above.
+
+- xlnx,max-height: Maximum number of lines.
+ This property is mandatory for TPG v8.0. Value ranges from 64 to 7760.
+
+- xlnx,max-width: Maximum number of pixels in a line.
+ This property is mandatory for TPG v8.0. Value ranges from 64 to 10328.
+
The xlnx,vtc and timing-gpios properties are mandatory when the TPG is
synthesized with two ports and forbidden when synthesized with one port.
Example:
tpg_0: tpg@40050000 {
- compatible = "xlnx,v-tpg-6.0", "xlnx,v-tpg-5.0";
+ compatible = "xlnx,v-tpg-5.0";
reg = <0x40050000 0x10000>;
clocks = <&clkc 15>;
+ xlnx,ppc = <2>;
xlnx,vtc = <&vtc_3>;
timing-gpios = <&ps7_gpio_0 55 GPIO_ACTIVE_LOW>;
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-vpss-csc.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-vpss-csc.txt
new file mode 100644
index 000000000000..b3627af85e6e
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-vpss-csc.txt
@@ -0,0 +1,66 @@
+Xilinx VPSS Color Space Converter (CSC)
+-----------------------------------------
+The Xilinx VPSS Color Space Converter (CSC) is a Video IP that supports
+color space conversion from RGB input to YUV output.
+
+Required properties:
+
+- compatible: Must be "xlnx,v-vpss-csc".
+
+- reg: Physical base address and length of the registers set for the device.
+
+- clocks: Reference to the clock that drives the ap_clk signal.
+
+- xlnx,max-height: Maximum number of lines.
+ Valid range from 64 to 4320.
+
+- xlnx,max-width: Maximum number of pixels in a line.
+ Valid range from 64 to 8192.
+
+- reset-gpios: Specifier for a GPIO that asserts VPSS CSC (AP_RST_N) reset.
+
+- ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+ The scaler has an input port (0) and an output port (1).
+
+Required port properties:
+
+- xlnx,video-format: Must be XVIP_VF_RBG, XVIP_VF_YUV_444 or XVIP_VF_YUV_422
+ for input port (0) and XVIP_VF_RBG, XVIP_VF_YUV_444 or XVIP_VF_YUV_422
+ for output port (1). See <dt-bindings/media/xilinx-vip.h> for more details.
+
+- xlnx,video-width: Video width as defined in video.txt. Must be either 8 or 10.
+
+Example:
+ csc_1:csc@a0040000 {
+ compatible = "xlnx,v-vpss-csc";
+ reg = <0x0 0xa0040000 0x0 0x10000>;
+ clocks = <&vid_stream_clk>;
+ reset-gpios = <&gpio 84 1>;
+ xlnx,max-width = <3840>;
+ xlnx,max-height = <2160>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ /* Sink Pad */
+ port@0 {
+ reg = <0>;
+ xlnx,video-format = <XVIP_VF_RBG>;
+ xlnx,video-width = <8>;
+
+ csc_in: endpoint {
+ remote-endpoint = <&gamma_out>;
+ };
+ };
+ /* Source Pad */
+ port@1 {
+ reg = <1>;
+ xlnx,video-format = <XVIP_VF_RBG>;
+ xlnx,video-width = <8>;
+
+ csc_out: endpoint {
+ remote-endpoint = <&scalar_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/xilinx/xlnx,v-vpss-scaler.txt b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-vpss-scaler.txt
new file mode 100644
index 000000000000..05ca0cb33cad
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/xilinx/xlnx,v-vpss-scaler.txt
@@ -0,0 +1,93 @@
+Xilinx VPSS Scaler
+------------------------
+
+Required properties:
+
+- compatible: Must be "xlnx,v-vpss-scaler-2.2" or "xlnx,v-vpss-scaler-1.0".
+ The older string "xlnx,v-vpss-scaler" will be deprecated.
+
+- reg: Physical base address and length of the registers set for the device.
+
+- clocks: Reference to the AXI Streaming clock feeding the VPSS Scaler AP_CLK
+ and AXI4 Lite control interface clock.
+
+- clock-names: Must contain "aclk_axis" and "aclk_ctrl" in the same order as
+ clocks listed in clocks property.
+
+- xlnx,num-hori-taps, xlnx,num-vert-taps: The number of horizontal and vertical
+ taps for scaling filter(range: 2,4,6,8,10,12).
+
+ A value of 2 represents bilinear filters. A value of 4 represents bicubic.
+ Values 6,8,10,12 represent polyphase filters.
+
+- xlnx,pix-per-clk : The pixels per clock property of the IP
+
+- reset-gpios: Specifier for a GPIO that asserts the VPSS Scaler reset.
+  This property is mandatory for the Scaler.
+
+- xlnx,max-height: Maximum number of lines.
+ Valid range from 64 to 4320.
+
+- xlnx,max-width: Maximum number of pixels in a line.
+ Valid range from 64 to 8192.
+
+- ports: Video ports, using the DT bindings defined in ../video-interfaces.txt.
+ The scaler has an input port (0) and an output port (1).
+
+Required port properties:
+
+- xlnx,video-format: Must be one of XVIP_VF_RBG or XVIP_VF_YUV_422 for
+  input port (0) and must be XVIP_VF_RBG or XVIP_VF_YUV_422 for
+ the output port (1).
+ See <dt-bindings/media/xilinx-vip.h> for more details.
+
+- reg: This value represents the media pad of the V4L2 sub-device.
+ A Sink Pad is represented by reg = <0>
+ A Source Pad is represented by reg = <1>
+
+- xlnx,video-width: Video width as defined in video.txt
+
+Example:
+
+ scaler_1:scaler@a0000000 {
+ compatible = "xlnx,v-vpss-scaler-1.0";
+ reg = <0x0 0xa0000000 0x0 0x40000>;
+ clocks = <&vid_stream_clk>, <&misc_clk_2>;
+ clock-names = "aclk_axis", "aclk_ctrl";
+ xlnx,num-hori-taps = <8>;
+ xlnx,num-vert-taps = <8>;
+ xlnx,pix-per-clk = <2>;
+ reset-gpios = <&gpio 87 1>;
+ xlnx,max-width = <3840>;
+ xlnx,max-height = <2160>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ /* Sink Pad */
+ reg = <0>;
+
+ xlnx,video-format = <XVIP_VF_RBG>;
+ xlnx,video-width = <8>;
+
+ scaler_in: endpoint {
+ remote-endpoint = <&csc_out>;
+ };
+ };
+
+ port@1 {
+ /* Source Pad */
+ reg = <1>;
+
+ xlnx,video-format = <XVIP_VF_YUV_422>;
+ xlnx,video-width = <8>;
+
+ scaler_out: endpoint {
+ remote-endpoint = <&vcap_tpg_in>;
+ };
+ };
+ };
+
+ };
diff --git a/Documentation/devicetree/bindings/misc/jesd-phy.txt b/Documentation/devicetree/bindings/misc/jesd-phy.txt
new file mode 100644
index 000000000000..84535cb1e905
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/jesd-phy.txt
@@ -0,0 +1,24 @@
+* Xilinx JESD204B Phy
+
+Description:
+The LogiCORE™ IP JESD204 PHY core implements a JESD204B Physical interface supporting
+line rates between 1.0 and 12.5 Gb/s on 1 to 12 lanes using GTX, GTH, or GTP transceivers.
+
+Required properties:
+- compatible = "xlnx,jesd204-phy-2.0"
+- reg = Should contain JESD204B phy registers location and length
+- xlnx,pll-selection = The PLL selection 3 for QPLL and 1 For CPLL
+- xlnx,lanes = No of Lanes
+- xlnx,gt-refclk-freq = Reference frequency in Hz
+- clocks = The phandle to the clock tree
+
+Example:
+++++++++
+ jesd204_phycores:phy@41e10000 {
+ compatible = "xlnx,jesd204-phy-2.0";
+ reg = <0x41e10000 0x10000>;
+ xlnx,gt-refclk-freq = "156250000";
+ xlnx,lanes = <0x1>;
+ xlnx,pll-selection = <0x3>;
+ clocks = <&si570>;
+ };
diff --git a/Documentation/devicetree/bindings/misc/jesd204b.txt b/Documentation/devicetree/bindings/misc/jesd204b.txt
new file mode 100644
index 000000000000..53f8192c8afa
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/jesd204b.txt
@@ -0,0 +1,28 @@
+* Xilinx JESD204B core
+
+Description:
+The LogiCORE™ IP JESD204 core implements a JESD204B core
+
+Required properties:
+- compatible = Should be one of
+ "xlnx,jesd204-5.1";
+ "xlnx,jesd204-5.2";
+ "xlnx,jesd204-6.1";
+- reg = Should contain JESD204B registers location and length
+- xlnx,frames-per-multiframe = No of frames per multiframe
+- xlnx,bytes-per-frame = No of bytes per frame
+- xlnx,lanes = No of Lanes
+- xlnx,subclass = subclass
+- xlnx,node-is-transmit = should be present only for transmit nodes
+
+Example:
+++++++++
+jesd_Tx_axi_0: jesd_Tx@44a20000 {
+ compatible = "xlnx,jesd204-5.1";
+ reg = <0x44a20000 0x10000>;
+ xlnx,frames-per-multiframe = <30>;
+ xlnx,bytes-per-frame = <2>;
+ xlnx,subclass = <1>;
+ xlnx,lanes = <0x2>;
+ xlnx,node-is-transmit;
+};
diff --git a/Documentation/devicetree/bindings/misc/xlnx,axi-traffic-gen.txt b/Documentation/devicetree/bindings/misc/xlnx,axi-traffic-gen.txt
new file mode 100644
index 000000000000..6edb8f6a3a10
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/xlnx,axi-traffic-gen.txt
@@ -0,0 +1,25 @@
+* Xilinx AXI Traffic generator IP
+
+Required properties:
+- compatible: "xlnx,axi-traffic-gen"
+- interrupts: Should contain AXI Traffic Generator interrupts.
+- interrupt-parent: Must be core interrupt controller.
+- reg: Should contain AXI Traffic Generator registers location and length.
+- interrupt-names: Should contain both the intr names of device - error
+ and completion.
+- xlnx,device-id: Device instance Id.
+
+Optional properties:
+- clocks: Input clock specifier. Refer to common clock bindings.
+
+Example:
+++++++++
+axi_traffic_gen_1: axi-traffic-gen@76000000 {
+ compatible = "xlnx,axi-traffic-gen-1.0", "xlnx,axi-traffic-gen";
+ clocks = <&clkc 15>;
+ interrupts = <0 2 2 2>;
+ interrupt-parent = <&axi_intc_1>;
+ interrupt-names = "err-out", "irq-out";
+ reg = <0x76000000 0x800000>;
+ xlnx,device-id = <0x0>;
+} ;
diff --git a/Documentation/devicetree/bindings/misc/xlnx,fclk.txt b/Documentation/devicetree/bindings/misc/xlnx,fclk.txt
new file mode 100644
index 000000000000..e1a1acc6c5ec
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/xlnx,fclk.txt
@@ -0,0 +1,12 @@
+* Xilinx fclk clock enable
+Temporary solution for enabling the PS_PL clocks.
+
+Required properties:
+- compatible: "xlnx,fclk"
+
+Example:
+++++++++
+fclk0: fclk0 {
+ compatible = "xlnx,fclk";
+ clocks = <&clkc 71>;
+};
diff --git a/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt b/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
index 428685eb2ded..2ee70cf44497 100644
--- a/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
@@ -51,6 +51,8 @@ Optional Properties:
properly. Test mode can be used to force the controller to function.
- xlnx,int-clock-stable-broken: when present, the controller always reports
that the internal clock is stable even when it is not.
+ - pinctrl-0: pin control group to be used for this controller.
+ - pinctrl-names: must contain a "default" entry.
- xlnx,mio-bank: When specified, this will indicate the MIO bank number in
which the command and data lines are configured. If not specified, driver
diff --git a/Documentation/devicetree/bindings/mtd/arasan_nand.txt b/Documentation/devicetree/bindings/mtd/arasan_nand.txt
new file mode 100644
index 000000000000..546ed98d9777
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/arasan_nand.txt
@@ -0,0 +1,33 @@
+Arasan NAND Flash Controller with ONFI 3.1 support
+
+Required properties:
+- compatible: Should be "xlnx,zynqmp-nand", "arasan,nfc-v3p10"
+- reg: Memory map for module access
+- interrupt-parent: Interrupt controller the interrupt is routed through
+- interrupts: Should contain the interrupt for the device
+- clock-names: List of input clocks - "sys", "flash"
+ (See clock bindings for details)
+- clocks: Clock phandles (see clock bindings for details)
+
+Required properties for child node:
+- nand-ecc-mode: see nand.txt
+
+For NAND partition information please refer the below file
+Documentation/devicetree/bindings/mtd/partition.txt
+
+Example:
+ nfc: nand@ff100000 {
+		compatible = "xlnx,zynqmp-nand", "arasan,nfc-v3p10";
+		reg = <0x0 0xff100000 0x1000>;
+		clock-names = "sys", "flash";
+ clocks = <&misc_clk &misc_clk>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 14 4>;
+ #address-cells = <1>;
+		#size-cells = <0>;
+
+ nand@0 {
+			reg = <0>;
+ nand-ecc-mode = "hw";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt b/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt
index 945be7d5b236..4709bb57a2c1 100644
--- a/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt
+++ b/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt
@@ -5,6 +5,7 @@ Required properties:
Generic default - "cdns,qspi-nor".
For TI 66AK2G SoC - "ti,k2g-qspi", "cdns,qspi-nor".
For TI AM654 SoC - "ti,am654-ospi", "cdns,qspi-nor".
+ For xilinx versal - "xlnx,versal-ospi-1.0".
- reg : Contains two entries, each of which is a tuple consisting of a
physical address and length. The first entry is the address and
length of the controller register set. The second entry is the
@@ -14,6 +15,7 @@ Required properties:
- cdns,fifo-depth : Size of the data FIFO in words.
- cdns,fifo-width : Bus width of the data FIFO in bytes.
- cdns,trigger-address : 32-bit indirect AHB trigger address.
+- reset-gpios : GPIO to be used to reset the OSPI flash device.
Optional properties:
- cdns,is-decoded-cs : Flag to indicate whether decoder is used or not.
diff --git a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
index f03be904d3c2..51125e31e1e6 100644
--- a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
+++ b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
@@ -78,6 +78,14 @@ Optional properties:
cannot reboot properly if the flash is left in the "wrong"
state. This boolean flag can be used on such systems, to
denote the absence of a reliable reset mechanism.
+ - multi-die : Some flash devices have multiple dies in it. Read operation
+ in these devices is bounded by its die segment. In a
+ continuous read, across multiple dies, when the last byte of
+ the selected die segment is read, the next byte read is the
+ first byte of the same die segment. So to handle this issue,
+ split a read transaction, that spans across multiple banks,
+ into one read per bank. This boolean flag can be used for such
+ flash devices, to denote the presence of multiple dies.
Example:
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
index 0b61a90f1592..66cf95c88be7 100644
--- a/Documentation/devicetree/bindings/net/macb.txt
+++ b/Documentation/devicetree/bindings/net/macb.txt
@@ -16,6 +16,7 @@ Required properties:
Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC.
Use "cdns,zynqmp-gem" for Zynq Ultrascale+ MPSoC.
Use "sifive,fu540-c000-gem" for SiFive FU540-C000 SoC.
+ Use "cdns,versal-gem" for Xilinx Versal.
Or the generic form: "cdns,emac".
- reg: Address and length of the register set for the device
For "sifive,fu540-c000-gem", second range is required to specify the
@@ -38,12 +39,18 @@ Optional properties for PHY child node:
up via magic packet.
- phy-handle : see ethernet.txt file in the same directory
+Optional properties:
+- rx-watermark: Set watermark value for pbuf_rxcutthru reg and enable
+ rx partial store and forward, only when compatible = "cdns,zynqmp-gem".
+ Value should be less than 0xFFF.
+
Examples:
macb0: ethernet@fffc4000 {
compatible = "cdns,at32ap7000-macb";
reg = <0xfffc4000 0x4000>;
interrupts = <21>;
+ rx-watermark = /bits/ 16 <0x44>;
phy-mode = "rmii";
local-mac-address = [3a 0e 03 04 05 06];
clock-names = "pclk", "hclk", "tx_clk";
diff --git a/Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt b/Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt
index 5ff37c68c941..671eeeaa68ba 100644
--- a/Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt
+++ b/Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt
@@ -31,7 +31,14 @@ Optional properties:
VSC8531_LINK_100_ACTIVITY (2),
VSC8531_LINK_ACTIVITY (0) and
VSC8531_DUPLEX_COLLISION (8).
-
+- vsc8531,rx-delay : RGMII RX delay. Allowed values are defined in
+ "include/dt-bindings/net/mscc-phy-vsc8531.h".
+ Default value, set by the driver is
+ VSC8531_RGMII_CLK_DELAY_1_1_NS.
+- vsc8531,tx-delay : RGMII TX delay. Allowed values are defined in
+ "include/dt-bindings/net/mscc-phy-vsc8531.h".
+ Default value, set by the driver is
+ VSC8531_RGMII_CLK_DELAY_0_2_NS.
Table: 1 - Edge rate change
----------------------------------------------------------------|
diff --git a/Documentation/devicetree/bindings/net/xilinx-phy.txt b/Documentation/devicetree/bindings/net/xilinx-phy.txt
new file mode 100644
index 000000000000..aeb9917497b9
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/xilinx-phy.txt
@@ -0,0 +1,15 @@
+Xilinx PCS/PMA PHY bindings
+
+Required properties:
+ - reg - The ID number for the phy, usually a small integer
+
+Optional properties:
+ - xlnx,phy-type - Describes type 1000BaseX (set to 0x5) or
+ SGMII (set to 0x4)
+
+Example:
+
+ ethernet-phy@9 {
+ reg = <9>;
+ xlnx,phy-type = <0x5>;
+ };
diff --git a/Documentation/devicetree/bindings/net/xilinx-tsn-ethernet.txt b/Documentation/devicetree/bindings/net/xilinx-tsn-ethernet.txt
new file mode 100644
index 000000000000..e66b64bc10e0
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/xilinx-tsn-ethernet.txt
@@ -0,0 +1,54 @@
+Xilinx TSN (time sensitive networking) TEMAC axi ethernet driver (xilinx_axienet)
+-----------------------------------------------------------------------
+
+Required properties:
+- compatible : Should be "xlnx,tsn-ethernet-1.00.a".
+- reg : Physical base address and size of the TSN registers map.
+- interrupts : Property with a value describing the interrupt
+ number.
+- interrupt-names : Property denotes the interrupt names.
+- interrupt-parent : Must be core interrupt controller.
+- phy-handle : See ethernet.txt file [1].
+- local-mac-address : See ethernet.txt file [1].
+- phy-mode : see ethernet.txt file [1].
+
+Optional properties:
+- xlnx,tsn : Denotes an ethernet with TSN capabilities.
+- xlnx,tsn-slave : Denotes a TSN slave port.
+- xlnx,txcsum : Tx checksum mode (Full, Partial and None).
+- xlnx,rxcsum : Rx checksum mode (Full, Partial and None).
+- xlnx,phy-type : Xilinx phy device type. See xilinx-phy.txt [2].
+- xlnx,eth-hasnobuf : Used when 1G MAC is configured in non-processor mode.
+- xlnx,num-queue : Number of queue supported in current design, range is
+ 2 to 5 and default value is 5.
+- xlnx,num-tc : Number of traffic class supported in current design,
+ range is 2,3 and default value is 3. It denotes
+ the traffic classes based on VLAN-PCP value.
+- xlnx,qbv-addr : Denotes mac scheduler physical base address.
+- xlnx,qbv-size : Denotes mac scheduler address space size.
+
+[1] Documentation/devicetree/bindings/net/ethernet.txt
+[2] Documentation/devicetree/bindings/net/xilinx-phy.txt
+
+Example:
+
+ tsn_emac_0: tsn_mac@80040000 {
+ compatible = "xlnx,tsn-ethernet-1.00.a";
+ interrupt-parent = <&gic>;
+ interrupts = <0 104 4 0 106 4 0 91 4 0 110 4>;
+ interrupt-names = "interrupt_ptp_rx_1", "interrupt_ptp_tx_1", "mac_irq_1", "interrupt_ptp_timer";
+ local-mac-address = [ 00 0A 35 00 01 0e ];
+ phy-mode = "rgmii";
+ reg = <0x0 0x80040000 0x0 0x14000>;
+ tsn,endpoint = <&tsn_ep>;
+ xlnx,tsn;
+ xlnx,tsn-slave;
+ xlnx,phy-type = <0x3>;
+ xlnx,eth-hasnobuf;
+ xlnx,num-queue = <0x2>;
+ xlnx,num-tc = <0x3>;
+ xlnx,qbv-addr = <0x80054000>;
+ xlnx,qbv-size = <0x2000>;
+ xlnx,txcsum = <0>;
+ xlnx,rxcsum = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/net/xilinx_axienet.txt b/Documentation/devicetree/bindings/net/xilinx_axienet.txt
index 7360617cdedb..7d82c6c771df 100644
--- a/Documentation/devicetree/bindings/net/xilinx_axienet.txt
+++ b/Documentation/devicetree/bindings/net/xilinx_axienet.txt
@@ -1,34 +1,45 @@
XILINX AXI ETHERNET Device Tree Bindings
--------------------------------------------------------
-Also called AXI 1G/2.5G Ethernet Subsystem, the xilinx axi ethernet IP core
-provides connectivity to an external ethernet PHY supporting different
-interfaces: MII, GMII, RGMII, SGMII, 1000BaseX. It also includes two
-segments of memory for buffering TX and RX, as well as the capability of
-offloading TX/RX checksum calculation off the processor.
+This driver supports following MAC configurations-
+a) AXI 1G/2.5G Ethernet Subsystem.
+b) 10G/25G High Speed Ethernet Subsystem.
+c) 10 Gigabit Ethernet Subsystem.
+d) USXGMII Ethernet Subsystem.
+
+AXI 1G/2.5G Ethernet Subsystem- the xilinx axi ethernet IP core provides
+connectivity to an external ethernet PHY supporting different interfaces:
+MII, GMII, RGMII, SGMII, 1000BaseX.
+It also includes two segments of memory for buffering TX and RX, as well as
+the capability of offloading TX/RX checksum calculation off the processor.
Management configuration is done through the AXI interface, while payload is
sent and received through means of an AXI DMA controller. This driver
includes the DMA driver code, so this driver is incompatible with AXI DMA
driver.
-For more details about mdio please refer phy.txt file in the same directory.
+For details about MDIO please refer phy.txt [1].
Required properties:
-- compatible : Must be one of "xlnx,axi-ethernet-1.00.a",
- "xlnx,axi-ethernet-1.01.a", "xlnx,axi-ethernet-2.01.a"
+- compatible : Must be one of "xlnx,axi-ethernet-1.00.a" or
+ "xlnx,axi-ethernet-1.01.a" or "xlnx,axi-ethernet-2.01.a"
+ for 1G MAC,
+ "xlnx,ten-gig-eth-mac" for 10 Gigabit Ethernet Subsystem,
+ "xlnx,xxv-ethernet-1.0" for 10G/25G MAC,
+ "xlnx,axi-2_5-gig-ethernet-1.0" for 2.5G MAC and
+ "xlnx,xxv-usxgmii-ethernet-1.0" for USXGMII.
- reg : Address and length of the IO space, as well as the address
and length of the AXI DMA controller IO space, unless
axistream-connected is specified, in which case the reg
attribute of the node referenced by it is used.
- interrupts : Should be a list of 2 or 3 interrupts: TX DMA, RX DMA,
- and optionally Ethernet core. If axistream-connected is
- specified, the TX/RX DMA interrupts should be on that node
- instead, and only the Ethernet core interrupt is optionally
- specified here.
+ and optionally Ethernet core.
- phy-handle : Should point to the external phy device.
See ethernet.txt file in the same directory.
- xlnx,rxmem : Set to allocated memory buffer for Rx/Tx in the hardware
+Required properties (When AxiEthernet is configured with MCDMA):
+- xlnx,channel-ids : Queue Identifier associated with the MCDMA Channel.
+- interrupt-names : Should contain the interrupt names.
Optional properties:
- phy-mode : See ethernet.txt
@@ -38,39 +49,71 @@ Optional properties:
1 to enable partial TX checksum offload,
2 to enable full TX checksum offload
- xlnx,rxcsum : Same values as xlnx,txcsum but for RX checksum offload
-- clocks : AXI bus clock for the device. Refer to common clock bindings.
- Used to calculate MDIO clock divisor. If not specified, it is
- auto-detected from the CPU clock (but only on platforms where
- this is possible). New device trees should specify this - the
- auto detection is only for backward compatibility.
-- axistream-connected: Reference to another node which contains the resources
- for the AXI DMA controller used by this device.
- If this is specified, the DMA-related resources from that
- device (DMA registers and DMA TX/RX interrupts) rather
- than this one will be used.
+- clocks : Input clock specifier. Refer to common clock bindings.
+- clock-names : Input clock names. Refer to IP PG for signal description.
+ 1G/2.5G: s_axi_lite_clk, axis_clk and ref_clk.
+ 10G/25G and USXGMII: s_axi_aclk, rx_core_clk and dclk.
+ 10 Gigabit: s_axi_aclk and dclk.
+ AXI DMA and MCDMA: m_axi_sg_aclk, m_axi_mm2s_aclk and
+ m_axi_s2mm_aclk.
- mdio : Child node for MDIO bus. Must be defined if PHY access is
required through the core's MDIO interface (i.e. always,
unless the PHY is accessed through a different bus).
+- dma-coherent : Present if dma operations are coherent.
+- xlnx,eth-hasnobuf : Used when 1G MAC is configured in non-processor mode.
+- xlnx,rxtsfifo : Configures the axi fifo for receive timestamping.
+
+Optional properties for connected DMA node:
+- xlnx,addrwidth : Specify the width of the DMA address space in bits.
+ Value type is u8. Valid range is 32-64. Default is 32.
+- xlnx,include-dre : Tells whether DMA h/w is configured with data
+ realignment engine(DRE) or not.
+
+Optional properties (When USXGMII is in use):
+- xlnx,usxgmii-rate : USXGMII PHY speed - can be 10, 100, 1000, 2500,
+ 5000 or 10000.
+
+Optional properties (When AxiEthernet is configured with MCDMA):
+- xlnx,num-queues : Number of queues h/w configured for.
+
+NOTE: Time Sensitive Networking (TSN) related DT bindings are explained in [4].
+
+[1] Documentation/devicetree/bindings/net/phy.txt
+[2] Documentation/devicetree/bindings/net/ethernet.txt
+[3] Documentation/devicetree/bindings/net/xilinx-phy.txt
+[4] Documentation/devicetree/bindings/net/xilinx_tsn.txt
+
+
+Example: AXI 1G/2.5G Ethernet Subsystem + AXIDMA
+
+ axi_eth_0_dma: dma@80040000 {
+ #dma-cells = <1>;
+ compatible = "xlnx,eth-dma";
+ xlnx,addrwidth = /bits/ 8 <32>;
+ <snip>
+ };
-Example:
- axi_ethernet_eth: ethernet@40c00000 {
- compatible = "xlnx,axi-ethernet-1.00.a";
- device_type = "network";
- interrupt-parent = <&microblaze_0_axi_intc>;
- interrupts = <2 0 1>;
- clocks = <&axi_clk>;
- phy-mode = "mii";
- reg = <0x40c00000 0x40000 0x50c00000 0x40000>;
- xlnx,rxcsum = <0x2>;
- xlnx,rxmem = <0x800>;
- xlnx,txcsum = <0x2>;
- phy-handle = <&phy0>;
- axi_ethernetlite_0_mdio: mdio {
- #address-cells = <1>;
- #size-cells = <0>;
- phy0: phy@0 {
- device_type = "ethernet-phy";
- reg = <1>;
+ axi_eth_0: ethernet@80000000 {
+ axistream-connected = <&axi_eth_0_dma>;
+ compatible = "xlnx,axi-ethernet-1.00.a";
+ device_type = "network";
+ interrupt-names = "interrupt";
+ interrupt-parent = <&gic>;
+ interrupts = <0 91 4>;
+ phy-handle = <&phy2>;
+ phy-mode = "sgmii";
+ reg = <0x0 0x80000000 0x0 0x40000>;
+ xlnx,include-dre ;
+ xlnx,phy-type = <0x5>;
+ xlnx,rxcsum = <0x0>;
+ xlnx,rxmem = <0x1000>;
+ xlnx,txcsum = <0x0>;
+ axi_eth_0_mdio: mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy2: phy@2 {
+ device_type = "ethernet-phy";
+ reg = <2>;
+ };
};
- };
};
diff --git a/Documentation/devicetree/bindings/net/xilinx_emaclite.txt b/Documentation/devicetree/bindings/net/xilinx_emaclite.txt
new file mode 100644
index 000000000000..989d29efea16
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/xilinx_emaclite.txt
@@ -0,0 +1,35 @@
+Xilinx Axi Ethernetlite controller Device Tree Bindings
+---------------------------------------------------------
+
+Required properties:
+- compatible : Should be "xlnx,opb-ethernetlite-1.01.a" or
+ "xlnx,opb-ethernetlite-1.01.b" or
+ "xlnx,opb-ethernetlite-1.00.a" or
+ "xlnx,xps-ethernetlite-2.00.a" or
+ "xlnx,xps-ethernetlite-2.01.a" or
+ "xlnx,xps-ethernetlite-3.00.a".
+- reg : Physical base address and size of the Axi ethernetlite
+ registers map.
+- interrupts : Property with a value describing the interrupt
+ number.
+- interrupt-parent : Must be core interrupt controller.
+- phy-handle : See ethernet.txt file in the same directory.
+
+Optional properties:
+- local-mac-address : See ethernet.txt file in the same directory.
+ If absent, random mac address is selected.
+- xlnx,tx-ping-pong : If present, hardware supports tx ping pong buffer.
+- xlnx,rx-ping-pong : If present, hardware supports rx ping pong buffer.
+
+Example:
+ axi_ethernetlite_1: ethernet@40e00000 {
+ compatible = "xlnx,axi-ethernetlite-3.0", "xlnx,xps-ethernetlite-1.00.a";
+ device_type = "network";
+ interrupt-parent = <&axi_intc_1>;
+ interrupts = <1 0>;
+ local-mac-address = [00 0a 35 00 00 00];
+ phy-handle = <&phy0>;
+ reg = <0x40e00000 0x10000>;
+ xlnx,rx-ping-pong;
+ xlnx,tx-ping-pong;
+ };
diff --git a/Documentation/devicetree/bindings/net/xilinx_tsn.txt b/Documentation/devicetree/bindings/net/xilinx_tsn.txt
new file mode 100644
index 000000000000..8ef9fa9f3968
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/xilinx_tsn.txt
@@ -0,0 +1,14 @@
+Xilinx TSN (time sensitive networking) IP driver (xilinx_tsn_ip)
+-----------------------------------------------------------------------
+
+Required properties:
+- compatible : Should be one of "xlnx,tsn-endpoint-ethernet-mac-1.0",
+ "xlnx,tsn-endpoint-ethernet-mac-2.0" for TSN.
+- reg : Physical base address and size of the TSN registers map.
+
+Example:
+
+ tsn_endpoint_ip_0: tsn_endpoint_ip_0 {
+ compatible = "xlnx,tsn-endpoint-ethernet-mac-2.0";
+ reg = <0x0 0x80040000 0x0 0x40000>;
+ };
diff --git a/Documentation/devicetree/bindings/net/xilinx_tsn_ep.txt b/Documentation/devicetree/bindings/net/xilinx_tsn_ep.txt
new file mode 100644
index 000000000000..f42e5417d164
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/xilinx_tsn_ep.txt
@@ -0,0 +1,35 @@
+Xilinx TSN (time sensitive networking) EndPoint Driver (xilinx_tsn_ep)
+-------------------------------------------------------------------------------
+
+Required properties:
+- compatible : Should be "xlnx,tsn-ep"
+- reg : Physical base address and size of the TSN Endpoint
+ registers map
+- interrupts : Property with a value describing the interrupt
+- interrupt-names : Property denotes the interrupt names.
+- interrupt-parent : Must be core interrupt controller.
+- local-mac-address : See ethernet.txt [1].
+
+Optional properties:
+- xlnx,num-tc : Number of traffic class supported in current design,
+ range is 2,3 and default value is 3. It denotes
+ the traffic classes based on VLAN-PCP value.
+- xlnx,channel-ids : Queue Identifier associated with the MCDMA Channel, range
+ is Tx: "1 to 2" and Rx: "2 to 5", default value is "1 to 5".
+- xlnx,eth-hasnobuf : Used when 1G MAC is configured in non-processor mode.
+
+[1] Documentation/devicetree/bindings/net/ethernet.txt
+
+Example:
+
+ tsn_ep: tsn_ep@80056000 {
+ compatible = "xlnx,tsn-ep";
+ reg = <0x0 0x80056000 0x0 0xA000>;
+ xlnx,num-tc = <0x3>;
+ interrupt-names = "tsn_ep_scheduler_irq";
+ interrupt-parent = <&gic>;
+ interrupts = <0 111 4>;
+ local-mac-address = [00 0A 35 00 01 10];
+ xlnx,channel-ids = "1","2","3","4","5";
+ xlnx,eth-hasnobuf ;
+ };
diff --git a/Documentation/devicetree/bindings/net/xilinx_tsn_switch.txt b/Documentation/devicetree/bindings/net/xilinx_tsn_switch.txt
new file mode 100644
index 000000000000..898e5b7b57e7
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/xilinx_tsn_switch.txt
@@ -0,0 +1,23 @@
+Xilinx TSN (time sensitive networking) Switch Driver (xilinx_tsn_switch)
+-----------------------------------------------------------------------------
+
+Required properties:
+- compatible : Should be "xlnx,tsn-switch"
+- reg : Physical base address and size of the TSN registers map.
+
+Optional properties:
+- xlnx,num-tc : Number of traffic class supported in current design,
+ range is 2,3 and default value is 3. It denotes
+ the traffic classes based on VLAN-PCP value.
+- xlnx,has-hwaddr-learning : Denotes hardware address learning support
+- xlnx,has-inband-mgmt-tag : Denotes inband management support
+
+Example:
+
+ epswitch: tsn_switch@80078000 {
+ compatible = "xlnx,tsn-switch";
+ reg = <0x0 0x80078000 0x0 0x4000>;
+ xlnx,num-tc = <0x3>;
+ xlnx,has-hwaddr-learning ;
+ xlnx,has-inband-mgmt-tag ;
+ };
diff --git a/Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.txt b/Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.txt
index 4881561b3a02..be126ccf4802 100644
--- a/Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.txt
+++ b/Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.txt
@@ -25,9 +25,78 @@ firmware {
#size-cells = <1>;
/* Data cells */
- soc_revision: soc_revision {
+ soc_revision: soc_revision@0 {
reg = <0x0 0x4>;
};
+ /*
+ * efuse memory access:
+ * all the efuse fields need to be read
+ * with the exact size specified in the node
+ */
+ /* DNA */
+ efuse_dna: efuse_dna@c {
+ reg = <0xc 0xc>;
+ };
+ /* User 0 */
+ efuse_usr0: efuse_usr0@20 {
+ reg = <0x20 0x4>;
+ };
+ /* User 1 */
+ efuse_usr1: efuse_usr1@24 {
+ reg = <0x24 0x4>;
+ };
+ /* User 2 */
+ efuse_usr2: efuse_usr2@28 {
+ reg = <0x28 0x4>;
+ };
+ /* User 3 */
+ efuse_usr3: efuse_usr3@2c {
+ reg = <0x2c 0x4>;
+ };
+ /* User 4 */
+ efuse_usr4: efuse_usr4@30 {
+ reg = <0x30 0x4>;
+ };
+ /* User 5 */
+ efuse_usr5: efuse_usr5@34 {
+ reg = <0x34 0x4>;
+ };
+ /* User 6 */
+ efuse_usr6: efuse_usr6@38 {
+ reg = <0x38 0x4>;
+ };
+ /* User 7 */
+ efuse_usr7: efuse_usr7@3c {
+ reg = <0x3c 0x4>;
+ };
+ /* Misc user control bits */
+ efuse_miscusr: efuse_miscusr@40 {
+ reg = <0x40 0x4>;
+ };
+ /* PUF chash */
+ efuse_chash: efuse_chash@50 {
+ reg = <0x50 0x4>;
+ };
+ /* PUF misc */
+ efuse_pufmisc: efuse_pufmisc@54 {
+ reg = <0x54 0x4>;
+ };
+ /* SEC_CTRL */
+ efuse_sec: efuse_sec@58 {
+ reg = <0x58 0x4>;
+ };
+ /* SPK ID */
+ efuse_spkid: efuse_spkid@5c {
+ reg = <0x5c 0x4>;
+ };
+ /* PPK0 hash */
+ efuse_ppk0hash: efuse_ppk0hash@a0 {
+ reg = <0xa0 0x30>;
+ };
+ /* PPK1 hash */
+ efuse_ppk1hash: efuse_ppk1hash@d0 {
+ reg = <0xd0 0x30>;
+ };
};
};
};
@@ -44,3 +113,22 @@ For example:
...
};
+
+To program efuse memory, one should request specified bytes of size as below,
+NOTE: Efuse bits once programmed cannot be reverted.
+
+ - | TYPE | OFFSET | SIZE(bytes) |
+ - |User-0 | 0x20 | 0x4 |
+ - |User-1 | 0x24 | 0x4 |
+ - |User-2 | 0x28 | 0x4 |
+ - |User-3 | 0x2C | 0x4 |
+ - |User-4 | 0x30 | 0x4 |
+ - |User-5 | 0x34 | 0x4 |
+ - |User-6 | 0x38 | 0x4 |
+ - |User-7 | 0x3c | 0x4 |
+ - |Misc User | 0x40 | 0x4 |
+ - |SEC_CTRL | 0x58 | 0x4 |
+ - |SPK ID | 0x5C | 0x4 |
+ - |AES KEY | 0x60 | 0x20 |
+ - |PPK0 hash | 0xA0 | 0x30 |
+ - |PPK1 hash | 0xD0 | 0x30 |
diff --git a/Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt b/Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt
index 01bf7fdf4c19..c12bcf0f8947 100644
--- a/Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/xilinx-nwl-pcie.txt
@@ -18,6 +18,7 @@ Required properties:
"msi1, msi0": interrupt asserted when an MSI is received
"intx": interrupt asserted when a legacy interrupt is received
"misc": interrupt asserted when miscellaneous interrupt is received
+- clocks: Should contain a clock specifier for the device
- interrupt-map-mask and interrupt-map: standard PCI properties to define the
mapping of the PCI interface to interrupt numbers.
- ranges: ranges for the PCI memory regions (I/O space region is not
@@ -52,6 +53,7 @@ nwl_pcie: pcie@fd0e0000 {
<0x0 0x0 0x0 0x2 &pcie_intc 0x2>,
<0x0 0x0 0x0 0x3 &pcie_intc 0x3>,
<0x0 0x0 0x0 0x4 &pcie_intc 0x4>;
+ clocks = <&clkc 23>;
msi-parent = <&nwl_pcie>;
reg = <0x0 0xfd0e0000 0x0 0x1000>,
diff --git a/Documentation/devicetree/bindings/pci/xilinx-xdma-pl-pcie.txt b/Documentation/devicetree/bindings/pci/xilinx-xdma-pl-pcie.txt
new file mode 100644
index 000000000000..70f2bb52dbab
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/xilinx-xdma-pl-pcie.txt
@@ -0,0 +1,133 @@
+* Xilinx XDMA PL PCIe Root Port Bridge DT description
+
+Required properties:
+- #address-cells: Address representation for root ports, set to <3>
+- #size-cells: Size representation for root ports, set to <2>
+- #interrupt-cells: specifies the number of cells needed to encode an
+ interrupt source. The value must be 1.
+- compatible: Should contain "xlnx,xdma-host-3.00"
+- reg: Should contain XDMA PCIe registers location and length
+- device_type: must be "pci"
+- interrupts: Should contain AXI PCIe interrupt
+- interrupt-map-mask,
+ interrupt-map: standard PCI properties to define the mapping of the
+ PCI interface to interrupt numbers.
+- ranges: ranges for the PCI memory regions (I/O space region is not
+ supported by hardware)
+ Please refer to the standard PCI bus binding document for a more
+ detailed explanation
+
+For MSI DECODE mode:
+- interrupt-names: Must include the following entries:
+ "misc": interrupt asserted when legacy or error interrupt is received
+ "msi1, msi0": interrupt asserted when an MSI is received
+
+Interrupt controller child node
++++++++++++++++++++++++++++++++
+Required properties:
+- interrupt-controller: identifies the node as an interrupt controller
+- #address-cells: specifies the number of cells needed to encode an
+ address. The value must be 0.
+- #interrupt-cells: specifies the number of cells needed to encode an
+ interrupt source. The value must be 1.
+
+NOTE:
+The core provides a single interrupt for both INTx/MSI messages. So,
+created a interrupt controller node to support 'interrupt-map' DT
+functionality. The driver will create an IRQ domain for this map, decode
+the four INTx interrupts in ISR and route them to this domain.
+
+
+Example:
+++++++++
+MSI FIFO mode:
+ xdma_0: axi-pcie@a0000000 {
+ #address-cells = <3>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ compatible = "xlnx,xdma-host-3.00";
+ device_type = "pci";
+ interrupt-map = <0 0 0 1 &pcie_intc_0 1>,
+ <0 0 0 2 &pcie_intc_0 2>,
+ <0 0 0 3 &pcie_intc_0 3>,
+ <0 0 0 4 &pcie_intc_0 4>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 89 4>;
+ ranges = <0x02000000 0x00000000 0xB0000000 0x0 0xB0000000 0x00000000 0x01000000>,
+ <0x43000000 0x00000005 0x00000000 0x00000005 0x00000000 0x00000000 0x01000000>;
+ reg = <0x0 0xA0000000 0x0 0x10000000>;
+ pcie_intc_0: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller ;
+ };
+ };
+
+MSI DECODE mode:
+ xdma_0: axi-pcie@a0000000 {
+ #address-cells = <3>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ compatible = "xlnx,xdma-host-3.00";
+ device_type = "pci";
+ interrupt-map = <0 0 0 1 &pcie_intc_0 1>, <0 0 0 2 &pcie_intc_0 2>, <0 0 0 3 &pcie_intc_0 3>, <0 0 0 4 &pcie_intc_0 4>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-parent = <&gic>;
+ interrupt-names = "misc", "msi0", "msi1";
+ interrupts = <0 89 4>, <0 90 4>, <0 91 4>;
+ ranges = <0x02000000 0x00000000 0xB0000000 0x0 0xB0000000 0x00000000 0x01000000>,
+ <0x43000000 0x00000005 0x00000000 0x00000005 0x00000000 0x00000000 0x01000000>;
+ reg = <0x0 0xA0000000 0x0 0x10000000>;
+ pcie_intc_0: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller ;
+ };
+ };
+
+Versal CPM host Bridge DT description.
+
+The properties and their meanings are identical to those described in
+above Xilinx XDMA PL PCIe Root Port Bridge description.
+
+Properties that differ are:
+- compatible: Should contain "xlnx,versal-cpm-host-1.00"
+- reg: Should contain configuration space and CPM system level control and
+ status registers, and length
+- reg-names: Must include the following entries:
+ "cfg": configuration space region
+ "cpm_slcr": CPM system level control and status registers
+- msi-map: Maps a Requester ID to an MSI controller and associated MSI
+ sideband data
+
+Refer to the following binding document for more detailed description on
+the use of 'msi-map':
+ Documentation/devicetree/bindings/pci/pci-msi.txt
+
+Example:
+ pci@fca10000 {
+ #address-cells = <3>;
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ compatible = "xlnx,versal-cpm-host-1.00";
+ interrupt-map = <0 0 0 1 &pcie_intc_0 1>,
+ <0 0 0 2 &pcie_intc_0 2>,
+ <0 0 0 3 &pcie_intc_0 3>,
+ <0 0 0 4 &pcie_intc_0 4>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-parent = <&gic>;
+ interrupt-names = "misc";
+ interrupts = <0 72 4>;
+ ranges = <0x02000000 0x00000000 0xE0000000 0x0 0xE0000000 0x00000000 0x10000000>,
+ <0x43000000 0x00000080 0x00000000 0x00000080 0x00000000 0x00000000 0x80000000>;
+ msi-map = <0x0 &its_gic 0x0 0x10000>;
+ reg = <0x0 0xfca10000 0x0 0x1000>,
+ <0x6 0x00000000 0x0 0x1000000>;
+ reg-names = "cpm_slcr", "cfg";
+ pcie_intc_0: pci-interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller ;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/perf/xilinx-apm.yaml b/Documentation/devicetree/bindings/perf/xilinx-apm.yaml
new file mode 100644
index 000000000000..30418486b9fe
--- /dev/null
+++ b/Documentation/devicetree/bindings/perf/xilinx-apm.yaml
@@ -0,0 +1,128 @@
+# SPDX-License-Identifier: GPL-2.0-or-later OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/perf/xilinx-apm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Xilinx Axi Performance Monitor device tree bindings
+
+maintainers:
+ - Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - xlnx,axi-perf-monitor
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ xlnx,enable-profile:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [0, 1]
+ description:
+ Enables the profile mode.
+ maxItems: 1
+
+ xlnx,enable-trace:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [0, 1]
+ description:
+ Enables trace mode.
+ maxItems: 1
+
+ xlnx,num-monitor-slots:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - minimum: 1
+ - maximum: 8
+ description:
+ Number of monitor slots.
+
+ xlnx,enable-event-count:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [0, 1]
+ description:
+ Enable event count.
+
+ xlnx,enable-event-log:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [0, 1]
+ description:
+ Enable event log.
+
+ xlnx,have-sampled-metric-cnt:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Sampled metric counters enabled in APM.
+
+ xlnx,metric-count-width:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [32, 64]
+ description:
+ Metric Counter width.
+
+ xlnx,num-of-counters:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Number of counters in APM.
+
+ xlnx,metrics-sample-count-width:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [32, 64]
+ description:
+ Sampled metric counter width.
+
+ xlnx,global-count-width:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [32, 64]
+ description:
+ Global Clock counter width.
+
+ xlnx,id-filter-32bit:
+ description: APM is in 32-bit mode.
+ type: boolean
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+
+examples:
+ - |
+ apm@44a00000 {
+ compatible = "xlnx,axi-perf-monitor";
+ interrupt-parent = <&axi_intc_1>;
+ interrupts = <1 2>;
+ reg = <0x44a00000 0x1000>;
+ clocks = <&clkc 15>;
+ xlnx,enable-profile = <0>;
+ xlnx,enable-trace = <0>;
+ xlnx,num-monitor-slots = <4>;
+ xlnx,enable-event-count = <1>;
+ xlnx,enable-event-log = <1>;
+ xlnx,have-sampled-metric-cnt = <1>;
+ xlnx,num-of-counters = <8>;
+ xlnx,metric-count-width = <32>;
+ xlnx,metrics-sample-count-width = <32>;
+ xlnx,global-count-width = <32>;
+ xlnx,metric-count-scale = <1>;
+ xlnx,id-filter-32bit;
+ };
diff --git a/Documentation/devicetree/bindings/perf/xlnx-flexnoc-pm.yaml b/Documentation/devicetree/bindings/perf/xlnx-flexnoc-pm.yaml
new file mode 100644
index 000000000000..bd0f345e71e4
--- /dev/null
+++ b/Documentation/devicetree/bindings/perf/xlnx-flexnoc-pm.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0-or-later OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/perf/xlnx-flexnoc-pm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Xilinx flexnoc Performance Monitor device tree bindings
+
+maintainers:
+ - Arnd Bergmann <arnd@arndb.de>
+ - Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+properties:
+ compatible:
+ # Versal SoC based boards
+ items:
+ - enum:
+ - xlnx,flexnoc-pm-2.7
+
+ reg:
+ items:
+ - description: funnel registers
+ - description: baselpd registers
+ - description: basefpd registers
+
+ reg-names:
+ # The core schema enforces this is a string array
+ items:
+ - const: funnel
+ - const: baselpd
+ - const: basefpd
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ performance-monitor@f0920000 {
+ compatible = "xlnx,flexnoc-pm-2.7";
+ reg-names = "funnel", "baselpd", "basefpd";
+ reg = <0x0 0xf0920000 0x0 0x1000>,
+ <0x0 0xf0980000 0x0 0x9000>,
+ <0x0 0xf0b80000 0x0 0x9000>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/phy-zynqmp.txt b/Documentation/devicetree/bindings/phy/phy-zynqmp.txt
new file mode 100644
index 000000000000..ed080df891a4
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/phy-zynqmp.txt
@@ -0,0 +1,119 @@
+Xilinx ZynqMP PHY binding
+
+This binding describes a ZynqMP PHY device that is used to control ZynqMP
+High Speed Gigabit Transceiver(GT). ZynqMP PS GTR provides four lanes
+and are used by USB, SATA, PCIE, Display port and Ethernet SGMII controllers.
+
+Required properties (controller (parent) node):
+- compatible : Can be "xlnx,zynqmp-psgtr-v1.1" or "xlnx,zynqmp-psgtr"
+ "xlnx,zynqmp-psgtr-v1.1" has the lpd address mapping removed
+
+- reg : Address and length of register sets for each device in
+ "reg-names"
+- reg-names : The names of the register addresses corresponding to the
+ registers filled in "reg":
+ - serdes: SERDES block register set
+ - siou: SIOU block register set
+ - lpd: Low power domain peripherals reset control
+
+Required nodes : A sub-node is required for each lane the controller
+ provides.
+
+Required properties (port (child) nodes):
+lane0:
+- #phy-cells : Should be 4
+ Cell after port phandle is device type from:
+ - <PHY_TYPE_PCIE 0 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_SATA 0 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_USB3 0 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_DP 1 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_SGMII 0 LANE_NUM FREQUENCY>
+lane1:
+- #phy-cells : Should be 4
+ Cell after port phandle is device type from:
+ - <PHY_TYPE_PCIE 1 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_SATA 1 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_USB3 0 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_DP 0 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_SGMII 1 LANE_NUM FREQUENCY>
+lane2:
+- #phy-cells : Should be 4
+ Cell after port phandle is device type from:
+ - <PHY_TYPE_PCIE 2 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_SATA 0 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_USB3 0 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_DP 1 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_SGMII 2 LANE_NUM FREQUENCY>
+lane3:
+- #phy-cells : Should be 4
+ Cell after port phandle is device type from:
+ - <PHY_TYPE_PCIE 3 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_SATA 1 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_USB3 1 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_DP 0 LANE_NUM FREQUENCY>
+ - <PHY_TYPE_SGMII 3 LANE_NUM FREQUENCY>
+
+Note: LANE_NUM : This determines which lane's reference clock is shared by controller.
+ FREQUENCY: This is the clock frequency at which the controller wants to operate.
+
+
+Example:
+ serdes: zynqmp_phy@fd400000 {
+ compatible = "xlnx,zynqmp-psgtr";
+ status = "okay";
+ reg = <0x0 0xfd400000 0x0 0x40000>, <0x0 0xfd3d0000 0x0 0x1000>,
+ <0x0 0xff5e0000 0x0 0x1000>;
+ reg-names = "serdes", "siou", "lpd";
+
+ lane0: lane@0 {
+ #phy-cells = <4>;
+ };
+ lane1: lane@1 {
+ #phy-cells = <4>;
+ };
+ lane2: lane@2 {
+ #phy-cells = <4>;
+ };
+ lane3: lane@3 {
+ #phy-cells = <4>;
+ };
+ };
+
+Specifying phy control of devices
+=================================
+
+Device nodes should specify the configuration required in their "phys"
+property, containing a phandle to the phy port node and a device type.
+
+phys = <PHANDLE CONTROLLER_TYPE CONTROLLER_INSTANCE LANE_NUM LANE_FREQ>;
+
+PHANDLE = &lane0 or &lane1 or &lane2 or &lane3
+CONTROLLER_TYPE = PHY_TYPE_PCIE or PHY_TYPE_SATA or PHY_TYPE_USB
+ or PHY_TYPE_DP or PHY_TYPE_SGMII
+CONTROLLER_INSTANCE = Depends on controller type used, can be any of
+ PHY_TYPE_PCIE : 0 or 1 or 2 or 3
+ PHY_TYPE_SATA : 0 or 1
+ PHY_TYPE_USB : 0 or 1
+ PHY_TYPE_DP : 0 or 1
+ PHY_TYPE_SGMII: 0 or 1 or 2 or 3
+LANE_NUM = Depends on which lane clock is used as ref clk, can be
+ 0 or 1 or 2 or 3
+LANE_FREQ = Frequency that controller can operate, can be any of
+ 19.2Mhz,20Mhz,24Mhz,26Mhz,27Mhz,28.4Mhz,40Mhz,52Mhz,
+ 100Mhz,108Mhz,125Mhz,135Mhz,150Mhz
+
+Example:
+
+#include <dt-bindings/phy/phy.h>
+
+ usb@fe200000 {
+ ...
+ phys = <&lane2 PHY_TYPE_USB3 0 2 26000000>;
+ ...
+ };
+
+ ahci@fd0c0000 {
+ ...
+ phys = <&lane3 PHY_TYPE_SATA 1 1 125000000>;
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/xlnx,zynqmp-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/xlnx,zynqmp-pinctrl.txt
new file mode 100644
index 000000000000..3007f6f4705d
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/xlnx,zynqmp-pinctrl.txt
@@ -0,0 +1,275 @@
+ Binding for Xilinx ZynqMP Pinctrl
+
+Required properties:
+- compatible: "xlnx,zynqmp-pinctrl"
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+ZynqMP's pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+pin, a group, or a list of pins or groups. This configuration can include the
+mux function to select on those pin(s)/group(s), and various pin configuration
+parameters, such as pull-up, slew rate, etc.
+
+Each configuration node can consist of multiple nodes describing the pinmux and
+pinconf options. Those nodes can be pinmux nodes or pinconf nodes.
+
+The name of each subnode is not important; all subnodes should be enumerated
+and processed purely based on their content.
+
+Required properties for pinmux nodes are:
+ - groups: A list of pinmux groups.
+ - function: The name of a pinmux function to activate for the specified set
+ of groups.
+
+Required properties for configuration nodes:
+One of:
+ - pins: A list of pin names
+ - groups: A list of pinmux groups.
+
+The following generic properties as defined in pinctrl-bindings.txt are valid
+to specify in a pinmux subnode:
+ groups, function
+
+The following generic properties as defined in pinctrl-bindings.txt are valid
+to specify in a pinconf subnode:
+ groups, pins, bias-disable, bias-pull-up, bias-pull-down, slew-rate
+
+ Valid arguments for 'slew-rate' are 'SLEW_RATE_SLOW' and 'SLEW_RATE_FAST' to
+ select between slow and fast respectively.
+
+ Valid values for groups are:
+ ethernet0_0_grp, ethernet1_0_grp, ethernet2_0_grp,
+ ethernet3_0_grp, gemtsu0_0_grp, gemtsu0_1_grp,
+ gemtsu0_2_grp, mdio0_0_grp, mdio1_0_grp,
+ mdio1_1_grp, mdio2_0_grp, mdio3_0_grp,
+ qspi0_0_grp, qspi_ss_0_grp, qspi_fbclk_0_grp,
+ spi0_0_grp, spi0_ss_0_grp, spi0_ss_1_grp,
+ spi0_ss_2_grp, spi0_1_grp, spi0_ss_3_grp,
+ spi0_ss_4_grp, spi0_ss_5_grp, spi0_2_grp,
+ spi0_ss_6_grp, spi0_ss_7_grp, spi0_ss_8_grp,
+ spi0_3_grp, spi0_ss_9_grp, spi0_ss_10_grp,
+ spi0_ss_11_grp, spi0_4_grp, spi0_ss_12_grp,
+ spi0_ss_13_grp, spi0_ss_14_grp, spi0_5_grp,
+ spi0_ss_15_grp, spi0_ss_16_grp, spi0_ss_17_grp,
+ spi1_0_grp, spi1_ss_0_grp, spi1_ss_1_grp,
+ spi1_ss_2_grp, spi1_1_grp, spi1_ss_3_grp,
+ spi1_ss_4_grp, spi1_ss_5_grp, spi1_2_grp,
+ spi1_ss_6_grp, spi1_ss_7_grp, spi1_ss_8_grp,
+ spi1_3_grp, spi1_ss_9_grp, spi1_ss_10_grp,
+ spi1_ss_11_grp, spi1_4_grp, spi1_ss_12_grp,
+ spi1_ss_13_grp, spi1_ss_14_grp, spi1_5_grp,
+ spi1_ss_15_grp, spi1_ss_16_grp, spi1_ss_17_grp,
+ sdio0_0_grp, sdio0_1_grp, sdio0_2_grp,
+ sdio0_3_grp, sdio0_4_grp, sdio0_5_grp,
+ sdio0_6_grp, sdio0_7_grp, sdio0_8_grp,
+ sdio0_9_grp, sdio0_10_grp, sdio0_11_grp,
+ sdio0_12_grp, sdio0_13_grp, sdio0_14_grp,
+ sdio0_15_grp, sdio0_16_grp, sdio0_17_grp,
+ sdio0_18_grp, sdio0_19_grp, sdio0_20_grp,
+ sdio0_21_grp, sdio0_22_grp, sdio0_23_grp,
+ sdio0_24_grp, sdio0_25_grp, sdio0_26_grp,
+ sdio0_27_grp, sdio0_28_grp, sdio0_29_grp,
+ sdio0_30_grp, sdio0_31_grp, sdio0_32_grp,
+ sdio0_pc_0_grp, sdio0_cd_0_grp, sdio0_wp_0_grp,
+ sdio0_pc_1_grp, sdio0_cd_1_grp, sdio0_wp_1_grp,
+ sdio0_pc_2_grp, sdio0_cd_2_grp, sdio0_wp_2_grp,
+ sdio1_0_grp, sdio1_1_grp, sdio1_2_grp,
+ sdio1_3_grp, sdio1_4_grp, sdio1_5_grp,
+ sdio1_6_grp, sdio1_7_grp, sdio1_8_grp,
+ sdio1_9_grp, sdio1_10_grp, sdio1_11_grp,
+ sdio1_12_grp, sdio1_13_grp, sdio1_14_grp,
+ sdio1_15_grp, sdio1_pc_0_grp, sdio1_cd_0_grp,
+ sdio1_wp_0_grp, sdio1_pc_1_grp, sdio1_cd_1_grp,
+ sdio1_wp_1_grp, nand0_0_grp, nand0_ce_0_grp,
+ nand0_rb_0_grp, nand0_dqs_0_grp, nand0_ce_1_grp,
+ nand0_rb_1_grp, nand0_dqs_1_grp, can0_0_grp,
+ can0_1_grp, can0_2_grp, can0_3_grp,
+ can0_4_grp, can0_5_grp, can0_6_grp,
+ can0_7_grp, can0_8_grp, can0_9_grp,
+ can0_10_grp, can0_11_grp, can0_12_grp,
+ can0_13_grp, can0_14_grp, can0_15_grp,
+ can0_16_grp, can0_17_grp, can0_18_grp,
+ can1_0_grp, can1_1_grp, can1_2_grp,
+ can1_3_grp, can1_4_grp, can1_5_grp,
+ can1_6_grp, can1_7_grp, can1_8_grp,
+ can1_9_grp, can1_10_grp, can1_11_grp,
+ can1_12_grp, can1_13_grp, can1_14_grp,
+ can1_15_grp, can1_16_grp, can1_17_grp,
+ can1_18_grp, can1_19_grp, uart0_0_grp,
+ uart0_1_grp, uart0_2_grp, uart0_3_grp,
+ uart0_4_grp, uart0_5_grp, uart0_6_grp,
+ uart0_7_grp, uart0_8_grp, uart0_9_grp,
+ uart0_10_grp, uart0_11_grp, uart0_12_grp,
+ uart0_13_grp, uart0_14_grp, uart0_15_grp,
+ uart0_16_grp, uart0_17_grp, uart0_18_grp,
+ uart1_0_grp, uart1_1_grp, uart1_2_grp,
+ uart1_3_grp, uart1_4_grp, uart1_5_grp,
+ uart1_6_grp, uart1_7_grp, uart1_8_grp,
+ uart1_9_grp, uart1_10_grp, uart1_11_grp,
+ uart1_12_grp, uart1_13_grp, uart1_14_grp,
+ uart1_15_grp, uart1_16_grp, uart1_17_grp,
+ uart1_18_grp, i2c0_0_grp, i2c0_1_grp,
+ i2c0_2_grp, i2c0_3_grp, i2c0_4_grp,
+ i2c0_5_grp, i2c0_6_grp, i2c0_7_grp,
+ i2c0_8_grp, i2c0_9_grp, i2c0_10_grp,
+ i2c0_11_grp, i2c0_12_grp, i2c0_13_grp,
+ i2c0_14_grp, i2c0_15_grp, i2c0_16_grp,
+ i2c0_17_grp, i2c0_18_grp, i2c1_0_grp,
+ i2c1_1_grp, i2c1_2_grp, i2c1_3_grp,
+ i2c1_4_grp, i2c1_5_grp, i2c1_6_grp,
+ i2c1_7_grp, i2c1_8_grp, i2c1_9_grp,
+ i2c1_10_grp, i2c1_11_grp, i2c1_12_grp,
+ i2c1_13_grp, i2c1_14_grp, i2c1_15_grp,
+ i2c1_16_grp, i2c1_17_grp, i2c1_18_grp,
+ i2c1_19_grp, ttc0_clk_0_grp, ttc0_wav_0_grp,
+ ttc0_clk_1_grp, ttc0_wav_1_grp, ttc0_clk_2_grp,
+ ttc0_wav_2_grp, ttc0_clk_3_grp, ttc0_wav_3_grp,
+ ttc0_clk_4_grp, ttc0_wav_4_grp, ttc0_clk_5_grp,
+ ttc0_wav_5_grp, ttc0_clk_6_grp, ttc0_wav_6_grp,
+ ttc0_clk_7_grp, ttc0_wav_7_grp, ttc0_clk_8_grp,
+ ttc0_wav_8_grp, ttc1_clk_0_grp, ttc1_wav_0_grp,
+ ttc1_clk_1_grp, ttc1_wav_1_grp, ttc1_clk_2_grp,
+ ttc1_wav_2_grp, ttc1_clk_3_grp, ttc1_wav_3_grp,
+ ttc1_clk_4_grp, ttc1_wav_4_grp, ttc1_clk_5_grp,
+ ttc1_wav_5_grp, ttc1_clk_6_grp, ttc1_wav_6_grp,
+ ttc1_clk_7_grp, ttc1_wav_7_grp, ttc1_clk_8_grp,
+ ttc1_wav_8_grp, ttc2_clk_0_grp, ttc2_wav_0_grp,
+ ttc2_clk_1_grp, ttc2_wav_1_grp, ttc2_clk_2_grp,
+ ttc2_wav_2_grp, ttc2_clk_3_grp, ttc2_wav_3_grp,
+ ttc2_clk_4_grp, ttc2_wav_4_grp, ttc2_clk_5_grp,
+ ttc2_wav_5_grp, ttc2_clk_6_grp, ttc2_wav_6_grp,
+ ttc2_clk_7_grp, ttc2_wav_7_grp, ttc2_clk_8_grp,
+ ttc2_wav_8_grp, ttc3_clk_0_grp, ttc3_wav_0_grp,
+ ttc3_clk_1_grp, ttc3_wav_1_grp, ttc3_clk_2_grp,
+ ttc3_wav_2_grp, ttc3_clk_3_grp, ttc3_wav_3_grp,
+ ttc3_clk_4_grp, ttc3_wav_4_grp, ttc3_clk_5_grp,
+ ttc3_wav_5_grp, ttc3_clk_6_grp, ttc3_wav_6_grp,
+ ttc3_clk_7_grp, ttc3_wav_7_grp, ttc3_clk_8_grp,
+ ttc3_wav_8_grp, swdt0_clk_0_grp, swdt0_rst_0_grp,
+ swdt0_clk_1_grp, swdt0_rst_1_grp, swdt0_clk_2_grp,
+ swdt0_rst_2_grp, swdt0_clk_3_grp, swdt0_rst_3_grp,
+ swdt0_clk_4_grp, swdt0_rst_4_grp, swdt0_clk_5_grp,
+ swdt0_rst_5_grp, swdt0_clk_6_grp, swdt0_rst_6_grp,
+ swdt0_clk_7_grp, swdt0_rst_7_grp, swdt0_clk_8_grp,
+ swdt0_rst_8_grp, swdt0_clk_9_grp, swdt0_rst_9_grp,
+ swdt0_clk_10_grp, swdt0_rst_10_grp, swdt0_clk_11_grp,
+ swdt0_rst_11_grp, swdt0_clk_12_grp, swdt0_rst_12_grp,
+ swdt1_clk_0_grp, swdt1_rst_0_grp, swdt1_clk_1_grp,
+ swdt1_rst_1_grp, swdt1_clk_2_grp, swdt1_rst_2_grp,
+ swdt1_clk_3_grp, swdt1_rst_3_grp, swdt1_clk_4_grp,
+ swdt1_rst_4_grp, swdt1_clk_5_grp, swdt1_rst_5_grp,
+ swdt1_clk_6_grp, swdt1_rst_6_grp, swdt1_clk_7_grp,
+ swdt1_rst_7_grp, swdt1_clk_8_grp, swdt1_rst_8_grp,
+ swdt1_clk_9_grp, swdt1_rst_9_grp, swdt1_clk_10_grp,
+ swdt1_rst_10_grp, swdt1_clk_11_grp, swdt1_rst_11_grp,
+ swdt1_clk_12_grp, swdt1_rst_12_grp, gpio0_0_grp,
+ gpio0_1_grp, gpio0_2_grp, gpio0_3_grp,
+ gpio0_4_grp, gpio0_5_grp, gpio0_6_grp,
+ gpio0_7_grp, gpio0_8_grp, gpio0_9_grp,
+ gpio0_10_grp, gpio0_11_grp, gpio0_12_grp,
+ gpio0_13_grp, gpio0_14_grp, gpio0_15_grp,
+ gpio0_16_grp, gpio0_17_grp, gpio0_18_grp,
+ gpio0_19_grp, gpio0_20_grp, gpio0_21_grp,
+ gpio0_22_grp, gpio0_23_grp, gpio0_24_grp,
+ gpio0_25_grp, gpio0_26_grp, gpio0_27_grp,
+ gpio0_28_grp, gpio0_29_grp, gpio0_30_grp,
+ gpio0_31_grp, gpio0_32_grp, gpio0_33_grp,
+ gpio0_34_grp, gpio0_35_grp, gpio0_36_grp,
+ gpio0_37_grp, gpio0_38_grp, gpio0_39_grp,
+ gpio0_40_grp, gpio0_41_grp, gpio0_42_grp,
+ gpio0_43_grp, gpio0_44_grp, gpio0_45_grp,
+ gpio0_46_grp, gpio0_47_grp, gpio0_48_grp,
+ gpio0_49_grp, gpio0_50_grp, gpio0_51_grp,
+ gpio0_52_grp, gpio0_53_grp, gpio0_54_grp,
+ gpio0_55_grp, gpio0_56_grp, gpio0_57_grp,
+ gpio0_58_grp, gpio0_59_grp, gpio0_60_grp,
+ gpio0_61_grp, gpio0_62_grp, gpio0_63_grp,
+ gpio0_64_grp, gpio0_65_grp, gpio0_66_grp,
+ gpio0_67_grp, gpio0_68_grp, gpio0_69_grp,
+ gpio0_70_grp, gpio0_71_grp, gpio0_72_grp,
+ gpio0_73_grp, gpio0_74_grp, gpio0_75_grp,
+ gpio0_76_grp, gpio0_77_grp, usb0_0_grp,
+ usb1_0_grp, pmu0_0_grp, pmu0_1_grp,
+ pmu0_2_grp, pmu0_3_grp, pmu0_4_grp,
+ pmu0_5_grp, pmu0_6_grp, pmu0_7_grp,
+ pmu0_8_grp, pmu0_9_grp, pmu0_10_grp,
+ pmu0_11_grp, pcie0_0_grp, pcie0_1_grp,
+ pcie0_2_grp, pcie0_3_grp, pcie0_4_grp,
+ pcie0_5_grp, pcie0_6_grp, pcie0_7_grp,
+ csu0_0_grp, csu0_1_grp, csu0_2_grp,
+ csu0_3_grp, csu0_4_grp, csu0_5_grp,
+ csu0_6_grp, csu0_7_grp, csu0_8_grp,
+ csu0_9_grp, csu0_10_grp, csu0_11_grp,
+ dpaux0_0_grp, dpaux0_1_grp, dpaux0_2_grp,
+ dpaux0_3_grp, pjtag0_0_grp, pjtag0_1_grp,
+ pjtag0_2_grp, pjtag0_3_grp, pjtag0_4_grp,
+ pjtag0_5_grp, trace0_0_grp, trace0_clk_0_grp,
+ trace0_1_grp, trace0_clk_1_grp, trace0_2_grp,
+ trace0_clk_2_grp, testscan0_0_grp
+
+ Valid values for pins are:
+ MIO0 - MIO77
+
+ Valid values for function are:
+ ethernet0, ethernet1, ethernet2, ethernet3, gemtsu0, usb0, usb1, mdio0,
+ mdio1, mdio2, mdio3, qspi0, qspi_fbclk, qspi_ss, spi0, spi1, spi0_ss,
+ spi1_ss, sdio0, sdio0_pc, sdio0_wp, sdio0_cd, sdio1, sdio1_pc, sdio1_wp,
+ sdio1_cd, nand0, nand0_ce, nand0_rb, nand0_dqs, can0, can1, uart0, uart1,
+ i2c0, i2c1, ttc0_clk, ttc0_wav, ttc1_clk, ttc1_wav, ttc2_clk, ttc2_wav,
+ ttc3_clk, ttc3_wav, swdt0_clk, swdt0_rst, swdt1_clk, swdt1_rst, gpio0, pmu0,
+ pcie0, csu0, dpaux0, pjtag0, trace0, trace0_clk, testscan0
+
+The following driver-specific properties as defined here are valid to specify in
+a pin configuration subnode:
+ - io-standard: Configure the pin to use the selected IO standard. Valid
+ arguments are 'IO_STANDARD_LVCMOS33' and 'IO_STANDARD_LVCMOS18'.
+ - schmitt-cmos: Selects either Schmitt or CMOS input for MIO pins. Valid
+ arguments are 'PIN_INPUT_TYPE_SCHMITT' and 'PIN_INPUT_TYPE_CMOS'.
+
+Example:
+
+firmware {
+ zynqmp_firmware: zynqmp-firmware {
+ compatible = "xlnx,zynqmp-firmware";
+ method = "smc";
+
+ pinctrl0: pinctrl {
+ compatible = "xlnx,zynqmp-pinctrl";
+
+ pinctrl_uart1_default: uart1-default {
+ mux {
+ groups = "uart0_4_grp";
+ function = "uart0";
+ };
+
+ conf {
+ groups = "uart0_4_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO18";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO19";
+ bias-disable;
+ schmitt-cmos = <PIN_INPUT_TYPE_CMOS>;
+ };
+ };
+ };
+ };
+};
+
+uart1 {
+ ...
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_default>;
+ ...
+
+};
diff --git a/Documentation/devicetree/bindings/remoteproc/xilinx,zynqmp-r5-remoteproc.txt b/Documentation/devicetree/bindings/remoteproc/xilinx,zynqmp-r5-remoteproc.txt
new file mode 100644
index 000000000000..44d4cd6a101e
--- /dev/null
+++ b/Documentation/devicetree/bindings/remoteproc/xilinx,zynqmp-r5-remoteproc.txt
@@ -0,0 +1,135 @@
+Xilinx ARM Cortex A53-R5 remoteproc driver
+==========================================
+
+ZynqMP family of devices use two Cortex R5 processors to help with various
+low power / real time tasks.
+
+This driver requires specific ZynqMP hardware design.
+
+ZynqMP R5 Device Node:
+=================================
+A ZynqMP R5 device node is used to represent RPU domain
+within ZynqMP SoC. This device node contains RPU processor
+subnodes.
+
+Required Properties:
+--------------------
+ - compatible : Should be "xlnx,zynqmp-r5-remoteproc-1.0"
+ - core_conf : R5 core configuration (valid string - split or lock-step)
+ - interrupts : Interrupt mapping for remoteproc IPI. It is required if the
+ user uses the remoteproc driver with the RPMsg kernel driver.
+ - interrupt-parent : Phandle for the interrupt controller. It is required if
+ the user uses the remoteproc driver with the RPMsg kernel
+ driver.
+
+ZynqMP R5 Remoteproc Device Node:
+=================================
+A ZynqMP R5 Remoteproc device node is used to represent a RPU processor.
+It is a subnode to the ZynqMP R5 device node. It also contains tightly
+coupled memory subnodes.
+
+Required Properties:
+--------------------
+ - pnode-id: ZynqMP R5 processor power domain ID which will be used by
+ ZynqMP power management unit to identify the processor.
+
+Optional Properties:
+--------------------
+ - memory-region: reserved memory which will be used by R5 processor
+
+
+ZynqMP R5 Remoteproc Device Node:
+=================================
+A ZynqMP R5 Remoteproc device node is used to represent a RPU processor.
+It is a subnode to the ZynqMP R5 device node.
+
+Required Properties:
+--------------------
+ - pnode-id: ZynqMP R5 processor power domain ID which will be used by
+ ZynqMP power management unit to identify the processor.
+
+Optional Properties:
+--------------------
+ - memory-region: reserved memory which will be used by R5 processor
+ - mboxes: Specify tx and rx mailboxes
+ - mbox-names: List of identifier strings for tx/rx mailbox channel.
+
+ZynqMP R5 TCM Device Node:
+=================================
+The ZynqMP R5 TCM device node is used to represent the TCM memory.
+It is a subnode to the ZynqMP R5 processor.
+
+Required Properties:
+--------------------
+ - reg: TCM address range
+ - pnode-id: TCM power domain ID
+
+
+Example:
+--------
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ /* R5 0 firmware memory in DDR */
+ rproc_0_fw_reserved: rproc@3ed00000 {
+ no-map;
+ reg = <0x0 0x3ed00000 0x0 0x40000>;
+ };
+ /* DMA shared memory between APU and RPU */
+ rproc_0_dma_reserved: rproc@3ed40000 {
+ compatible = "shared-dma-pool";
+ no-map;
+ reg = <0x0 0x3ed40000 0x0 0x100000>;
+ };
+ };
+
+ zynqmp-r5-remoteproc@0 {
+ compatible = "xlnx,zynqmp-r5-remoteproc-1.0";
+ core_conf = "split";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ r5-0: r5@0 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ memory-region = <&rproc_0_fw_reserved>,
+ <&rproc_0_dma_reserved>;
+ pnode-id = <0x7>;
+ mboxes = <&ipi_mailbox_rpu0 0>, <&ipi_mailbox_rpu0 1>;
+ mbox-names = "tx", "rx";
+ tcm-a: tcm@0 {
+ reg = <0x0 0xFFE00000 0x0 0x10000>;
+ pnode-id = <0xf>;
+ };
+ tcm-b: tcm@1 {
+ reg = <0x0 0xFFE20000 0x0 0x10000>;
+ pnode-id = <0x10>;
+ };
+ };
+ };
+
+ zynqmp_ipi {
+ compatible = "xlnx,zynqmp-ipi-mailbox";
+ interrupt-parent = <&gic>;
+ interrupts = <0 29 4>;
+ xlnx,ipi-id = <7>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ /* APU<->RPU0 IPI mailbox controller */
+ ipi_mailbox_rpu0: mailbox@ff990600 {
+ reg = <0xff990600 0x20>,
+ <0xff990620 0x20>,
+ <0xff9900c0 0x20>,
+ <0xff9900e0 0x20>;
+ reg-names = "local_request_region",
+ "local_response_region",
+ "remote_request_region",
+ "remote_response_region";
+ #mbox-cells = <1>;
+ xlnx,ipi-id = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/remoteproc/zynq_remoteproc.txt b/Documentation/devicetree/bindings/remoteproc/zynq_remoteproc.txt
new file mode 100644
index 000000000000..8a230dc3926e
--- /dev/null
+++ b/Documentation/devicetree/bindings/remoteproc/zynq_remoteproc.txt
@@ -0,0 +1,47 @@
+Xilinx ARM Cortex A9-A9 remoteproc driver
+==========================================
+
+Zynq family of devices can use one A9 processor to help with various
+low power / real time tasks.
+
+This driver requires specific Zynq hardware design.
+
+Zynq RemoteProc Device Node:
+=================================
+A zynq_remoteproc device node is used to represent the 2nd A9 instance
+within Zynq SoC.
+
+Required properties:
+--------------------
+ - compatible : should be "xlnx,zynq_remoteproc"
+ - vring0: soft interrupt for kicking from firmware
+ - vring1: soft interrupt for kicking from Linux kernel
+
+Optional Properties:
+--------------------
+ - memory-region: reserved memory which will be used by the remote processor
+
+Example:
+--------
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ rproc_0_reserved: rproc@3e000000 {
+ no-map;
+ reg = <0x3e000000 0x400000>;
+ };
+ rproc_0_dma: rproc@3e800000 {
+ no-map;
+ compatible = "shared-dma-pool";
+ reg = <0x3e800000 0x100000>;
+ };
+ };
+
+ zynq_remoteproc@0 {
+ compatible = "xlnx,zynq_remoteproc";
+ vring0 = <15>;
+ vring1 = <14>;
+ memory-region = <&rproc_0_reserved>, <&rproc_0_dma>;
+ };
diff --git a/Documentation/devicetree/bindings/serial/uartlite.c b/Documentation/devicetree/bindings/serial/uartlite.c
new file mode 100644
index 000000000000..7ae900880d30
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/uartlite.c
@@ -0,0 +1,26 @@
+Xilinx Axi Uartlite controller Device Tree Bindings
+---------------------------------------------------------
+
+Required properties:
+- compatible : Can be either of
+ "xlnx,xps-uartlite-1.00.a"
+ "xlnx,opb-uartlite-1.00.b"
+- reg : Physical base address and size of the Axi Uartlite
+ registers map.
+- interrupts : Property with a value describing the interrupt
+ number.
+- interrupt-parent : Must be core interrupt controller.
+
+Optional properties:
+- port-number : Set Uart port number
+- clock-names : Should be "s_axi_aclk"
+- clocks : Input clock specifier. Refer to common clock bindings.
+
+Example:
+serial@800C0000 {
+ compatible = "xlnx,xps-uartlite-1.00.a";
+ reg = <0x0 0x800c0000 0x10000>;
+ interrupt-parent = <&gic>;
+ interrupts = <0x0 0x6e 0x1>;
+ port-number = <0>;
+};
diff --git a/Documentation/devicetree/bindings/soc/xilinx/xlnx,ai-engine-npi.txt b/Documentation/devicetree/bindings/soc/xilinx/xlnx,ai-engine-npi.txt
new file mode 100644
index 000000000000..b1c1466a34ee
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/xilinx/xlnx,ai-engine-npi.txt
@@ -0,0 +1,23 @@
+Xilinx AI Engine NPI
+--------------------
+
+The Xilinx AI Engine NPI space is where the privileged operations for AI Engine
+device are handled, such as reset and pll. The space is typically meant to be
+owned by platform management software, and this space is accessible only when
+the platform management software grants the access. Thus, this dt binding only
+works in such configuration, and in case the platform locks the access,
+the non-secure software fails to access the device.
+
+This is a temporary solution to allow direct access to NPI space.
+
+Required properties:
+
+- compatible: Must be "xlnx,ai-engine-npi"
+- reg: Physical base address and length of the registers set for the device.
+
+Example:
+
+ aie-npi@f70a0000 {
+ compatible = "xlnx,ai-engine-npi";
+ reg = <0x0 0xf70a0000 0x0 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/soc/xilinx/xlnx,ai_engine.txt b/Documentation/devicetree/bindings/soc/xilinx/xlnx,ai_engine.txt
new file mode 100644
index 000000000000..b7643a1380d7
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/xilinx/xlnx,ai_engine.txt
@@ -0,0 +1,28 @@
+Xilinx AI Engine
+----------------
+
+The Xilinx AI Engine is a tile processor with many cores (up to 400) that
+can run in parallel. The data routing between cores is configured through
+internal switches, and shim tiles interface with external interconnect, such
+as memory or PL.
+
+Required properties:
+
+- compatible: Must be "xlnx,ai_engine".
+- reg: Physical base address and length of the registers set for the device.
+- interrupt-parent: the phandle to the interrupt controller.
+- interrupts: the interrupt numbers.
+- interrupt-names: Should be "interrupt0", "interrupt1", "interrupt2" or
+ "interrupt3".
+
+Example:
+
+ ai_engine@20000000000 {
+ compatible = "xlnx,ai_engine";
+ reg = <0x200 0x0 0x1 0x0>;
+ interrupt-parent = <&gic>;
+ interrupts = <0x0 0x94 0x1>,
+ <0x0 0x95 0x1>,
+ <0x0 0x96 0x1>;
+ interrupt-names = "interrupt1", "interrupt2", "interrupt3";
+ };
diff --git a/Documentation/devicetree/bindings/soc/xilinx/xlnx,vcu.txt b/Documentation/devicetree/bindings/soc/xilinx/xlnx,vcu.txt
index 6786d6715df0..98474f2accca 100644
--- a/Documentation/devicetree/bindings/soc/xilinx/xlnx,vcu.txt
+++ b/Documentation/devicetree/bindings/soc/xilinx/xlnx,vcu.txt
@@ -16,16 +16,60 @@ Required properties:
1. vcu slcr
2. Logicore
reg-names should contain name for the each register sequence.
-- clocks: phandle for aclk and pll_ref clocksource
-- clock-names: The identification string, "aclk", is always required for
- the axi clock. "pll_ref" is required for pll.
+- #clock-cells : Must be 1
+- clocks: phandle for aclk, pll_ref and encoder/decoder clocksources
+- clock-names: The identification string,
+ * "aclk", is always required for the axi clock.
+ * "pll_ref" is required for pll.
+ * "vcu_core_enc" is required for VCU core encoder.
+ * "vcu_core_dec" is required for VCU core decoder.
+ * "vcu_mcu_enc" is required for MCU core encoder.
+ * "vcu_mcu_dec" is required for MCU core decoder.
+- ranges
+- VCU Init driver node define the following child nodes:
+ * Allegro encoder driver node
+ - compatible: Must be "al,al5e"
+ - reg: There is a one set of register.
+ - interrupts: interrupt number to the cpu.
+ - interrupt-parent: the phandle for the interrupt controller
+ that services interrupts for this device.
+ * Allegro decoder driver node
+ - compatible: Must be "al,al5d"
+ - reg: There is a one set of register.
+ - interrupts: interrupt number to the cpu.
+ - interrupt-parent: the phandle for the interrupt controller
+ that services interrupts for this device.
+
+Optional properties:
+- reset-gpios : The GPIO used to reset the VCU, if available. Need use this
+ reset gpio when in design 'vcu_resetn' is driven by gpio. See
+ Documentation/devicetree/bindings/gpio/gpio.txt for details.
+
Example:
xlnx_vcu: vcu@a0040000 {
compatible = "xlnx,vcu-logicoreip-1.0";
+ #address-cells = <2>;
+ #size-cells = <2>;
reg = <0x0 0xa0040000 0x0 0x1000>,
<0x0 0xa0041000 0x0 0x1000>;
reg-names = "vcu_slcr", "logicore";
- clocks = <&si570_1>, <&clkc 71>;
- clock-names = "pll_ref", "aclk";
+ reset-gpios = <&gpio 0x4e GPIO_ACTIVE_HIGH>;
+ #clock-cells = <0x1>;
+ clock-names = "pll_ref", "aclk", "vcu_core_enc", "vcu_core_dec", "vcu_mcu_enc", "vcu_mcu_dec";
+ clocks = <&si570_1>, <&clkc 71>, <&xlnx_vcu 1>, <&xlnx_vcu 2>, <&xlnx_vcu 3>, <&xlnx_vcu 4>;
+ ranges;
+ encoder: al5e@a0000000 {
+ compatible = "al,al5e";
+ reg = <0x0 0xa0000000 0x0 0x10000>;
+ interrupts = <0 89 4>;
+ interrupt-parent = <&gic>;
+ };
+
+ decoder: al5d@a0020000 {
+ compatible = "al,al5d";
+ reg = <0x0 0xa0020000 0x0 0x10000>;
+ interrupts = <0 89 4>;
+ interrupt-parent = <&gic>;
+ };
};
diff --git a/Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt b/Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt
index cbc93c8f4963..1d255020727d 100644
--- a/Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt
+++ b/Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt
@@ -1,6 +1,6 @@
Device-Tree bindings for Xilinx PL audio formatter
-The IP core supports DMA, data formatting(AES<->PCM conversion)
+The IP core supports DMA, data formatting(packing, conversion)
of audio samples.
Required properties:
@@ -13,8 +13,21 @@ Required properties:
- interrupts-parent: Phandle for interrupt controller.
- interrupts: List of Interrupt numbers.
- reg: Base address and size of the IP core instance.
+ - xlnx,tx: connected audio sink node.
+ Should be one of below supported nodes:
+ 1. HDMI video Tx output
+ 2. I2S transmitter
+ 3. UHDSDI audio embed
+ Only these nodes are supported by the sound card driver
+
+ - xlnx,rx: connected audio source node.
+ Should be one of below supported nodes:
+ 1. HDMI video Rx input
+ 2. I2S receiver
+ 3. UHDSDI audio extract
+ Only these nodes are supported by the sound card driver
- clock-names: List of input clocks.
- Required elements: "s_axi_lite_aclk", "aud_mclk"
+ Required elements: "s_axi_lite_aclk", "m_axis_mm2s_aclk", "aud_mclk", "s_axis_s2mm_aclk"
- clocks: Input clock specifier. Refer to common clock bindings.
Example:
@@ -24,6 +37,8 @@ Example:
interrupt-parent = <&gic>;
interrupts = <0 104 4>, <0 105 4>;
reg = <0x0 0x80010000 0x0 0x1000>;
- clock-names = "s_axi_lite_aclk", "aud_mclk";
- clocks = <&clk 71>, <&clk_wiz_1 0>;
+ xlnx,tx = <&i2s_transmitter>;
+ xlnx,rx = <&i2s_receiver>;
+ clock-names = "s_axi_lite_aclk", "m_axis_mm2s_aclk", "aud_mclk", "s_axis_s2mm_aclk";
+ clocks = <&clk 71>, <&audio_ss_0_clk_wiz_0 0>, <&audio_ss_0_clk_wiz_0 0>, <&clk 71>;
};
diff --git a/Documentation/devicetree/bindings/sound/xlnx,dp-snd-card.txt b/Documentation/devicetree/bindings/sound/xlnx,dp-snd-card.txt
new file mode 100644
index 000000000000..7eb932913983
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/xlnx,dp-snd-card.txt
@@ -0,0 +1,17 @@
+Device-Tree bindings for Xilinx ZynqMP DisplayPort Audio Card
+
+The card driver integrates codec and pcm components and represents as a single
+audio device.
+
+Required properties:
+ - compatible: Should be "xlnx,dp-snd-card".
+ - xlnx,dp-snd-pcm: phandle(s) to the ZynqMP DP PCM node.
+ - xlnx,dp-snd-codec: phandle to the ZynqMP DP card node.
+
+Example:
+
+ xlnx_dp_snd_card: dp_snd_card {
+ compatible = "xlnx,dp-snd-card";
+ xlnx,dp-snd-pcm = <&xlnx_dp_snd_pcm0>, <&xlnx_dp_snd_pcm1>;
+ xlnx,dp-snd-codec = <&xlnx_dp_snd_codec0>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/xlnx,dp-snd-codec.txt b/Documentation/devicetree/bindings/sound/xlnx,dp-snd-codec.txt
new file mode 100644
index 000000000000..d094fdd9d9e0
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/xlnx,dp-snd-codec.txt
@@ -0,0 +1,18 @@
+Device-Tree bindings for Xilinx ZynqMP DisplayPort Codec
+
+The codec driver handles the audio clock and format management.
+
+Required properties:
+ - compatible: Should be "xlnx,dp-snd-codec".
+ - clocks: The phandle for the audio clock. The audio clock should be
+ configured to the correct audio clock rate, which should be one of
+ (44100 * 512) or (48000 * 512).
+ - clock-names: The identification string should be "aud_clk".
+
+Example:
+
+ xlnx_dp_snd_codec0: dp_snd_codec0 {
+ compatible = "xlnx,dp-snd-codec";
+ clocks = <&dp_aud_clk>;
+ clock-names = "aud_clk";
+ };
diff --git a/Documentation/devicetree/bindings/sound/xlnx,dp-snd-pcm.txt b/Documentation/devicetree/bindings/sound/xlnx,dp-snd-pcm.txt
new file mode 100644
index 000000000000..303232a2a375
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/xlnx,dp-snd-pcm.txt
@@ -0,0 +1,18 @@
+Device-Tree bindings for Xilinx ZynqMP DisplayPort PCM
+
+The DPDMA driver of ZynqMP DisplayPort subsystem is based on DMA engine,
+and the DP PCM driver is based on snd dmaengine helpers.
+
+Required properties:
+ - compatible: Should be "xlnx,dp-snd-pcm".
+ - dmas: the phandle list of DMA specifiers. The dma channel ID should be one
+ of 4 for audio0 channel or 5 for audio1 channel.
+ - dma-names: the identifier strings for DMAs. The value should be "tx".
+
+Example:
+
+ xlnx_dp_snd_pcm0: dp_snd_pcm0 {
+ compatible = "xlnx,dp-snd-pcm";
+ dmas = <&xlnx_dpdma 4>;
+ dma-names = "tx";
+ };
diff --git a/Documentation/devicetree/bindings/sound/xlnx,i2s.txt b/Documentation/devicetree/bindings/sound/xlnx,i2s.txt
index 5e7c7d5bb60a..19ab3cd31f2c 100644
--- a/Documentation/devicetree/bindings/sound/xlnx,i2s.txt
+++ b/Documentation/devicetree/bindings/sound/xlnx,i2s.txt
@@ -11,18 +11,31 @@ Required property common to both I2S playback and capture:
- xlnx,dwidth: sample data width. Can be any of 16, 24.
- xlnx,num-channels: Number of I2S streams. Can be any of 1, 2, 3, 4.
supported channels = 2 * xlnx,num-channels
+ - xlnx,snd-pcm: reference to audio formatter block
+ - clock-names: List of input clocks.
+ Required elements for I2S Tx: "s_axi_ctrl_aclk", "aud_mclk", "s_axis_aud_aclk".
+ Required elements for I2S Rx: "s_axi_ctrl_aclk", "aud_mclk", "m_axis_aud_aclk".
+ - clocks: Input clock specifier. Refer to common clock bindings.
Example:
i2s_receiver@a0080000 {
compatible = "xlnx,i2s-receiver-1.0";
+ clock-names = "s_axi_ctrl_aclk", "aud_mclk", "m_axis_aud_aclk";
+ clocks = <&clk 71>, <&audio_ss_0_clk_wiz_0 0>, <&clk 71>;
reg = <0x0 0xa0080000 0x0 0x10000>;
xlnx,dwidth = <0x18>;
xlnx,num-channels = <1>;
+ xlnx,snd-pcm = <&audio_ss_0_audio_formatter_0>;
};
i2s_transmitter@a0090000 {
compatible = "xlnx,i2s-transmitter-1.0";
+ clock-names = "s_axi_ctrl_aclk", "aud_mclk", "s_axis_aud_aclk";
+ clocks = <&clk 71>, <&audio_ss_0_clk_wiz_0 0>, <&audio_ss_0_clk_wiz_0 0>;
reg = <0x0 0xa0090000 0x0 0x10000>;
xlnx,dwidth = <0x18>;
xlnx,num-channels = <1>;
+ xlnx,snd-pcm = <&audio_ss_0_audio_formatter_0>;
};
+ Documentation of "audio_ss_0_audio_formatter_0" node is located
+ at Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt
diff --git a/Documentation/devicetree/bindings/sound/xlnx,spdif.txt b/Documentation/devicetree/bindings/sound/xlnx,spdif.txt
index 15c2d64d247c..45214da42ec7 100644
--- a/Documentation/devicetree/bindings/sound/xlnx,spdif.txt
+++ b/Documentation/devicetree/bindings/sound/xlnx,spdif.txt
@@ -1,28 +1,27 @@
-Device-Tree bindings for Xilinx SPDIF IP
+Device-Tree bindings for Xilinx SPDIF PL IP
-The IP supports playback and capture of SPDIF audio
+The IP supports SPDIF based playback and capture audio
Required properties:
- compatible: "xlnx,spdif-2.0"
- clock-names: List of input clocks.
- Required elements: "s_axi_aclk", "aud_clk_i"
+ Required elements for SPDIF Tx: "aud_clk_i", "s_axi_aclk", "s_axis_aclk".
+ Required elements for SPDIF Rx: "aud_clk_i", "s_axi_aclk", "m_axis_aclk".
- clocks: Input clock specifier. Refer to common clock bindings.
- reg: Base address and address length of the IP core instance.
- interrupts-parent: Phandle for interrupt controller.
- interrupts: List of Interrupt numbers.
- - xlnx,spdif-mode: 0 :- receiver mode
- 1 :- transmitter mode
- - xlnx,aud_clk_i: input audio clock value.
+ - xlnx,spdif-mode: 0 :- receiver mode ; 1 :- transmitter mode
+ - xlnx,snd-pcm: phandle to audio formatter node
-Example:
+Example - SPDIF Rx:
spdif_0: spdif@80010000 {
- clock-names = "aud_clk_i", "s_axi_aclk";
- clocks = <&misc_clk_0>, <&clk 71>;
+ clock-names = "aud_clk_i", "s_axi_aclk", "m_axis_aclk";
+ clocks = <&si570_1>, <&clk 71>, <&clk 71>;
compatible = "xlnx,spdif-2.0";
- interrupt-names = "spdif_interrupt";
interrupt-parent = <&gic>;
interrupts = <0 91 4>;
reg = <0x0 0x80010000 0x0 0x10000>;
xlnx,spdif-mode = <1>;
- xlnx,aud_clk_i = <49152913>;
+ xlnx,snd-pcm = <&audio_formatter_0>;
};
diff --git a/Documentation/devicetree/bindings/sound/xlnx,v-uhdsdi-audio.txt b/Documentation/devicetree/bindings/sound/xlnx,v-uhdsdi-audio.txt
new file mode 100644
index 000000000000..69134458b9d5
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/xlnx,v-uhdsdi-audio.txt
@@ -0,0 +1,60 @@
+Device-Tree bindings for Xilinx SDI audio
+
+The IP core supports embed/extract of audio in SDI Tx and Rx
+protocol respectively. Reference to PG:
+https://www.xilinx.com/support/documentation/ip_documentation/v_uhdsdi_audio/v1_0/pg309-v-uhdsdi-audio.pdf
+
+Required properties:
+ - compatible: Should be one of:
+ "xlnx,v-uhdsdi-audio-2.0"
+ "xlnx,v-uhdsdi-audio-1.0"
+ Note: v1.0 (xlnx,v-uhdsdi-audio-1.0) is deprecated
+ and driver no longer supports it. Mandatory to upgrade to v2.0
+ - interrupts: Interrupt number.
+ - interrupt-parent: phandle for interrupt controller.
+ - reg: Base address and size of the IP core instance.
+ - xlnx,snd-pcm: reference to audio formatter block
+ - clock-names: List of input clocks.
+ Required elements for SDI Embed: "s_axi_aclk", "s_axis_clk", "sdi_embed_clk".
+ Required elements for SDI Extract: "s_axi_aclk", "sdi_extract_clk", "m_axis_clk".
+ - clocks: Input clock specifier. Refer to common clock bindings.
+
+SDI embed contains an output port to the remote endpoint of SDI video Tx node.
+This pipeline should be described using the DT bindings defined in
+Documentation/devicetree/bindings/graph.txt
+
+Example:
+
+ audio_ss_0_v_uhdsdi_audio_extract_0: v_uhdsdi_audio@80080000 {
+ compatible = "xlnx,v-uhdsdi-audio-2.0";
+ clock-names = "s_axi_aclk", "sdi_extract_clk", "m_axis_clk";
+ clocks = <&misc_clk_0>, <&misc_clk_1>, <&misc_clk_0>;
+ interrupt-names = "interrupt";
+ interrupt-parent = <&gic>;
+ interrupts = <0 106 4>;
+ reg = <0x0 0x80080000 0x0 0x10000>;
+ xlnx,snd-pcm = <&audio_ss_0_audio_formatter_0>;
+ };
+
+ audio_ss_0_v_uhdsdi_audio_embed_0: v_uhdsdi_audio@80090000 {
+ compatible = "xlnx,v-uhdsdi-audio-2.0";
+ clock-names = "s_axi_aclk", "s_axis_clk", "sdi_embed_clk";
+ clocks = <&misc_clk_0>, <&misc_clk_0>, <&misc_clk_1>;
+ interrupt-names = "interrupt";
+ interrupt-parent = <&gic>;
+ interrupts = <0 107 4>;
+ reg = <0x0 0x80090000 0x0 0x10000>;
+ xlnx,snd-pcm = <&audio_ss_0_audio_formatter_0>;
+ sdi_av_port: port@0 {
+ reg = <0>;
+ sditx_audio_embed_src: endpoint {
+ remote-endpoint = <&sdi_audio_sink_port>;
+ };
+ };
+ };
+
+ Node 'v_smpte_uhdsdi_tx_ss' is documented in SDI Tx video bindings,
+ located at Documentation/devicetree/bindings/display/xlnx/xlnx,sdi-tx.txt.
+
+ Node 'audio_ss_0_audio_formatter_0' is documented
+ at Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt
diff --git a/Documentation/devicetree/bindings/spi/spi-xilinx.txt b/Documentation/devicetree/bindings/spi/spi-xilinx.txt
index 5f4ed3e5c994..71fab150dfba 100644
--- a/Documentation/devicetree/bindings/spi/spi-xilinx.txt
+++ b/Documentation/devicetree/bindings/spi/spi-xilinx.txt
@@ -6,18 +6,29 @@ Required properties:
- reg : Physical base address and size of SPI registers map.
- interrupts : Property with a value describing the interrupt
number.
+- fifo-size : Depth of TX/RX Fifos
Optional properties:
-- xlnx,num-ss-bits : Number of chip selects used.
- xlnx,num-transfer-bits : Number of bits per transfer. This will be 8 if not specified
+- num-cs : Number of chip selects used.
+- bits-per-word : Number of bits per word.
+- clock-names : Can be one or more strings from "axi_clk", "axi4_clk"
+ and "spi_clk" depending on IP configurations.
+- clocks : Input clock specifier. Refer to common clock bindings.
+- xlnx,startup-block : Indicates whether startup block is enabled or disabled.
Example:
axi_quad_spi@41e00000 {
compatible = "xlnx,xps-spi-2.00.a";
+ clock-names = "axi_clk", "axi4_clk", "spi_clk";
+ clocks = <&clkc 71>, <&clkc 72>, <&clkc 73>;
interrupt-parent = <&intc>;
interrupts = <0 31 1>;
reg = <0x41e00000 0x10000>;
- xlnx,num-ss-bits = <0x1>;
xlnx,num-transfer-bits = <32>;
+ num-cs = <0x1>;
+ fifo-size = <256>;
+ bits-per-word = <8>;
+ xlnx,startup-block;
};
diff --git a/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt b/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt
index 0f6d37ff541c..a40827f58164 100644
--- a/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt
+++ b/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.txt
@@ -2,7 +2,8 @@ Xilinx Zynq UltraScale+ MPSoC GQSPI controller Device Tree Bindings
-------------------------------------------------------------------
Required properties:
-- compatible : Should be "xlnx,zynqmp-qspi-1.0".
+- compatible : Should be "xlnx,zynqmp-qspi-1.0" for zynqmp or
+ "xlnx,versal-qspi-1.0" for versal.
- reg : Physical base address and size of GQSPI registers map.
- interrupts : Property with a value describing the interrupt
number.
@@ -12,6 +13,14 @@ Required properties:
Optional properties:
- num-cs : Number of chip selects used.
+- has-io-mode : boolean property describing the controller operating
+                mode. If present, the controller will operate in IO mode,
+                else in DMA mode.
+- is-dual : zynqmp qspi support for dual-parallel mode configuration
+ value should be 1.
+- is-stacked : zynqmp qspi support for stacked mode configuration.
+ to enable this mode, is-dual should be 0 and is-stacked
+ should be 1.
Example:
qspi: spi@ff0f0000 {
diff --git a/Documentation/devicetree/bindings/usb/dwc3-xilinx.txt b/Documentation/devicetree/bindings/usb/dwc3-xilinx.txt
index 4aae5b2cef56..622e27fc0b71 100644
--- a/Documentation/devicetree/bindings/usb/dwc3-xilinx.txt
+++ b/Documentation/devicetree/bindings/usb/dwc3-xilinx.txt
@@ -1,7 +1,8 @@
Xilinx SuperSpeed DWC3 USB SoC controller
Required properties:
-- compatible: Should contain "xlnx,zynqmp-dwc3"
+- compatible: May contain "xlnx,zynqmp-dwc3" or "xlnx,versal-dwc3"
+- reg: Base address and length of the register control block
- clocks: A list of phandles for the clocks listed in clock-names
- clock-names: Should contain the following:
"bus_clk" Master/Core clock, have to be >= 125 MHz for SS
@@ -13,20 +14,38 @@ Required child node:
A child node must exist to represent the core DWC3 IP block. The name of
the node is not important. The content of the node is defined in dwc3.txt.
+Optional properties for xlnx,zynqmp-dwc3:
+- nvmem-cells: list of phandle to the nvmem data cells.
+- nvmem-cell-names: Names for the each nvmem-cells specified.
+
+Optional properties for snps,dwc3:
+- dma-coherent: Enable this flag if CCI is enabled in design. Adding this
+ flag configures Global SoC bus Configuration Register and
+ Xilinx USB 3.0 IP - USB coherency register to enable CCI.
+- snps,enable-hibernation: Add this flag to enable hibernation support for
+ peripheral mode
+- interrupt-names: This property provides the names of the interrupt ids used
+
Example device node:
usb@0 {
#address-cells = <0x2>;
#size-cells = <0x1>;
compatible = "xlnx,zynqmp-dwc3";
+ reg = <0x0 0xff9d0000 0x0 0x100>;
clock-names = "bus_clk" "ref_clk";
clocks = <&clk125>, <&clk125>;
ranges;
+ nvmem-cells = <&soc_revision>;
+ nvmem-cell-names = "soc_revision";
dwc3@fe200000 {
compatible = "snps,dwc3";
reg = <0x0 0xfe200000 0x40000>;
+ interrupt-names = "dwc_usb3";
interrupts = <0x0 0x41 0x4>;
dr_mode = "host";
+ dma-coherent;
+ snps,enable-hibernation;
};
};
diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt
index 9946ff9ba735..6913f1f8e486 100644
--- a/Documentation/devicetree/bindings/usb/dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/dwc3.txt
@@ -88,6 +88,19 @@ Optional properties:
- snps,quirk-frame-length-adjustment: Value for GFLADJ_30MHZ field of GFLADJ
register for post-silicon frame length adjustment when the
fladj_30mhz_sdbnd signal is invalid or incorrect.
+ - snps,refclk_fladj: Enable frame length adjustment for SOF/ITP counter.
+ - snps,enable_guctl1_resume_quirk: Adding this flag sets bit 10 of GUCTL1
+ thus enabling the workaround in HW to fix the issue where the controller
+ was not able to generate correct CRC checksum on the very first transfer
+ packet after sending resume signal.
+ - snps,enable_guctl1_ipd_quirk: Adding this flag sets bit 9 of GUCTL1
+ enabling the workaround in HW to reduce the Inter Packet Delay (IPD)
+ and making controller enumerate FS/LS devices connected behind VIA-LAB.
+ - snps,xhci-stream-quirk: Dwc3 host controller has a bug where it sometimes
+ fails to process the transfer descriptors present in the BULK IN
+ stream ring. Since the controller is not processing any TD, no transfer
+ events will be triggered, resulting in a hang condition. Enabling this
+ flag in dts fixes the above said issue.
- snps,rx-thr-num-pkt-prd: periodic ESS RX packet threshold count - host mode
only. Set this and rx-max-burst-prd to a valid,
non-zero value 1-16 (DWC_usb31 programming guide
@@ -111,6 +124,8 @@ Optional properties:
When just one value, which means INCRX burst mode enabled. When
more than one value, which means undefined length INCR burst type
enabled. The values can be 1, 4, 8, 16, 32, 64, 128 and 256.
+ - snps,mask_phy_reset: enabling this quirk masks phy reset signal from reaching
+ the ULPI phy after initialization done of the phy.
- in addition all properties from usb-xhci.txt from the current directory are
supported as well
diff --git a/Documentation/devicetree/bindings/usb/ehci-xilinx.txt b/Documentation/devicetree/bindings/usb/ehci-xilinx.txt
new file mode 100644
index 000000000000..4df7ad6e3541
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/ehci-xilinx.txt
@@ -0,0 +1,21 @@
+Xilinx USB EHCI controller
+
+Required properties:
+- compatible: must be "xlnx,xps-usb-host-1.00.a"
+- reg: physical base address of the controller and length of memory mapped
+ region.
+- interrupts: The EHCI interrupt
+
+Optional properties:
+- xlnx,ext-vbus-valid: Use external VBUS
+- xlnx,support-usb-fs: Support for Full Speed USB
+- xlnx,use-phy-bus-pwr: Use phy bus power in USB
+
+Example:
+
+ xps_usb_host_0: usb@82400000 {
+ compatible = "xlnx,xps-usb-host-1.00.a";
+ interrupt-parent = <&xps_intc_0>;
+ interrupts = < 0 2 >;
+ reg = < 0x82400000 0x200 >;
+ } ;
diff --git a/Documentation/devicetree/bindings/usb/udc-xilinx.txt b/Documentation/devicetree/bindings/usb/udc-xilinx.txt
index 47b4e397a08d..86f705384132 100644
--- a/Documentation/devicetree/bindings/usb/udc-xilinx.txt
+++ b/Documentation/devicetree/bindings/usb/udc-xilinx.txt
@@ -6,13 +6,16 @@ Required properties:
device registers map.
- interrupts : Should contain single irq line of USB2 device
controller
-- xlnx,has-builtin-dma : if DMA is included
+- xlnx,has-builtin-dma : If DMA is included
-Example:
- axi-usb2-device@42e00000 {
- compatible = "xlnx,usb2-device-4.00.a";
- interrupts = <0x0 0x39 0x1>;
- reg = <0x42e00000 0x10000>;
- xlnx,has-builtin-dma;
- };
+Optional properties:
+- clock-names : Should be "s_axi_aclk"
+- clocks : Input clock specifier. Refer to common clock bindings.
+Example:
+ axi-usb2-device@42e00000 {
+ compatible = "xlnx,usb2-device-4.00.a";
+ interrupts = <0x0 0x39 0x1>;
+ reg = <0x42e00000 0x10000>;
+ xlnx,has-builtin-dma;
+ };
diff --git a/Documentation/devicetree/bindings/video/xilinx-fb.txt b/Documentation/devicetree/bindings/video/xilinx-fb.txt
new file mode 100644
index 000000000000..11a6ba01a032
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/xilinx-fb.txt
@@ -0,0 +1,35 @@
+Xilinx Axi TFT controller Device Tree Bindings
+---------------------------------------------------------
+
+Required properties:
+- compatible : Can be any of the following
+ "xlnx,xps-tft-1.00.a","xlnx,xps-tft-2.00.a",
+ "xlnx,xps-tft-2.01.a","xlnx,plb-tft-cntlr-ref-1.00.a",
+ "xlnx,plb-dvi-cntlr-ref-1.00.c"
+- reg : Physical base address and size of the Axi Tft
+ registers map
+- interrupts : Property with a value describing the interrupt
+ number
+- interrupt-parent : Must be core interrupt controller
+- xlnx,dcr-splb-slave-if : Accessing TFT Controller through Bus or DCR interface.
+ for BUS its value is 1 and for DCR it is 0.
+ default is BUS i.e. 1
+- resolution : <xres yres> pixel resolution of framebuffer. Some
+ implementations use a different resolution
+- virtual-resolution : <xvirt yvirt> Size of framebuffer in memory.
+- rotate-display : (empty) rotate display 180 degrees
+- phys-size : <screen_width_mm screen_height_mm> width and height of
+ screen
+
+Example:
+axi_tft_0: axi_tft@44a00000 {
+ compatible = "xlnx,xps-tft-1.00.a";
+ interrupt-parent = <&axi_intc>;
+ interrupts = <1 0>;
+ reg = <0x44a00000 0x10000>;
+ xlnx,dcr-splb-slave-if = <0x1>;
+ resolution = <640 480>;
+ virtual-resolution = <1024 480>;
+ phys-size = <1024 512>;
+ rotate-display;
+};
diff --git a/Documentation/devicetree/bindings/watchdog/of-xilinx-wdt.txt b/Documentation/devicetree/bindings/watchdog/of-xilinx-wdt.txt
index c6ae9c9d5e3e..10d68003158d 100644
--- a/Documentation/devicetree/bindings/watchdog/of-xilinx-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/of-xilinx-wdt.txt
@@ -1,21 +1,28 @@
-Xilinx AXI/PLB soft-core watchdog Device Tree Bindings
----------------------------------------------------------
+Xilinx AXI/PLB soft-core watchdog and window watchdog Device Tree Bindings
+--------------------------------------------------------------------------
Required properties:
- compatible : Should be "xlnx,xps-timebase-wdt-1.00.a" or
- "xlnx,xps-timebase-wdt-1.01.a".
+ "xlnx,xps-timebase-wdt-1.01.a" or
+ "xlnx,versal-wwdt-1.0".
- reg : Physical base address and size
Optional properties:
- clocks : Input clock specifier. Refer to common clock
bindings.
- clock-frequency : Frequency of clock in Hz
+
+Optional properties for AXI/PLB soft-core watchdog:
- xlnx,wdt-enable-once : 0 - Watchdog can be restarted
1 - Watchdog can be enabled just once
- xlnx,wdt-interval : Watchdog timeout interval in 2^<val> clock cycles,
<val> is integer from 8 to 31.
+Optional properties for window watchdog:
+- timeout-sec : Watchdog timeout value (in seconds).
+
Example:
+Xilinx AXI/PLB soft-core watchdog:
axi-timebase-wdt@40100000 {
clock-frequency = <50000000>;
compatible = "xlnx,xps-timebase-wdt-1.00.a";
@@ -24,3 +31,11 @@ axi-timebase-wdt@40100000 {
xlnx,wdt-enable-once = <0x0>;
xlnx,wdt-interval = <0x1b>;
} ;
+
+Xilinx Versal window watchdog:
+watchdog@fd4d0000 {
+ compatible = "xlnx,versal-wwdt-1.0";
+ reg = <0x0 0xfd4d0000 0x0 0x10000>;
+ clocks = <&clk25>;
+ timeout-sec = <10>;
+} ;
diff --git a/Documentation/devicetree/bindings/xilinx.txt b/Documentation/devicetree/bindings/xilinx.txt
index d058ace29345..0c75bb153ca6 100644
--- a/Documentation/devicetree/bindings/xilinx.txt
+++ b/Documentation/devicetree/bindings/xilinx.txt
@@ -253,6 +253,7 @@
Optional properties:
- 8-bit (empty) : Set this property for SystemACE in 8 bit mode
+ - port-number = <port_number> : Set port number for particular device
iii) Xilinx EMAC and Xilinx TEMAC
diff --git a/Documentation/devicetree/bindings/xlnx,ctrl-fb.txt b/Documentation/devicetree/bindings/xlnx,ctrl-fb.txt
new file mode 100644
index 000000000000..8abc053dfa30
--- /dev/null
+++ b/Documentation/devicetree/bindings/xlnx,ctrl-fb.txt
@@ -0,0 +1,22 @@
+The Xilinx framebuffer DMA engine supports two soft IP blocks: one IP
+block is used for reading video frame data from memory (FB Read) to the device
+and the other IP block is used for writing video frame data from the device
+to memory (FB Write). Both the FB Read/Write IP blocks are aware of the
+format of the data being written to or read from memory including RGB and
+YUV in packed, planar, and semi-planar formats. Because the FB Read/Write
+is format aware, only one buffer pointer is needed by the IP blocks even
+when planar or semi-planar format are used.
+
+Required properties:
+ - compatible: Should be "xlnx,ctrl-fbwr-1.0" for framebuffer Write OR
+ "xlnx,ctrl-fbrd-1.0" for framebuffer Read.
+ - reg: Base address and size of the IP core.
+ - reset-gpios: gpio to reset the framebuffer IP
+
+Example:
+
+ fbwr@0xa0000000 {
+ compatible = "xlnx,ctrl-fbwr-1.0";
+ reg = <0x0 0xa0000000 0x0 0x10000>;
+ reset-gpios = <&gpio 82 1>;
+ };
diff --git a/Documentation/devicetree/bindings/xlnx,ctrl-vpss.txt b/Documentation/devicetree/bindings/xlnx,ctrl-vpss.txt
new file mode 100644
index 000000000000..04e6426f4e9b
--- /dev/null
+++ b/Documentation/devicetree/bindings/xlnx,ctrl-vpss.txt
@@ -0,0 +1,21 @@
+The Xilinx VPSS Scaler is a Video IP that supports up scaling, down scaling and
+no scaling functionality along with color space conversion. This supports custom
+resolution values between 0 and 4096.
+
+Required properties:
+
+- compatible: Must be "xlnx,ctrl-xvpss-1.0".
+- reg: Base address and size of the IP core.
+- reset-gpios: gpio to reset the framebuffer IP
+- xlnx,vpss-taps: number of taps
+- xlnx,vpss-ppc: pixels per clock
+
+Example:
+
+ ctrlvpss: vpss@0xa0200000 {
+ compatible = "xlnx,ctrl-xvpss-1.0";
+ reg = <0x0 0xa0200000 0x0 0x30000>;
+ reset-gpios = <&gpio 80 1>;
+ xlnx,vpss-taps = <6>;
+ xlnx,vpss-ppc = <2>;
+ };
diff --git a/Documentation/devicetree/configfs-overlays.txt b/Documentation/devicetree/configfs-overlays.txt
new file mode 100644
index 000000000000..5fa43e064307
--- /dev/null
+++ b/Documentation/devicetree/configfs-overlays.txt
@@ -0,0 +1,31 @@
+Howto use the configfs overlay interface.
+
+A device-tree configfs entry is created in /config/device-tree/overlays
+and it is manipulated using standard file system I/O.
+Note that this is a debug level interface, for use by developers and
+not necessarily something accessed by normal users due to the
+security implications of having direct access to the kernel's device tree.
+
+* To create an overlay you mkdir the directory:
+
+ # mkdir /config/device-tree/overlays/foo
+
+* Either you echo the overlay firmware file to the path property file.
+
+ # echo foo.dtbo >/config/device-tree/overlays/foo/path
+
+* Or you cat the contents of the overlay to the dtbo file
+
+ # cat foo.dtbo >/config/device-tree/overlays/foo/dtbo
+
+The overlay file will be applied, and devices will be created/destroyed
+as required.
+
+To remove it simply rmdir the directory.
+
+ # rmdir /config/device-tree/overlays/foo
+
+The rationale of the dual interface (firmware & direct copy) is that each is
+better suited to different use patterns. The firmware interface is what's
+intended to be used by hardware managers in the kernel, while the copy interface
+makes sense for developers (since it avoids problems with namespaces).
diff --git a/Documentation/media/uapi/v4l/subdev-formats.rst b/Documentation/media/uapi/v4l/subdev-formats.rst
index 17bfb2beaa6a..c8352848fd3d 100644
--- a/Documentation/media/uapi/v4l/subdev-formats.rst
+++ b/Documentation/media/uapi/v4l/subdev-formats.rst
@@ -1531,6 +1531,43 @@ The following tables list existing packed RGB formats.
- b\ :sub:`2`
- b\ :sub:`1`
- b\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-RBG101010-1X30:
+
+ - MEDIA_BUS_FMT_RBG101010_1X30
+ - 0x1100
+ -
+ - 0
+ - 0
+ - r\ :sub:`9`
+ - r\ :sub:`8`
+ - r\ :sub:`7`
+ - r\ :sub:`6`
+ - r\ :sub:`5`
+ - r\ :sub:`4`
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+ - b\ :sub:`9`
+ - b\ :sub:`8`
+ - b\ :sub:`7`
+ - b\ :sub:`6`
+ - b\ :sub:`5`
+ - b\ :sub:`4`
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
+ - g\ :sub:`9`
+ - g\ :sub:`8`
+ - g\ :sub:`7`
+ - g\ :sub:`6`
+ - g\ :sub:`5`
+ - g\ :sub:`4`
+ - g\ :sub:`3`
+ - g\ :sub:`2`
+ - g\ :sub:`1`
+ - g\ :sub:`0`
.. raw:: latex
@@ -1638,6 +1675,47 @@ The following table list existing packed 36bit wide RGB formats.
- b\ :sub:`2`
- b\ :sub:`1`
- b\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-RBG121212-1X36:
+
+ - MEDIA_BUS_FMT_RBG121212_1X36
+ - 0x1101
+ -
+ - r\ :sub:`11`
+ - r\ :sub:`10`
+ - r\ :sub:`9`
+ - r\ :sub:`8`
+ - r\ :sub:`7`
+ - r\ :sub:`6`
+ - r\ :sub:`5`
+ - r\ :sub:`4`
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+ - b\ :sub:`11`
+ - b\ :sub:`10`
+ - b\ :sub:`9`
+ - b\ :sub:`8`
+ - b\ :sub:`7`
+ - b\ :sub:`6`
+ - b\ :sub:`5`
+ - b\ :sub:`4`
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
+ - g\ :sub:`11`
+ - g\ :sub:`10`
+ - g\ :sub:`9`
+ - g\ :sub:`8`
+ - g\ :sub:`7`
+ - g\ :sub:`6`
+ - g\ :sub:`5`
+ - g\ :sub:`4`
+ - g\ :sub:`3`
+ - g\ :sub:`2`
+ - g\ :sub:`1`
+ - g\ :sub:`0`
.. raw:: latex
@@ -1807,6 +1885,78 @@ The following table list existing packed 48bit wide RGB formats.
- b\ :sub:`2`
- b\ :sub:`1`
- b\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-RBG161616-1X48:
+
+ - MEDIA_BUS_FMT_RBG161616_1X48
+ - 0x1102
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - r\ :sub:`15`
+ - r\ :sub:`14`
+ - r\ :sub:`13`
+ - r\ :sub:`12`
+ - r\ :sub:`11`
+ - r\ :sub:`10`
+ - r\ :sub:`9`
+ - r\ :sub:`8`
+ - r\ :sub:`7`
+ - r\ :sub:`6`
+ - r\ :sub:`5`
+ - r\ :sub:`4`
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+ * -
+ -
+ -
+ - b\ :sub:`15`
+ - b\ :sub:`14`
+ - b\ :sub:`13`
+ - b\ :sub:`12`
+ - b\ :sub:`11`
+ - b\ :sub:`10`
+ - b\ :sub:`9`
+ - b\ :sub:`8`
+ - b\ :sub:`7`
+ - b\ :sub:`6`
+ - b\ :sub:`5`
+ - b\ :sub:`4`
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
+ - g\ :sub:`15`
+ - g\ :sub:`14`
+ - g\ :sub:`13`
+ - g\ :sub:`12`
+ - g\ :sub:`11`
+ - g\ :sub:`10`
+ - g\ :sub:`9`
+ - g\ :sub:`8`
+ - g\ :sub:`7`
+ - g\ :sub:`6`
+ - g\ :sub:`5`
+ - g\ :sub:`4`
+ - g\ :sub:`3`
+ - g\ :sub:`2`
+ - g\ :sub:`1`
+ - g\ :sub:`0`
.. raw:: latex
@@ -5187,6 +5337,148 @@ the following codes.
- u\ :sub:`2`
- u\ :sub:`1`
- u\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-VYYUYY10_4X20:
+
+ - MEDIA_BUS_FMT_VYYUYY10_4X20
+ - 0x2101
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - u\ :sub:`9`
+ - u\ :sub:`8`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - v\ :sub:`9`
+ - v\ :sub:`8`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
* .. _MEDIA-BUS-FMT-Y12-1X12:
- MEDIA_BUS_FMT_Y12_1X12
@@ -6981,6 +7273,185 @@ the following codes.
- u\ :sub:`2`
- u\ :sub:`1`
- u\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-UYYVYY12-4X24:
+
+ - MEDIA_BUS_FMT_UYYVYY12_4X24
+ - 0x2103
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - u\ :sub:`11`
+ - u\ :sub:`10`
+ - u\ :sub:`9`
+ - u\ :sub:`8`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - v\ :sub:`11`
+ - v\ :sub:`10`
+ - v\ :sub:`9`
+ - v\ :sub:`8`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-VYYUYY8-1X24:
+
+ - MEDIA_BUS_FMT_VYYUYY8_1X24
+ - 0x2100
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
* .. _MEDIA-BUS-FMT-YUV10-1X30:
- MEDIA_BUS_FMT_YUV10_1X30
@@ -7018,6 +7489,43 @@ the following codes.
- v\ :sub:`2`
- v\ :sub:`1`
- v\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-VUY10-1X30:
+
+ - MEDIA_BUS_FMT_VUY10_1X30
+ - 0x2102
+ -
+ -
+ -
+ - v\ :sub:`9`
+ - v\ :sub:`8`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ - u\ :sub:`9`
+ - u\ :sub:`8`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
* .. _MEDIA-BUS-FMT-UYYVYY10-0-5X30:
- MEDIA_BUS_FMT_UYYVYY10_0_5X30
@@ -7090,6 +7598,43 @@ the following codes.
- y\ :sub:`2`
- y\ :sub:`1`
- y\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-Y16-1X16:
+
+ - MEDIA_BUS_FMT_Y16_1X16
+ - 0x2105
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`15`
+ - y\ :sub:`14`
+ - y\ :sub:`13`
+ - y\ :sub:`12`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
* .. _MEDIA-BUS-FMT-AYUV8-1X32:
- MEDIA_BUS_FMT_AYUV8_1X32
@@ -7127,6 +7672,220 @@ the following codes.
- v\ :sub:`2`
- v\ :sub:`1`
- v\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-UYYVYY16-4X32:
+
+ - MEDIA_BUS_FMT_UYYVYY16_4X32
+ - 0x2106
+ -
+ - u\ :sub:`15`
+ - u\ :sub:`14`
+ - u\ :sub:`13`
+ - u\ :sub:`12`
+ - u\ :sub:`11`
+ - u\ :sub:`10`
+ - u\ :sub:`9`
+ - u\ :sub:`8`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ - y\ :sub:`15`
+ - y\ :sub:`14`
+ - y\ :sub:`13`
+ - y\ :sub:`12`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ - v\ :sub:`15`
+ - v\ :sub:`14`
+ - v\ :sub:`13`
+ - v\ :sub:`12`
+ - v\ :sub:`11`
+ - v\ :sub:`10`
+ - v\ :sub:`9`
+ - v\ :sub:`8`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ - y\ :sub:`15`
+ - y\ :sub:`14`
+ - y\ :sub:`13`
+ - y\ :sub:`12`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`15`
+ - y\ :sub:`14`
+ - y\ :sub:`13`
+ - y\ :sub:`12`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - y\ :sub:`15`
+ - y\ :sub:`14`
+ - y\ :sub:`13`
+ - y\ :sub:`12`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-UYVY16-2X32:
+
+ - MEDIA_BUS_FMT_UYVY16_2X32
+ - 0x2108
+ -
+ - u\ :sub:`15`
+ - u\ :sub:`14`
+ - u\ :sub:`13`
+ - u\ :sub:`12`
+ - u\ :sub:`11`
+ - u\ :sub:`10`
+ - u\ :sub:`9`
+ - u\ :sub:`8`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ - y\ :sub:`15`
+ - y\ :sub:`14`
+ - y\ :sub:`13`
+ - y\ :sub:`12`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
+ * -
+ -
+ -
+ - v\ :sub:`15`
+ - v\ :sub:`14`
+ - v\ :sub:`13`
+ - v\ :sub:`12`
+ - v\ :sub:`11`
+ - v\ :sub:`10`
+ - v\ :sub:`9`
+ - v\ :sub:`8`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ - y\ :sub:`15`
+ - y\ :sub:`14`
+ - y\ :sub:`13`
+ - y\ :sub:`12`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
.. raw:: latex
@@ -7315,6 +8074,47 @@ The following table list existing packed 36bit wide YUV formats.
- v\ :sub:`2`
- v\ :sub:`1`
- v\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-VUY12-1X36:
+
+ - MEDIA_BUS_FMT_VUY12_1X36
+ - 0x2104
+ -
+ - v\ :sub:`11`
+ - v\ :sub:`10`
+ - v\ :sub:`9`
+ - v\ :sub:`8`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ - u\ :sub:`11`
+ - u\ :sub:`10`
+ - u\ :sub:`9`
+ - u\ :sub:`8`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+ - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
.. raw:: latex
@@ -7485,6 +8285,78 @@ The following table list existing packed 48bit wide YUV formats.
- v\ :sub:`2`
- v\ :sub:`1`
- v\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-VUY16-1X48:
+
+ - MEDIA_BUS_FMT_VUY16_1X48
+ - 0x2107
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - v\ :sub:`15`
+ - v\ :sub:`14`
+ - v\ :sub:`13`
+ - v\ :sub:`12`
+ - v\ :sub:`11`
+ - v\ :sub:`10`
+ - v\ :sub:`9`
+ - v\ :sub:`8`
+ - v\ :sub:`7`
+ - v\ :sub:`6`
+ - v\ :sub:`5`
+ - v\ :sub:`4`
+ - v\ :sub:`3`
+ - v\ :sub:`2`
+ - v\ :sub:`1`
+ - v\ :sub:`0`
+ * -
+ -
+ -
+ - u\ :sub:`15`
+ - u\ :sub:`14`
+ - u\ :sub:`13`
+ - u\ :sub:`12`
+ - u\ :sub:`11`
+ - u\ :sub:`10`
+ - u\ :sub:`9`
+ - u\ :sub:`8`
+ - u\ :sub:`7`
+ - u\ :sub:`6`
+ - u\ :sub:`5`
+ - u\ :sub:`4`
+ - u\ :sub:`3`
+ - u\ :sub:`2`
+ - u\ :sub:`1`
+ - u\ :sub:`0`
+ - y\ :sub:`15`
+ - y\ :sub:`14`
+ - y\ :sub:`13`
+ - y\ :sub:`12`
+ - y\ :sub:`11`
+ - y\ :sub:`10`
+      - y\ :sub:`9`
+ - y\ :sub:`8`
+ - y\ :sub:`7`
+ - y\ :sub:`6`
+ - y\ :sub:`5`
+ - y\ :sub:`4`
+ - y\ :sub:`3`
+ - y\ :sub:`2`
+ - y\ :sub:`1`
+ - y\ :sub:`0`
* .. _MEDIA-BUS-FMT-UYYVYY16-0-5X48:
- MEDIA_BUS_FMT_UYYVYY16_0_5X48
diff --git a/Documentation/misc-devices/xilinx_flex.txt b/Documentation/misc-devices/xilinx_flex.txt
new file mode 100644
index 000000000000..c07593466abe
--- /dev/null
+++ b/Documentation/misc-devices/xilinx_flex.txt
@@ -0,0 +1,66 @@
+Kernel driver xilinx_flex
+============================
+
+Supported chips:
+Versal SoC
+
+Author:
+ Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>
+
+Description
+-----------
+
+Versal uses the Arteris FlexNoC interconnect instead of the ARM NIC. FlexNoC
+provides the capability to integrate performance counters in the interconnect.
+It has configurable probe points to monitor packets and forward them to an
+observer for logging. It supports read and write transaction counts for
+requests and responses.
+
+Features:
+---> Run-time programmable selection of packet probe points.
+---> Recording of traffic and link statistics.
+---> Separate read and write response and request count.
+
+SYSFS:
+
+counteridfpd
+ RW - shows the counter number selected for the FPD Flexnoc.
+
+counterfpd_rdreq
+ RO - shows the read request count for the FPD counters.
+
+counterfpdsrc
+ WO - sets the source of the FPD counter.
+
+counterfpd_wrrsp
+ RO - shows the write response count for the FPD counters.
+
+counterfpd_rdrsp
+ RO - shows the read response count for the FPD counters.
+
+counterfpd_wrreq
+ RO - shows the write request count for the FPD counters.
+
+counterfpdport
+ WO - sets the port number selected for the FPD Flexnoc.
+
+counteridlpd
+ RW - shows the counter number selected for the LPD Flexnoc.
+
+counterlpdport
+ WO - sets the port number selected for the LPD Flexnoc.
+
+counterlpd_rdreq
+ RO - shows the read request count for the LPD counters.
+
+counterlpd_wrreq
+ RO - shows the write request count for the LPD counters.
+
+counterlpd_wrrsp
+ RO - shows the write response count for the LPD counters.
+
+counterlpdsrc
+ WO - sets the source of the LPD counter.
+
+counterlpd_rdrsp
+ RO - shows the read response count for the LPD counters.
diff --git a/Documentation/misc-devices/xilinx_trafgen.txt b/Documentation/misc-devices/xilinx_trafgen.txt
new file mode 100644
index 000000000000..dadcdad74df1
--- /dev/null
+++ b/Documentation/misc-devices/xilinx_trafgen.txt
@@ -0,0 +1,97 @@
+Kernel driver xilinx_trafgen
+============================
+
+Supported chips:
+Zynq SoC, Xilinx 7 series FPGAs (Virtex, Kintex, Artix)
+
+Data Sheet:
+ http://www.xilinx.com/support/documentation/ip_documentation/axi_traffic_gen/v2_0/pg125-axi-traffic-gen.pdf
+
+Author:
+ Kedareswara Rao Appana <appanad@xilinx.com>
+
+Description
+-----------
+
+AXI Traffic Generator IP is a core that stresses the AXI4 interconnect and other
+AXI4 peripherals in the system. It generates a wide variety of AXI4 transactions
+based on the core programming.
+
+Features:
+---> Configurable option to generate and accept data according to different
+traffic profiles.
+---> Supports dependent/independent transaction between read/write master port
+with configurable delays.
+---> Programmable repeat count for each transaction with
+constant/increment/random address.
+---> External start/stop to generate traffic without processor intervention.
+---> Generates IP-specific traffic on AXI interface for pre-defined protocols.
+
+SYSFS:
+
+id
+ RO - shows the trafgen id.
+
+resource
+ RO - shows the baseaddress for the trafgen.
+
+master_start_stop
+ RW - monitors the master start logic.
+
+config_slave_status
+ RW - configure and monitors the slave status.
+
+err_sts
+ RW - get the error statistics/clear the errors.
+
+err_en
+ WO - enable the errors.
+
+intr_en
+ WO - enable the interrupts.
+
+last_valid_index
+ RW - gets the last valid index value.
+
+config_sts_show
+ RO - show the config status value.
+
+mram_clear
+ WO - clears the master ram.
+
+cram_clear
+ WO - clears the command ram.
+
+pram_clear
+ WO - clears the parameter ram.
+
+static_enable
+ RO - enables the static mode.
+
+static_burst_len
+ RW - gets/sets the static burst length.
+
+static_transferdone
+ RW - monitors the static transfer done status.
+
+reset_static_transferdone
+ RO - resets the static transferdone bit.
+
+stream_cfg
+ RW - sets the stream configuration parameters like delay.
+
+stream_tktsn
+ RW - TSTRB/TKEEP value for the last beat of the
+transfer set n. N can be 1 to 4.
+
+stream_enable
+ RO - enables the streaming mode.
+
+stream_transferlen
+ RW - get/set the streaming mode transfer length.
+
+stream_transfercnt
+ RW - get/set the streaming mode transfer count.
+
+loop_enable
+ RW - get/set loop enable value.
diff --git a/MAINTAINERS b/MAINTAINERS
index 80f67140e4f7..5e194a08da01 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2892,8 +2892,11 @@ F: include/uapi/linux/atm*
ATMEL MACB ETHERNET DRIVER
M: Nicolas Ferre <nicolas.ferre@microchip.com>
+R: Harini Katakam <harini.katakam@xilinx.com>
+L: git@xilinx.com
S: Supported
F: drivers/net/ethernet/cadence/
+W: https://xilinx-wiki.atlassian.net/wiki/spaces/A/pages/18841740/Macb+Driver
ATMEL MAXTOUCH DRIVER
M: Nick Dyer <nick@shmanahar.org>
@@ -3746,6 +3749,13 @@ S: Supported
F: Documentation/filesystems/caching/cachefiles.txt
F: fs/cachefiles/
+CADENCE I2C DRIVER
+M: Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>
+R: Venkata Visweswarachari Mallavarapu <vmallava@xilinx.com>
+L: git@xilinx.com
+F: drivers/i2c/busses/i2c-cadence.c
+W: https://xilinx-wiki.atlassian.net/wiki/spaces/A/pages/18842160/Cadence+I2C+Driver
+
CADENCE MIPI-CSI2 BRIDGES
M: Maxime Ripard <mripard@kernel.org>
L: linux-media@vger.kernel.org
@@ -4013,6 +4023,7 @@ F: Documentation/translations/zh_CN/
CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER
M: Peter Chen <Peter.Chen@nxp.com>
+R: Anurag Kumar Vulisha <anurag.kumar.vulisha@xilinx.com>
L: linux-usb@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git
@@ -4863,6 +4874,7 @@ F: drivers/usb/dwc2/
DESIGNWARE USB3 DRD IP DRIVER
M: Felipe Balbi <balbi@kernel.org>
+R: Anurag Kumar Vulisha <anurag.kumar.vulisha@xilinx.com>
L: linux-usb@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
@@ -5782,6 +5794,15 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/gpu/xen-front.rst
F: drivers/gpu/drm/xen/
+DRM DRIVERS FOR XILINX
+M: Hyun Kwon <hyun.kwon@xilinx.com>
+M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+L: dri-devel@lists.freedesktop.org
+S: Maintained
+F: drivers/gpu/drm/xlnx/
+F: Documentation/devicetree/bindings/display/xlnx/
+T: git git://anongit.freedesktop.org/drm/drm-misc
+
DRM DRIVERS FOR ZTE ZX
M: Shawn Guo <shawnguo@kernel.org>
L: dri-devel@lists.freedesktop.org
@@ -9629,6 +9650,12 @@ F: drivers/ata/pata_ftide010.c
F: drivers/ata/sata_gemini.c
F: drivers/ata/sata_gemini.h
+LIBATA SATA AHCI CEVA CONTROLLER DRIVER
+M: Anurag Kumar Vulisha <anurag.kumar.vulisha@xilinx.com>
+S: Maintained
+F: drivers/ata/ahci_ceva.c
+F: Documentation/devicetree/bindings/ata/ahci-ceva.txt
+
LIBATA SATA AHCI PLATFORM devices support
M: Hans de Goede <hdegoede@redhat.com>
M: Jens Axboe <axboe@kernel.dk>
@@ -17660,6 +17687,7 @@ F: drivers/net/wireless/rndis_wlan.c
USB XHCI DRIVER
M: Mathias Nyman <mathias.nyman@intel.com>
+R: Anurag Kumar Vulisha <anurag.kumar.vulisha@xilinx.com>
L: linux-usb@vger.kernel.org
S: Supported
F: drivers/usb/host/pci-quirks*
@@ -17717,6 +17745,11 @@ F: Documentation/driver-api/uio-howto.rst
F: drivers/uio/
F: include/linux/uio_driver.h
+USERSPACE I/O (UIO) DRIVER FOR XILINX AI ENGINE NPI
+M: Hyun Kwon <hyun.kwon@xilinx.com>
+S: Maintained
+F: Documentation/devicetree/bindings/soc/xilinx/xlnx,ai-engine-npi.txt
+
UTIL-LINUX PACKAGE
M: Karel Zak <kzak@redhat.com>
L: util-linux@vger.kernel.org
@@ -18565,10 +18598,207 @@ F: fs/xfs/
F: include/uapi/linux/dqblk_xfs.h
F: include/uapi/linux/fsmap.h
+XILINX AUDIO FORMATTER (ASoC) DRIVER
+M: Maruthi Srinivas Bayyavarapu <maruthi.srinivas.bayyavarapu@xilinx.com>
+R: Vishal Sagar <vishal.sagar@xilinx.com>
+S: Maintained
+L: git@xilinx.com
+F: Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt
+F: sound/soc/xilinx/xlnx_formatter_pcm.c
+
+XILINX AXI DMAENGINE DRIVER
+M: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
+R: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+R: Harini Katakam <harini.katakam@xilinx.com>
+S: Maintained
+F: drivers/dma/xilinx/xilinx_dma.c
+F: Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
+
XILINX AXI ETHERNET DRIVER
M: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
S: Maintained
F: drivers/net/ethernet/xilinx/xilinx_axienet*
+F: Documentation/devicetree/bindings/net/xilinx_axienet.txt
+
+XILINX AXI SPI DRIVER
+M: Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>
+R: Naga Sureshkumar Relli <naga.sureshkumar.relli@xilinx.com>
+S: Maintained
+F: drivers/spi/spi-xilinx.c
+F: Documentation/devicetree/bindings/spi/spi-xilinx.txt
+
+XILINX FLEXNOC PERFORMANCE MONITOR DRIVER
+M: Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>
+R: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
+R: Srinivas Goud <srinivas.goud@xilinx.com>
+S: Maintained
+F: drivers/misc/xilinx_flex_pm.c
+F: Documentation/devicetree/bindings/misc/xlnx,flexnoc-pm.txt
+
+XILINX AXI USB DRIVER
+M: Anurag Kumar Vulisha <anurag.kumar.vulisha@xilinx.com>
+S: Maintained
+F: drivers/usb/gadget/udc/udc-xilinx.c
+F: Documentation/devicetree/bindings/usb/udc-xilinx.txt
+
+XILINX AXI PERFORMANCE MONITOR DRIVER
+M: Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>
+S: Maintained
+F: drivers/uio/uio_xilinx_apm.c
+F: Documentation/devicetree/bindings/uio/xilinx_apm.txt
+
+XILINX CAN DRIVER
+M: Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>
+R: Naga Sureshkumar Relli <naga.sureshkumar.relli@xilinx.com>
+R: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+S: Maintained
+F: drivers/net/can/xilinx_can.c
+F: Documentation/devicetree/bindings/net/can/xilinx_can.txt
+
+XILINX DMA FRAMEBUFFER READ/WRITE DRIVER
+M: Vishal Sagar <vishal.sagar@xilinx.com>
+R: Hyun Kwon <hyun.kwon@xilinx.com>
+S: Maintained
+L: git@xilinx.com
+F: Documentation/devicetree/bindings/dma/xilinx/xilinx_frmbuf.txt
+F: drivers/dma/xilinx/xilinx_frmbuf.c
+F: include/linux/dma/xilinx_frmbuf.h
+
+XILINX EMACLITE DRIVER
+M: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
+R: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+R: Harini Katakam <harini.katakam@xilinx.com>
+S: Maintained
+F: drivers/net/ethernet/xilinx/xilinx_emaclite.c
+F: Documentation/devicetree/bindings/net/xilinx_emaclite.txt
+
+XILINX GMII2RGMII DRIVER
+M: Harini Katakam <harini.katakam@xilinx.com>
+R: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
+R: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+S: Maintained
+F: drivers/net/phy/xilinx_gmii2rgmii.c
+W: https://xilinx-wiki.atlassian.net/wiki/spaces/A/pages/18842246/Xilinx+GMII2RGMII+convertor
+
+XILINX GQSPI ZYNQMP SPI DRIVER
+M: Naga Sureshkumar Relli <naga.sureshkumar.relli@xilinx.com>
+R: Amit Kumar Mahapatra <amit.kumar-mahapatra@xilinx.com>
+F: drivers/spi/spi-zynqmp-gqspi.c
+W: https://xilinx-wiki.atlassian.net/wiki/spaces/A/pages/18841754/Zynqmp+QSPI+Driver
+
+XILINX I2S AUDIO (ASoC) DRIVER
+M: Maruthi Srinivas Bayyavarapu <maruthi.srinivas.bayyavarapu@xilinx.com>
+R: Vishal Sagar <vishal.sagar@xilinx.com>
+S: Maintained
+L: git@xilinx.com
+F: Documentation/devicetree/bindings/sound/xlnx,i2s.txt
+F: sound/soc/xilinx/xlnx_i2s.c
+
+XILINX MEDIA AXIS SWITCH DRIVER
+M: Vishal Sagar <vishal.sagar@xilinx.com>
+R: Hyun Kwon <hyun.kwon@xilinx.com>
+S: Maintained
+L: git@xilinx.com
+F: Documentation/devicetree/bindings/media/xilinx/xlnx,v-axi4s-switch.txt
+F: drivers/media/platform/xilinx/xilinx-axis-switch.c
+
+XILINX MEDIA CSI2RXSS DRIVER
+M: Vishal Sagar <vishal.sagar@xilinx.com>
+R: Hyun Kwon <hyun.kwon@xilinx.com>
+L: git@xilinx.com
+S: Maintained
+F: Documentation/devicetree/bindings/media/xilinx/xlnx,csi2rxss.txt
+F: drivers/media/platform/xilinx/xilinx-csi2rxss.c
+F: include/uapi/linux/xilinx-csi2rxss.h
+
+XILINX MEDIA DEMOSAIC DRIVER
+M: Vishal Sagar <vishal.sagar@xilinx.com>
+R: Hyun Kwon <hyun.kwon@xilinx.com>
+L: git@xilinx.com
+S: Maintained
+F: Documentation/devicetree/bindings/media/xilinx/xlnx,v-demosaic.txt
+F: drivers/media/platform/xilinx/xilinx-demosaic.c
+
+XILINX MEDIA GAMMA DRIVER
+M: Vishal Sagar <vishal.sagar@xilinx.com>
+R: Hyun Kwon <hyun.kwon@xilinx.com>
+S: Maintained
+L: git@xilinx.com
+F: Documentation/devicetree/bindings/media/xilinx/xlnx,v-gamma-lut.txt
+F: drivers/media/platform/xilinx/xilinx-gamma.c
+F: drivers/media/platform/xilinx/xilinx-gamma-coeff.h
+
+XILINX MEDIA SDI RX DRIVER
+M: Vishal Sagar <vishal.sagar@xilinx.com>
+R: Hyun Kwon <hyun.kwon@xilinx.com>
+S: Maintained
+L: git@xilinx.com
+F: Documentation/devicetree/bindings/media/xilinx/xlnx,sdirxss.txt
+F: drivers/media/platform/xilinx/xilinx-sdirxss.c
+F: include/uapi/linux/xilinx-sdirxss.h
+
+XILINX MEDIA VPSS COLOR SPACE CONVERTER DRIVER
+M: Vishal Sagar <vishal.sagar@xilinx.com>
+R: Hyun Kwon <hyun.kwon@xilinx.com>
+L: git@xilinx.com
+S: Maintained
+F: Documentation/devicetree/bindings/media/xilinx/xlnx,v-vpss-csc.txt
+F: drivers/media/platform/xilinx/xilinx-vpss-csc.c
+
+XILINX MEDIA VPSS SCALER DRIVER
+M: Vishal Sagar <vishal.sagar@xilinx.com>
+R: Hyun Kwon <hyun.kwon@xilinx.com>
+S: Maintained
+L: git@xilinx.com
+F: Documentation/devicetree/bindings/media/xilinx/xlnx,v-vpss-scaler.txt
+F: drivers/media/platform/xilinx/xilinx-vpss-scaler.c
+
+XILINX PL SOUND CARD (ASoC) DRIVER
+M: Maruthi Srinivas Bayyavarapu <maruthi.srinivas.bayyavarapu@xilinx.com>
+R: Vishal Sagar <vishal.sagar@xilinx.com>
+S: Maintained
+L: git@xilinx.com
+F: sound/soc/xilinx/xlnx_pl_snd_card.c
+
+XILINX QSPI ZYNQ SPI DRIVER
+M: Naga Sureshkumar Relli <naga.sureshkumar.relli@xilinx.com>
+R: Amit Kumar Mahapatra <amit.kumar-mahapatra@xilinx.com>
+F: drivers/spi/spi-zynq-qspi.c
+W: https://xilinx-wiki.atlassian.net/wiki/spaces/A/pages/18842262/Zynq+QSPI+Driver
+
+XILINX RTC ZYNQMP DRIVER
+M: Neeli Srinivas <srinivas.neeli@xilinx.com>
+R: Srinivas Goud <srinivas.goud@xilinx.com>
+R: Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>
+S: Maintained
+F: drivers/rtc/rtc-zynqmp.c
+
+XILINX SD-FEC IP CORES
+M: Derek Kiernan <derek.kiernan@xilinx.com>
+M: Dragan Cvetic <dragan.cvetic@xilinx.com>
+S: Maintained
+F: Documentation/devicetree/bindings/misc/xlnx,sd-fec.txt
+F: Documentation/misc-devices/xilinx_sdfec.rst
+F: drivers/misc/xilinx_sdfec.c
+F: drivers/misc/Kconfig
+F: drivers/misc/Makefile
+F: include/uapi/misc/xilinx_sdfec.h
+
+XILINX SDI AUDIO (ASoC) DRIVER
+M: Maruthi Srinivas Bayyavarapu <maruthi.srinivas.bayyavarapu@xilinx.com>
+R: Vishal Sagar <vishal.sagar@xilinx.com>
+S: Maintained
+L: git@xilinx.com
+F: Documentation/devicetree/bindings/sound/xlnx,v-uhdsdi-audio.txt
+F: sound/soc/xilinx/xlnx_sdi_audio.c
+
+XILINX SPDIF AUDIO (ASoC) DRIVER
+M: Maruthi Srinivas Bayyavarapu <maruthi.srinivas.bayyavarapu@xilinx.com>
+R: Vishal Sagar <vishal.sagar@xilinx.com>
+S: Maintained
+L: git@xilinx.com
+F: Documentation/devicetree/bindings/sound/xlnx,spdif.txt
+F: sound/soc/xilinx/xlnx_spdif.c
XILINX CAN DRIVER
M: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
@@ -18595,6 +18825,12 @@ L: linux-serial@vger.kernel.org
S: Maintained
F: drivers/tty/serial/uartlite.c
+XILINX UARTPS DRIVER
+M: Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>
+S: Maintained
+F: drivers/tty/serial/xilinx_uartps.c
+F: Documentation/devicetree/bindings/serial/cdns,uart.txt
+
XILINX VIDEO IP CORES
M: Hyun Kwon <hyun.kwon@xilinx.com>
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
@@ -18605,6 +18841,71 @@ F: Documentation/devicetree/bindings/media/xilinx/
F: drivers/media/platform/xilinx/
F: include/uapi/linux/xilinx-v4l2-controls.h
+XILINX ZYNQ FPGA MANAGER DRIVER
+M: Nava Kishore Manne <nava.manne@xilinx.com>
+R: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+S: Maintained
+F: drivers/fpga/zynq-fpga.c
+F: Documentation/devicetree/bindings/fpga/xilinx-zynq-fpga-mgr.txt
+
+XILINX ZYNQMP AES DRIVER
+M: Kalyani Akula <kalyani.akula@xilinx.com>
+M: Nava Kishore Manne <nava.manne@xilinx.com>
+R: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+S: Maintained
+F: drivers/crypto/zynqmp-aes.c
+F: Documentation/devicetree/bindings/crypto/xlnx,zynqmp-aes.txt
+
+XILINX ZYNQMP DMAENGINE DRIVER
+M: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+R: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
+R: Harini Katakam <harini.katakam@xilinx.com>
+S: Maintained
+F: drivers/dma/xilinx/zynqmp_dma.c
+F: Documentation/devicetree/bindings/dma/xilinx/zynqmp_dma.txt
+
+XILINX ZYNQMP FPGA MANAGER DRIVER
+M: Nava Kishore Manne <nava.manne@xilinx.com>
+R: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+S: Maintained
+F: drivers/fpga/zynqmp-fpga.c
+F: Documentation/devicetree/bindings/fpga/xilinx-zynqmp-fpga-mgr.txt
+
+ZYNQMP IPI MAILBOX CONTROLLER DRIVER
+M: Wendy Liang <wendy.liang@xilinx.com>
+S: Maintained
+F: drivers/mailbox/zynqmp-ipi-mailbox.c
+F: include/linux/mailbox/zynqmp-ipi-message.h
+F: Documentation/devicetree/bindings/mailbox/xlnx,zynqmp-ipi-mailbox.txt
+
+XILINX ZYNQMP PHY DRIVER
+M: Anurag Kumar Vulisha <anurag.kumar.vulisha@xilinx.com>
+S: Maintained
+F: drivers/phy/phy-zynqmp.c
+F: Documentation/devicetree/bindings/phy/phy-zynqmp.txt
+
+XILINX ZYNQMP R5 REMOTE PROCESSOR (REMOTEPROC) SUBSYSTEM
+M: Wendy Liang <wendy.liang@xilinx.com>
+S: Maintained
+F: Documentation/devicetree/bindings/remoteproc/xilinx,zynqmp-r5-remoteproc.txt
+F: drivers/remoteproc/zynqmp_r5_remoteproc.c
+
+XILINX ZYNQMP RSA DRIVER
+M: Nava Kishore Manne <nava.manne@xilinx.com>
+M: Kalyani Akula <kalyani.akula@xilinx.com>
+R: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+S: Maintained
+F: drivers/crypto/zynqmp-rsa.c
+F: Documentation/devicetree/bindings/crypto/zynqmp-rsa.txt
+
+XILINX ZYNQMP SHA DRIVER
+M: Nava Kishore Manne <nava.manne@xilinx.com>
+M: Kalyani Akula <kalyani.akula@xilinx.com>
+R: Appana Durga Kedareswara rao <appana.durga.rao@xilinx.com>
+S: Maintained
+F: drivers/crypto/zynqmp-sha.c
+F: Documentation/devicetree/bindings/crypto/zynqmp-sha.txt
+
XILLYBUS DRIVER
M: Eli Billauer <eli.billauer@gmail.com>
L: linux-kernel@vger.kernel.org
diff --git a/arch/arm/boot/dts/zynq-7000.dtsi b/arch/arm/boot/dts/zynq-7000.dtsi
index db3899b07992..0cc3f84b37dd 100644
--- a/arch/arm/boot/dts/zynq-7000.dtsi
+++ b/arch/arm/boot/dts/zynq-7000.dtsi
@@ -1,6 +1,9 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2011 - 2014 Xilinx
+ * Xilinx Zynq 7000 DTSI
+ * Describes the hardware common to all Zynq 7000-based boards.
+ *
+ * Copyright (C) 2011 - 2015 Xilinx
*/
/ {
@@ -93,6 +96,7 @@
};
amba: amba {
+ u-boot,dm-pre-reloc;
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -188,6 +192,13 @@
reg = <0xf8006000 0x1000>;
};
+ ocmc: ocmc@f800c000 {
+ compatible = "xlnx,zynq-ocmc-1.0";
+ interrupt-parent = <&intc>;
+ interrupts = <0 3 4>;
+ reg = <0xf800c000 0x1000>;
+ };
+
uart0: serial@e0000000 {
compatible = "xlnx,xuartps", "cdns,uart-r1p8";
status = "disabled";
@@ -230,6 +241,45 @@
#size-cells = <0>;
};
+ qspi: spi@e000d000 {
+ clock-names = "ref_clk", "pclk";
+ clocks = <&clkc 10>, <&clkc 43>;
+ compatible = "xlnx,zynq-qspi-1.0";
+ status = "disabled";
+ interrupt-parent = <&intc>;
+ interrupts = <0 19 4>;
+ reg = <0xe000d000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ smcc: memory-controller@e000e000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ status = "disabled";
+ clock-names = "memclk", "apb_pclk";
+ clocks = <&clkc 11>, <&clkc 44>;
+ compatible = "arm,pl353-smc-r2p1", "arm,primecell";
+ interrupt-parent = <&intc>;
+ interrupts = <0 18 4>;
+ ranges;
+ reg = <0xe000e000 0x1000>;
+ nand0: flash@e1000000 {
+ status = "disabled";
+ compatible = "arm,pl353-nand-r2p1";
+ reg = <0xe1000000 0x1000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+ nor0: flash@e2000000 {
+ status = "disabled";
+ compatible = "cfi-flash";
+ reg = <0xe2000000 0x2000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+ };
+
gem0: ethernet@e000b000 {
compatible = "cdns,zynq-gem", "cdns,gem";
reg = <0xe000b000 0x1000>;
@@ -273,15 +323,17 @@
};
slcr: slcr@f8000000 {
+ u-boot,dm-pre-reloc;
#address-cells = <1>;
#size-cells = <1>;
compatible = "xlnx,zynq-slcr", "syscon", "simple-mfd";
reg = <0xF8000000 0x1000>;
ranges;
clkc: clkc@100 {
+ u-boot,dm-pre-reloc;
#clock-cells = <1>;
compatible = "xlnx,ps7-clkc";
- fclk-enable = <0>;
+ fclk-enable = <0xf>;
clock-output-names = "armpll", "ddrpll", "iopll", "cpu_6or4x",
"cpu_3or2x", "cpu_2x", "cpu_1x", "ddr2x", "ddr3x",
"dci", "lqspi", "smc", "pcap", "gem0", "gem1",
@@ -330,14 +382,19 @@
devcfg: devcfg@f8007000 {
compatible = "xlnx,zynq-devcfg-1.0";
- reg = <0xf8007000 0x100>;
interrupt-parent = <&intc>;
interrupts = <0 8 4>;
- clocks = <&clkc 12>;
- clock-names = "ref_clk";
+ reg = <0xf8007000 0x100>;
+ clocks = <&clkc 12>, <&clkc 15>, <&clkc 16>, <&clkc 17>, <&clkc 18>;
+ clock-names = "ref_clk", "fclk0", "fclk1", "fclk2", "fclk3";
syscon = <&slcr>;
};
+ efuse: efuse@f800d000 {
+ compatible = "xlnx,zynq-efuse";
+ reg = <0xf800d000 0x20>;
+ };
+
global_timer: timer@f8f00200 {
compatible = "arm,cortex-a9-global-timer";
reg = <0xf8f00200 0x20>;
diff --git a/arch/arm/boot/dts/zynq-cc108.dts b/arch/arm/boot/dts/zynq-cc108.dts
index 8b9ab9bba23b..64d73ecbc592 100644
--- a/arch/arm/boot/dts/zynq-cc108.dts
+++ b/arch/arm/boot/dts/zynq-cc108.dts
@@ -18,6 +18,7 @@
aliases {
ethernet0 = &gem0;
serial0 = &uart0;
+ spi0 = &qspi;
};
chosen {
@@ -52,6 +53,45 @@
};
};
+&qspi {
+ status = "okay";
+ is-dual = <0>;
+ num-cs = <1>;
+ flash@0 { /* 16 MB */
+ compatible = "n25q128a11";
+ reg = <0x0>;
+ spi-max-frequency = <50000000>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@0 {
+ label = "qspi-fsbl-uboot-bs";
+ reg = <0x0 0x400000>; /* 4MB */
+ };
+ partition@400000 {
+ label = "qspi-linux";
+ reg = <0x400000 0x400000>; /* 4MB */
+ };
+ partition@800000 {
+ label = "qspi-rootfs";
+ reg = <0x800000 0x400000>; /* 4MB */
+ };
+ partition@c00000 {
+ label = "qspi-devicetree";
+ reg = <0xc00000 0x100000>; /* 1MB */
+ };
+ partition@d00000 {
+ label = "qspi-scratch";
+ reg = <0xd00000 0x200000>; /* 2MB */
+ };
+ partition@f00000 {
+ label = "qspi-uboot-env";
+ reg = <0xf00000 0x100000>; /* 1MB */
+ };
+ };
+};
+
&sdhci1 {
status = "okay";
broken-cd ;
@@ -59,6 +99,7 @@
};
&uart0 {
+ u-boot,dm-pre-reloc;
status = "okay";
};
diff --git a/arch/arm/boot/dts/zynq-zc702.dts b/arch/arm/boot/dts/zynq-zc702.dts
index 27cd6cb52f1b..e3c64ca487ed 100644
--- a/arch/arm/boot/dts/zynq-zc702.dts
+++ b/arch/arm/boot/dts/zynq-zc702.dts
@@ -1,6 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2011 - 2014 Xilinx
+ * Copyright (C) 2011 - 2015 Xilinx
* Copyright (C) 2012 National Instruments Corp.
*/
/dts-v1/;
@@ -14,7 +14,9 @@
ethernet0 = &gem0;
i2c0 = &i2c0;
serial0 = &uart1;
+ spi0 = &qspi;
mmc0 = &sdhci0;
+ usb0 = &usb0;
};
memory@0 {
@@ -56,9 +58,12 @@
};
};
- usb_phy0: phy0 {
- compatible = "usb-nop-xceiv";
+ usb_phy0: phy0@e0002000 {
+ compatible = "ulpi-phy";
#phy-cells = <0>;
+ reg = <0xe0002000 0x1000>;
+ view-port = <0x0170>;
+ drv-vbus;
};
};
@@ -85,6 +90,8 @@
phy-handle = <&ethernet_phy>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_gem0_default>;
+ phy-reset-gpio = <&gpio0 11 0>;
+ phy-reset-active-low;
ethernet_phy: ethernet-phy@7 {
reg = <7>;
@@ -100,8 +107,11 @@
&i2c0 {
status = "okay";
clock-frequency = <400000>;
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c0_default>;
+ pinctrl-1 = <&pinctrl_i2c0_gpio>;
+ scl-gpios = <&gpio0 50 0>;
+ sda-gpios = <&gpio0 51 0>;
i2c-mux@74 {
compatible = "nxp,pca9548";
@@ -292,6 +302,19 @@
};
};
+ pinctrl_i2c0_gpio: i2c0-gpio {
+ mux {
+ groups = "gpio0_50_grp", "gpio0_51_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_50_grp", "gpio0_51_grp";
+ slew-rate = <0>;
+ io-standard = <1>;
+ };
+ };
+
pinctrl_sdhci0_default: sdhci0-default {
mux {
groups = "sdio0_2_grp";
@@ -380,13 +403,51 @@
};
};
+&qspi {
+ u-boot,dm-pre-reloc;
+ status = "okay";
+ is-dual = <0>;
+ num-cs = <1>;
+ flash@0 {
+ compatible = "n25q128a11";
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <50000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@0 {
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@100000 {
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@600000 {
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@620000 {
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ partition@c00000 {
+ label = "qspi-bitstream";
+ reg = <0xC00000 0x400000>;
+ };
+ };
+};
+
&sdhci0 {
+ u-boot,dm-pre-reloc;
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sdhci0_default>;
};
&uart1 {
+ u-boot,dm-pre-reloc;
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1_default>;
diff --git a/arch/arm/boot/dts/zynq-zc706.dts b/arch/arm/boot/dts/zynq-zc706.dts
index 77943c16d33f..5c9f14d4dd46 100644
--- a/arch/arm/boot/dts/zynq-zc706.dts
+++ b/arch/arm/boot/dts/zynq-zc706.dts
@@ -1,6 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2011 - 2014 Xilinx
+ * Copyright (C) 2011 - 2015 Xilinx
* Copyright (C) 2012 National Instruments Corp.
*/
/dts-v1/;
@@ -14,6 +14,7 @@
ethernet0 = &gem0;
i2c0 = &i2c0;
serial0 = &uart1;
+ spi0 = &qspi;
mmc0 = &sdhci0;
};
@@ -27,9 +28,12 @@
stdout-path = "serial0:115200n8";
};
- usb_phy0: phy0 {
- compatible = "usb-nop-xceiv";
+ usb_phy0: phy0@e0002000 {
+ compatible = "ulpi-phy";
#phy-cells = <0>;
+ reg = <0xe0002000 0x1000>;
+ view-port = <0x0170>;
+ drv-vbus;
};
};
@@ -303,13 +307,51 @@
};
};
+&qspi {
+ u-boot,dm-pre-reloc;
+ status = "okay";
+ is-dual = <1>;
+ num-cs = <1>;
+ flash@0 {
+ compatible = "n25q128a11";
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <50000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@0 {
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@100000 {
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@600000 {
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@620000 {
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ partition@c00000 {
+ label = "qspi-bitstream";
+ reg = <0xC00000 0x400000>;
+ };
+ };
+};
+
&sdhci0 {
+ u-boot,dm-pre-reloc;
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sdhci0_default>;
};
&uart1 {
+ u-boot,dm-pre-reloc;
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1_default>;
@@ -322,3 +364,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb0_default>;
};
+
+&watchdog0 {
+ reset-on-timeout;
+};
diff --git a/arch/arm/boot/dts/zynq-zc770-xm010.dts b/arch/arm/boot/dts/zynq-zc770-xm010.dts
index 0dd352289a45..4e59d0c2ae91 100644
--- a/arch/arm/boot/dts/zynq-zc770-xm010.dts
+++ b/arch/arm/boot/dts/zynq-zc770-xm010.dts
@@ -15,6 +15,7 @@
ethernet0 = &gem0;
i2c0 = &i2c0;
serial0 = &uart1;
+ spi0 = &qspi;
spi1 = &spi1;
};
@@ -57,7 +58,41 @@
compatible = "atmel,24c02";
reg = <0x52>;
};
+};
+&qspi {
+ status = "okay";
+ is-dual = <0>;
+ num-cs = <1>;
+ flash@0 {
+ compatible = "n25q128a11";
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <50000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@0 {
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@100000 {
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@600000 {
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@620000 {
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ partition@c00000 {
+ label = "qspi-bitstream";
+ reg = <0xC00000 0x400000>;
+ };
+ };
};
&sdhci0 {
@@ -85,6 +120,7 @@
};
&uart1 {
+ u-boot,dm-pre-reloc;
status = "okay";
};
diff --git a/arch/arm/boot/dts/zynq-zc770-xm011.dts b/arch/arm/boot/dts/zynq-zc770-xm011.dts
index b7f65862c022..76e9ecc0d01e 100644
--- a/arch/arm/boot/dts/zynq-zc770-xm011.dts
+++ b/arch/arm/boot/dts/zynq-zc770-xm011.dts
@@ -47,6 +47,47 @@
};
};
+&nand0 {
+ status = "okay";
+ arm,nand-cycle-t0 = <0x4>;
+ arm,nand-cycle-t1 = <0x4>;
+ arm,nand-cycle-t2 = <0x1>;
+ arm,nand-cycle-t3 = <0x2>;
+ arm,nand-cycle-t4 = <0x2>;
+ arm,nand-cycle-t5 = <0x2>;
+ arm,nand-cycle-t6 = <0x4>;
+
+ partition@0 {
+ label = "nand-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@100000 {
+ label = "nand-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@600000 {
+ label = "nand-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@620000 {
+ label = "nand-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ partition@c00000 {
+ label = "nand-bitstream";
+ reg = <0xC00000 0x400000>;
+ };
+};
+
+&smcc {
+ status = "okay";
+ arm,addr25 = <0x0>;
+ arm,nor-chip-sel0 = <0x0>;
+ arm,nor-chip-sel1 = <0x0>;
+ arm,sram-chip-sel0 = <0x0>;
+ arm,sram-chip-sel1 = <0x0>;
+};
+
&spi0 {
status = "okay";
num-cs = <4>;
@@ -54,6 +95,7 @@
};
&uart1 {
+ u-boot,dm-pre-reloc;
status = "okay";
};
diff --git a/arch/arm/boot/dts/zynq-zc770-xm012.dts b/arch/arm/boot/dts/zynq-zc770-xm012.dts
index d2359b789eb8..849230ee4072 100644
--- a/arch/arm/boot/dts/zynq-zc770-xm012.dts
+++ b/arch/arm/boot/dts/zynq-zc770-xm012.dts
@@ -53,6 +53,47 @@
};
};
+&nor0 {
+ status = "okay";
+ bank-width = <1>;
+ xlnx,sram-cycle-t0 = <0xb>;
+ xlnx,sram-cycle-t1 = <0xb>;
+ xlnx,sram-cycle-t2 = <0x4>;
+ xlnx,sram-cycle-t3 = <0x4>;
+ xlnx,sram-cycle-t4 = <0x3>;
+ xlnx,sram-cycle-t5 = <0x3>;
+ xlnx,sram-cycle-t6 = <0x2>;
+ partition@0 {
+ label = "nor-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@100000 {
+ label = "nor-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@600000 {
+ label = "nor-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@620000 {
+ label = "nor-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ partition@c00000 {
+ label = "nor-bitstream";
+ reg = <0xC00000 0x400000>;
+ };
+};
+
+&smcc {
+ status = "okay";
+ arm,addr25 = <0x1>;
+ arm,nor-chip-sel0 = <0x1>;
+ arm,nor-chip-sel1 = <0x0>;
+ arm,sram-chip-sel0 = <0x0>;
+ arm,sram-chip-sel1 = <0x0>;
+};
+
&spi1 {
status = "okay";
num-cs = <4>;
@@ -60,5 +101,6 @@
};
&uart1 {
+ u-boot,dm-pre-reloc;
status = "okay";
};
diff --git a/arch/arm/boot/dts/zynq-zc770-xm013.dts b/arch/arm/boot/dts/zynq-zc770-xm013.dts
index 4ae2c85df3a0..e2c74675decb 100644
--- a/arch/arm/boot/dts/zynq-zc770-xm013.dts
+++ b/arch/arm/boot/dts/zynq-zc770-xm013.dts
@@ -15,6 +15,7 @@
ethernet0 = &gem1;
i2c0 = &i2c1;
serial0 = &uart0;
+ spi0 = &qspi;
spi1 = &spi0;
};
@@ -58,6 +59,41 @@
};
};
+&qspi {
+ status = "okay";
+ is-dual = <1>;
+ num-cs = <1>;
+ flash@0 {
+ compatible = "n25q128a11";
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <50000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@0 {
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@100000 {
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@600000 {
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@620000 {
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ partition@c00000 {
+ label = "qspi-bitstream";
+ reg = <0xC00000 0x400000>;
+ };
+ };
+};
+
&spi0 {
status = "okay";
num-cs = <4>;
@@ -74,5 +110,6 @@
};
&uart0 {
+ u-boot,dm-pre-reloc;
status = "okay";
};
diff --git a/arch/arm/boot/dts/zynq-zed.dts b/arch/arm/boot/dts/zynq-zed.dts
index 6a5a93aa6552..2d531a6ea2ca 100644
--- a/arch/arm/boot/dts/zynq-zed.dts
+++ b/arch/arm/boot/dts/zynq-zed.dts
@@ -1,6 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2011 - 2014 Xilinx
+ * Copyright (C) 2011 - 2015 Xilinx
* Copyright (C) 2012 National Instruments Corp.
*/
/dts-v1/;
@@ -13,6 +13,7 @@
aliases {
ethernet0 = &gem0;
serial0 = &uart1;
+ spi0 = &qspi;
mmc0 = &sdhci0;
};
@@ -26,9 +27,12 @@
stdout-path = "serial0:115200n8";
};
- usb_phy0: phy0 {
- compatible = "usb-nop-xceiv";
+ usb_phy0: phy0@e0002000 {
+ compatible = "ulpi-phy";
#phy-cells = <0>;
+ reg = <0xe0002000 0x1000>;
+ view-port = <0x0170>;
+ drv-vbus;
};
};
@@ -47,11 +51,50 @@
};
};
+&qspi {
+ u-boot,dm-pre-reloc;
+ status = "okay";
+ is-dual = <0>;
+ num-cs = <1>;
+ flash@0 {
+ compatible = "spansion,s25fl256s1", "jedec,spi-nor";
+ reg = <0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <50000000>;
+ m25p,fast-read;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@0 {
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@100000 {
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@600000 {
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@620000 {
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ partition@c00000 {
+ label = "qspi-bitstream";
+ reg = <0xC00000 0x400000>;
+ };
+ };
+};
+
&sdhci0 {
+ u-boot,dm-pre-reloc;
status = "okay";
};
&uart1 {
+ u-boot,dm-pre-reloc;
status = "okay";
};
diff --git a/arch/arm/boot/dts/zynq-zybo.dts b/arch/arm/boot/dts/zynq-zybo.dts
index 755f6f109d5a..0ac54ebbdc8b 100644
--- a/arch/arm/boot/dts/zynq-zybo.dts
+++ b/arch/arm/boot/dts/zynq-zybo.dts
@@ -1,6 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2011 - 2014 Xilinx
+ * Copyright (C) 2011 - 2015 Xilinx
* Copyright (C) 2012 National Instruments Corp.
*/
/dts-v1/;
@@ -13,6 +13,7 @@
aliases {
ethernet0 = &gem0;
serial0 = &uart1;
+ spi0 = &qspi;
mmc0 = &sdhci0;
};
@@ -48,11 +49,18 @@
};
};
+&qspi {
+ u-boot,dm-pre-reloc;
+ status = "okay";
+};
+
&sdhci0 {
+ u-boot,dm-pre-reloc;
status = "okay";
};
&uart1 {
+ u-boot,dm-pre-reloc;
status = "okay";
};
diff --git a/arch/arm/configs/xilinx_zynq_defconfig b/arch/arm/configs/xilinx_zynq_defconfig
new file mode 100644
index 000000000000..ce07e7dcff4b
--- /dev/null
+++ b/arch/arm/configs/xilinx_zynq_defconfig
@@ -0,0 +1,239 @@
+CONFIG_LOCALVERSION="-xilinx"
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL_SYSCALL=y
+# CONFIG_BUG is not set
+CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
+CONFIG_SLAB=y
+CONFIG_ARCH_VEXPRESS=y
+CONFIG_ARCH_ZYNQ=y
+CONFIG_PL310_ERRATA_588369=y
+CONFIG_PL310_ERRATA_727915=y
+CONFIG_PL310_ERRATA_769419=y
+CONFIG_ARM_ERRATA_754322=y
+CONFIG_ARM_ERRATA_754327=y
+CONFIG_ARM_ERRATA_764369=y
+CONFIG_ARM_ERRATA_775420=y
+CONFIG_SMP=y
+CONFIG_SCHED_MC=y
+CONFIG_SCHED_SMT=y
+CONFIG_HIGHMEM=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_IDLE=y
+CONFIG_ARM_ZYNQ_CPUIDLE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_COMPACTION is not set
+CONFIG_CMA=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPIP=m
+CONFIG_SYN_COOKIES=y
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_CAN=y
+CONFIG_CAN_XILINXCAN=y
+CONFIG_PCI=y
+CONFIG_PCI_MSI=y
+CONFIG_PCIE_XILINX=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_RAW_NAND=y
+CONFIG_MTD_NAND_PL353=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_OF_OVERLAY=y
+CONFIG_OF_CONFIGFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_XILINX_TRAFGEN=y
+CONFIG_EEPROM_AT24=y
+CONFIG_EEPROM_AT25=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_NETDEVICES=y
+CONFIG_MACB=y
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_FARADAY is not set
+CONFIG_E1000E=y
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+CONFIG_R8169=y
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_XILINX_EMACLITE=y
+CONFIG_XILINX_AXI_EMAC=y
+CONFIG_MDIO_BITBANG=y
+CONFIG_MARVELL_PHY=y
+CONFIG_VITESSE_PHY=y
+CONFIG_INPUT_SPARSEKMAP=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_GPIO_POLLED=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_CADENCE=y
+CONFIG_SPI=y
+CONFIG_SPI_CADENCE=y
+CONFIG_SPI_XILINX=y
+CONFIG_SPI_ZYNQ_QSPI=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_XILINX=y
+CONFIG_GPIO_ZYNQ=y
+CONFIG_PMBUS=y
+CONFIG_SENSORS_UCD9000=y
+CONFIG_SENSORS_UCD9200=y
+CONFIG_THERMAL=y
+CONFIG_CPU_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_XILINX_WATCHDOG=y
+CONFIG_CADENCE_WATCHDOG=y
+CONFIG_REGULATOR=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_VIDEO_XILINX=y
+CONFIG_VIDEO_XILINX_CFA=y
+CONFIG_VIDEO_XILINX_CRESAMPLE=y
+CONFIG_VIDEO_XILINX_REMAPPER=y
+CONFIG_VIDEO_XILINX_RGB2YUV=y
+CONFIG_VIDEO_XILINX_SCALER=y
+CONFIG_VIDEO_XILINX_SWITCH=y
+CONFIG_VIDEO_XILINX_TPG=y
+CONFIG_VIDEO_ADV7604=y
+CONFIG_DRM=y
+CONFIG_DRM_XLNX=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_ADI=y
+CONFIG_SND_SOC_ADI_AXI_I2S=y
+CONFIG_SND_SOC_ADI_AXI_SPDIF=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_STORAGE=y
+CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_UDC=y
+CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_ULPI=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_XILINX=y
+CONFIG_USB_CONFIGFS=m
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_ZERO=m
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_ONESHOT=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_GPIO=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_TRANSIENT=y
+CONFIG_LEDS_TRIGGER_CAMERA=y
+CONFIG_EDAC=y
+CONFIG_EDAC_SYNOPSYS=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_PCF8563=y
+CONFIG_PL330_DMA=y
+CONFIG_XILINX_DMA=y
+CONFIG_XILINX_DMA_ENGINES=y
+CONFIG_UIO=y
+CONFIG_UIO_PDRV_GENIRQ=y
+CONFIG_UIO_XILINX_APM=y
+CONFIG_COMMON_CLK_SI570=y
+CONFIG_REMOTEPROC=y
+CONFIG_ZYNQ_REMOTEPROC=m
+CONFIG_MEMORY=y
+CONFIG_IIO=y
+CONFIG_XILINX_XADC=y
+CONFIG_XILINX_INTC=y
+CONFIG_RAS=y
+CONFIG_FPGA=y
+CONFIG_FPGA_MGR_ZYNQ_FPGA=y
+CONFIG_FPGA_MGR_ZYNQ_AFI_FPGA=y
+CONFIG_FPGA_BRIDGE=y
+CONFIG_XILINX_PR_DECOUPLER=y
+CONFIG_FPGA_REGION=y
+CONFIG_OF_FPGA_REGION=y
+CONFIG_EXT3_FS=y
+# CONFIG_DNOTIFY is not set
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_DMA_CMA=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+# CONFIG_FTRACE is not set
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 7a88f160b1fb..9089de368da8 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -6,8 +6,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
-/* number of IPIS _not_ including IPI_CPU_BACKTRACE */
-#define NR_IPI 7
+#define NR_IPI 16
typedef struct {
unsigned int __softirq_pending;
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index a91f21e3c5b5..bbdfd74ff98a 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -120,4 +120,7 @@ struct of_cpu_method {
*/
extern void smp_set_ops(const struct smp_operations *);
+extern int set_ipi_handler(int ipinr, void *handler, char *desc);
+extern void clear_ipi_handler(int ipinr);
+
#endif /* ifndef __ASM_ARM_SMP_H */
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 46e1be9e57a8..045b8820524c 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -510,20 +510,59 @@ void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
__smp_cross_call = fn;
}
-static const char *ipi_types[NR_IPI] __tracepoint_string = {
-#define S(x,s) [x] = s
- S(IPI_WAKEUP, "CPU wakeup interrupts"),
- S(IPI_TIMER, "Timer broadcast interrupts"),
- S(IPI_RESCHEDULE, "Rescheduling interrupts"),
- S(IPI_CALL_FUNC, "Function call interrupts"),
- S(IPI_CPU_STOP, "CPU stop interrupts"),
- S(IPI_IRQ_WORK, "IRQ work interrupts"),
- S(IPI_COMPLETION, "completion interrupts"),
+struct ipi {
+ const char *desc;
+ void (*handler)(void);
+};
+
+static void ipi_cpu_stop(void);
+static void ipi_complete(void);
+
+#define IPI_DESC_STRING_IPI_WAKEUP "CPU wakeup interrupts"
+#define IPI_DESC_STRING_IPI_TIMER "Timer broadcast interrupts"
+#define IPI_DESC_STRING_IPI_RESCHEDULE "Rescheduling interrupts"
+#define IPI_DESC_STRING_IPI_CALL_FUNC "Function call interrupts"
+#define IPI_DESC_STRING_IPI_CPU_STOP "CPU stop interrupts"
+#define IPI_DESC_STRING_IPI_IRQ_WORK "IRQ work interrupts"
+#define IPI_DESC_STRING_IPI_COMPLETION "completion interrupts"
+
+#define IPI_DESC_STR(x) IPI_DESC_STRING_ ## x
+
+static const char* ipi_desc_strings[] __tracepoint_string =
+ {
+ [IPI_WAKEUP] = IPI_DESC_STR(IPI_WAKEUP),
+ [IPI_TIMER] = IPI_DESC_STR(IPI_TIMER),
+ [IPI_RESCHEDULE] = IPI_DESC_STR(IPI_RESCHEDULE),
+ [IPI_CALL_FUNC] = IPI_DESC_STR(IPI_CALL_FUNC),
+ [IPI_CPU_STOP] = IPI_DESC_STR(IPI_CPU_STOP),
+ [IPI_IRQ_WORK] = IPI_DESC_STR(IPI_IRQ_WORK),
+ [IPI_COMPLETION] = IPI_DESC_STR(IPI_COMPLETION),
+ };
+
+
+static void tick_receive_broadcast_local(void)
+{
+ tick_receive_broadcast();
+}
+
+static struct ipi ipi_types[NR_IPI] = {
+#define S(x, f) [x].desc = IPI_DESC_STR(x), [x].handler = f
+ S(IPI_WAKEUP, NULL),
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+ S(IPI_TIMER, tick_receive_broadcast_local),
+#endif
+ S(IPI_RESCHEDULE, scheduler_ipi),
+ S(IPI_CALL_FUNC, generic_smp_call_function_interrupt),
+ S(IPI_CPU_STOP, ipi_cpu_stop),
+#ifdef CONFIG_IRQ_WORK
+ S(IPI_IRQ_WORK, irq_work_run),
+#endif
+ S(IPI_COMPLETION, ipi_complete),
};
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
- trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
+ trace_ipi_raise_rcuidle(target, ipi_desc_strings[ipinr]);
__smp_cross_call(target, ipinr);
}
@@ -532,13 +571,13 @@ void show_ipi_list(struct seq_file *p, int prec)
unsigned int cpu, i;
for (i = 0; i < NR_IPI; i++) {
- seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
-
- for_each_online_cpu(cpu)
- seq_printf(p, "%10u ",
- __get_irq_stat(cpu, ipi_irqs[i]));
-
- seq_printf(p, " %s\n", ipi_types[i]);
+ if (ipi_types[i].handler) {
+ seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
+ for_each_present_cpu(cpu)
+ seq_printf(p, "%10u ",
+ __get_irq_stat(cpu, ipi_irqs[i]));
+ seq_printf(p, " %s\n", ipi_types[i].desc);
+ }
}
}
@@ -588,8 +627,10 @@ static DEFINE_RAW_SPINLOCK(stop_lock);
/*
* ipi_cpu_stop - handle IPI from smp_send_stop()
*/
-static void ipi_cpu_stop(unsigned int cpu)
+static void ipi_cpu_stop(void)
{
+ unsigned int cpu = smp_processor_id();
+
if (system_state <= SYSTEM_RUNNING) {
raw_spin_lock(&stop_lock);
pr_crit("CPU%u: stopping\n", cpu);
@@ -616,8 +657,10 @@ int register_ipi_completion(struct completion *completion, int cpu)
return IPI_COMPLETION;
}
-static void ipi_complete(unsigned int cpu)
+static void ipi_complete(void)
{
+ unsigned int cpu = smp_processor_id();
+
complete(per_cpu(cpu_completion, cpu));
}
@@ -634,71 +677,48 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
unsigned int cpu = smp_processor_id();
struct pt_regs *old_regs = set_irq_regs(regs);
- if ((unsigned)ipinr < NR_IPI) {
- trace_ipi_entry_rcuidle(ipi_types[ipinr]);
+ if (ipi_types[ipinr].handler) {
__inc_irq_stat(cpu, ipi_irqs[ipinr]);
- }
-
- switch (ipinr) {
- case IPI_WAKEUP:
- break;
-
-#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
- case IPI_TIMER:
- irq_enter();
- tick_receive_broadcast();
- irq_exit();
- break;
-#endif
-
- case IPI_RESCHEDULE:
- scheduler_ipi();
- break;
-
- case IPI_CALL_FUNC:
irq_enter();
- generic_smp_call_function_interrupt();
+ (*ipi_types[ipinr].handler)();
irq_exit();
- break;
+ } else
+ pr_debug("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
- case IPI_CPU_STOP:
- irq_enter();
- ipi_cpu_stop(cpu);
- irq_exit();
- break;
+ set_irq_regs(old_regs);
+}
-#ifdef CONFIG_IRQ_WORK
- case IPI_IRQ_WORK:
- irq_enter();
- irq_work_run();
- irq_exit();
- break;
-#endif
+/*
+ * set_ipi_handler:
+ * Interface provided for a kernel module to specify an IPI handler function.
+ */
+int set_ipi_handler(int ipinr, void *handler, char *desc)
+{
+ unsigned int cpu = smp_processor_id();
- case IPI_COMPLETION:
- irq_enter();
- ipi_complete(cpu);
- irq_exit();
- break;
+ if (ipi_types[ipinr].handler) {
+ pr_crit("CPU%u: IPI handler 0x%x already registered to %pf\n",
+ cpu, ipinr, ipi_types[ipinr].handler);
+ return -1;
+ }
- case IPI_CPU_BACKTRACE:
- printk_nmi_enter();
- irq_enter();
- nmi_cpu_backtrace(regs);
- irq_exit();
- printk_nmi_exit();
- break;
+ ipi_types[ipinr].handler = handler;
+ ipi_types[ipinr].desc = desc;
- default:
- pr_crit("CPU%u: Unknown IPI message 0x%x\n",
- cpu, ipinr);
- break;
- }
+ return 0;
+}
+EXPORT_SYMBOL(set_ipi_handler);
- if ((unsigned)ipinr < NR_IPI)
- trace_ipi_exit_rcuidle(ipi_types[ipinr]);
- set_irq_regs(old_regs);
+/*
+ * clear_ipi_handler:
+ * Interface provided for a kernel module to clear an IPI handler function.
+ */
+void clear_ipi_handler(int ipinr)
+{
+ ipi_types[ipinr].handler = NULL;
+ ipi_types[ipinr].desc = NULL;
}
+EXPORT_SYMBOL(clear_ipi_handler);
void smp_send_reschedule(int cpu)
{
diff --git a/arch/arm/mach-zynq/Kconfig b/arch/arm/mach-zynq/Kconfig
index 1ca633e3d024..557bfe794d29 100644
--- a/arch/arm/mach-zynq/Kconfig
+++ b/arch/arm/mach-zynq/Kconfig
@@ -17,3 +17,19 @@ config ARCH_ZYNQ
select SOC_BUS
help
Support for Xilinx Zynq ARM Cortex A9 Platform
+
+if ARCH_ZYNQ
+
+menu "Xilinx Specific Options"
+
+config XILINX_PREFETCH
+ bool "Cache Prefetch"
+ default y
+ help
+ This option turns on L1 & L2 cache prefetching to get the best performance
+ in many cases. This may not always be the best performance depending on
+ the usage.
+
+endmenu
+
+endif
diff --git a/arch/arm/mach-zynq/Makefile b/arch/arm/mach-zynq/Makefile
index 9df74cd85fd0..374207c87f96 100644
--- a/arch/arm/mach-zynq/Makefile
+++ b/arch/arm/mach-zynq/Makefile
@@ -4,5 +4,10 @@
#
# Common support
-obj-y := common.o slcr.o pm.o
+obj-y := common.o efuse.o slcr.o zynq_ocm.o pm.o
+
obj-$(CONFIG_SMP) += headsmp.o platsmp.o
+ORIG_AFLAGS := $(KBUILD_AFLAGS)
+KBUILD_AFLAGS = $(subst -march=armv6k,,$(ORIG_AFLAGS))
+AFLAGS_suspend.o +=-Wa,-march=armv7-a -mcpu=cortex-a9
+obj-$(CONFIG_SUSPEND) += suspend.o
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index a9dd2f71cd19..8182c27744be 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -95,6 +95,7 @@ static void __init zynq_init_late(void)
{
zynq_core_pm_init();
zynq_pm_late_init();
+ zynq_prefetch_init();
}
/**
@@ -175,6 +176,7 @@ static void __init zynq_map_io(void)
static void __init zynq_irq_init(void)
{
+ zynq_early_efuse_init();
zynq_early_slcr_init();
irqchip_init();
}
@@ -186,8 +188,13 @@ static const char * const zynq_dt_match[] = {
DT_MACHINE_START(XILINX_EP107, "Xilinx Zynq Platform")
/* 64KB way size, 8-way associativity, parity disabled */
- .l2c_aux_val = 0x00400000,
+#ifdef CONFIG_XILINX_PREFETCH
+ .l2c_aux_val = 0x30400000,
+ .l2c_aux_mask = 0xcfbfffff,
+#else
+ .l2c_aux_val = 0x00400000,
.l2c_aux_mask = 0xffbfffff,
+#endif
.smp = smp_ops(zynq_smp_ops),
.map_io = zynq_map_io,
.init_irq = zynq_irq_init,
diff --git a/arch/arm/mach-zynq/common.h b/arch/arm/mach-zynq/common.h
index 60e662324699..5816d57e5a5d 100644
--- a/arch/arm/mach-zynq/common.h
+++ b/arch/arm/mach-zynq/common.h
@@ -15,8 +15,12 @@ extern void zynq_slcr_cpu_stop(int cpu);
extern void zynq_slcr_cpu_start(int cpu);
extern bool zynq_slcr_cpu_state_read(int cpu);
extern void zynq_slcr_cpu_state_write(int cpu, bool die);
+extern u32 zynq_slcr_get_ocm_config(void);
extern u32 zynq_slcr_get_device_id(void);
+extern bool zynq_efuse_cpu_state(int cpu);
+extern int zynq_early_efuse_init(void);
+
#ifdef CONFIG_SMP
extern char zynq_secondary_trampoline;
extern char zynq_secondary_trampoline_jump;
@@ -25,9 +29,31 @@ extern int zynq_cpun_start(u32 address, int cpu);
extern const struct smp_operations zynq_smp_ops;
#endif
+extern void zynq_slcr_init_preload_fpga(void);
+extern void zynq_slcr_init_postload_fpga(void);
+
+extern void __iomem *zynq_slcr_base;
extern void __iomem *zynq_scu_base;
void zynq_pm_late_init(void);
+extern unsigned int zynq_sys_suspend_sz;
+int zynq_sys_suspend(void __iomem *ddrc_base, void __iomem *slcr_base);
+
+static inline void zynq_prefetch_init(void)
+{
+ /*
+ * Enable prefetching in aux control register. L2 prefetch must
+ * only be enabled if the slave supports it (PL310 does)
+ */
+ asm volatile ("mrc p15, 0, r1, c1, c0, 1\n"
+#ifdef CONFIG_XILINX_PREFETCH
+ "orr r1, r1, #6\n"
+#else
+ "bic r1, r1, #6\n"
+#endif
+ "mcr p15, 0, r1, c1, c0, 1\n"
+ : : : "r1");
+}
static inline void zynq_core_pm_init(void)
{
diff --git a/arch/arm/mach-zynq/efuse.c b/arch/arm/mach-zynq/efuse.c
new file mode 100644
index 000000000000..d31a5822ec65
--- /dev/null
+++ b/arch/arm/mach-zynq/efuse.c
@@ -0,0 +1,75 @@
+/*
+ * Xilinx EFUSE driver
+ *
+ * Copyright (c) 2016 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include "common.h"
+
+#define EFUSE_STATUS_OFFSET 0x10
+
+/* 0 means cpu1 is working, 1 means cpu1 is broken */
+#define EFUSE_STATUS_CPU_BIT BIT(7)
+
+void __iomem *zynq_efuse_base;
+
+/**
+ * zynq_efuse_cpu_state - Read/write cpu state
+ * @cpu: cpu number
+ *
+ * Return: true if cpu is running, false if cpu is broken
+ */
+bool zynq_efuse_cpu_state(int cpu)
+{
+ u32 state;
+
+ if (!cpu)
+ return true;
+
+ state = readl(zynq_efuse_base + EFUSE_STATUS_OFFSET);
+ state &= EFUSE_STATUS_CPU_BIT;
+
+ if (!state)
+ return true;
+
+ return false;
+}
+
+/**
+ * zynq_early_efuse_init - Early efuse init function
+ *
+ * Return: 0 on success, negative errno otherwise.
+ *
+ * Called very early during boot from platform code.
+ */
+int __init zynq_early_efuse_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "xlnx,zynq-efuse");
+ if (!np) {
+ pr_err("%s: no efuse node found\n", __func__);
+ BUG();
+ }
+
+ zynq_efuse_base = of_iomap(np, 0);
+ if (!zynq_efuse_base) {
+ pr_err("%s: Unable to map I/O memory\n", __func__);
+ BUG();
+ }
+
+ np->data = (__force void *)zynq_efuse_base;
+
+ pr_info("%s mapped to %p\n", np->name, zynq_efuse_base);
+
+ of_node_put(np);
+
+ return 0;
+}
diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
index 68ec303fa278..4568f68c3c07 100644
--- a/arch/arm/mach-zynq/platsmp.c
+++ b/arch/arm/mach-zynq/platsmp.c
@@ -83,6 +83,9 @@ EXPORT_SYMBOL(zynq_cpun_start);
static int zynq_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
+ if (!zynq_efuse_cpu_state(cpu))
+ return -1;
+
return zynq_cpun_start(__pa_symbol(secondary_startup_arm), cpu);
}
@@ -115,6 +118,7 @@ static void __init zynq_smp_prepare_cpus(unsigned int max_cpus)
static void zynq_secondary_init(unsigned int cpu)
{
zynq_core_pm_init();
+ zynq_prefetch_init();
}
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/arm/mach-zynq/pm.c b/arch/arm/mach-zynq/pm.c
index 8ba450ab559c..b9445a654b59 100644
--- a/arch/arm/mach-zynq/pm.c
+++ b/arch/arm/mach-zynq/pm.c
@@ -7,6 +7,14 @@
* Sören Brinkmann <soren.brinkmann@xilinx.com>
*/
+#include <linux/clk/zynq.h>
+#include <linux/genalloc.h>
+#include <linux/suspend.h>
+#include <asm/cacheflush.h>
+#include <asm/fncpy.h>
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/mach/map.h>
+#include <asm/suspend.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
@@ -22,6 +30,165 @@
static void __iomem *ddrc_base;
+#ifdef CONFIG_SUSPEND
+static int (*zynq_suspend_ptr)(void __iomem *, void __iomem *);
+
+static int zynq_pm_prepare_late(void)
+{
+ return zynq_clk_suspend_early();
+}
+
+static void zynq_pm_wake(void)
+{
+ zynq_clk_resume_late();
+}
+
+static int zynq_pm_suspend(unsigned long arg)
+{
+ u32 reg;
+ int do_ddrpll_bypass = 1;
+
+ /* Topswitch clock stop disable */
+ zynq_clk_topswitch_disable();
+
+ if (!zynq_suspend_ptr || !ddrc_base) {
+ do_ddrpll_bypass = 0;
+ } else {
+ /* enable DDRC self-refresh mode */
+ reg = readl(ddrc_base + DDRC_CTRL_REG1_OFFS);
+ reg |= DDRC_SELFREFRESH_MASK;
+ writel(reg, ddrc_base + DDRC_CTRL_REG1_OFFS);
+ }
+
+ if (do_ddrpll_bypass) {
+ /*
+ * Going this way will turn off DDR related clocks and the DDR
+ * PLL. I.e. We might brake sub systems relying on any of this
+ * clocks. And even worse: If there are any other masters in the
+ * system (e.g. in the PL) accessing DDR they are screwed.
+ */
+ flush_cache_all();
+ if (zynq_suspend_ptr(ddrc_base, zynq_slcr_base))
+ pr_warn("DDR self refresh failed.\n");
+ } else {
+ WARN_ONCE(1, "DRAM self-refresh not available\n");
+ cpu_do_idle();
+ }
+
+ /* disable DDRC self-refresh mode */
+ if (do_ddrpll_bypass) {
+ reg = readl(ddrc_base + DDRC_CTRL_REG1_OFFS);
+ reg &= ~DDRC_SELFREFRESH_MASK;
+ writel(reg, ddrc_base + DDRC_CTRL_REG1_OFFS);
+ }
+
+ /* Topswitch clock stop enable */
+ zynq_clk_topswitch_enable();
+
+ return 0;
+}
+
+static int zynq_pm_enter(suspend_state_t suspend_state)
+{
+ switch (suspend_state) {
+ case PM_SUSPEND_STANDBY:
+ case PM_SUSPEND_MEM:
+ cpu_suspend(0, zynq_pm_suspend);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct platform_suspend_ops zynq_pm_ops = {
+ .prepare_late = zynq_pm_prepare_late,
+ .enter = zynq_pm_enter,
+ .wake = zynq_pm_wake,
+ .valid = suspend_valid_only_mem,
+};
+
+/**
+ * zynq_pm_remap_ocm() - Remap OCM
+ * Returns a pointer to the mapped memory or NULL.
+ *
+ * Remap the OCM.
+ */
+static void __iomem *zynq_pm_remap_ocm(void)
+{
+ struct device_node *np;
+ const char *comp = "xlnx,zynq-ocmc-1.0";
+ void __iomem *base = NULL;
+
+ np = of_find_compatible_node(NULL, NULL, comp);
+ if (np) {
+ struct device *dev;
+ unsigned long pool_addr;
+ unsigned long pool_addr_virt;
+ struct gen_pool *pool;
+
+ of_node_put(np);
+
+ dev = &(of_find_device_by_node(np)->dev);
+
+ /* Get OCM pool from device tree or platform data */
+ pool = gen_pool_get(dev, NULL);
+ if (!pool) {
+ pr_warn("%s: OCM pool is not available\n", __func__);
+ return NULL;
+ }
+
+ pool_addr_virt = gen_pool_alloc(pool, zynq_sys_suspend_sz);
+ if (!pool_addr_virt) {
+ pr_warn("%s: Can't get OCM poll\n", __func__);
+ return NULL;
+ }
+ pool_addr = gen_pool_virt_to_phys(pool, pool_addr_virt);
+ if (!pool_addr) {
+ pr_warn("%s: Can't get physical address of OCM pool\n",
+ __func__);
+ return NULL;
+ }
+ base = __arm_ioremap_exec(pool_addr, zynq_sys_suspend_sz,
+ MT_MEMORY_RWX);
+ if (!base) {
+ pr_warn("%s: IOremap OCM pool failed\n", __func__);
+ return NULL;
+ }
+ pr_debug("%s: Remap OCM %s from %lx to %lx\n", __func__, comp,
+ pool_addr_virt, (unsigned long)base);
+ } else {
+ pr_warn("%s: no compatible node found for '%s'\n", __func__,
+ comp);
+ }
+
+ return base;
+}
+
+static void zynq_pm_suspend_init(void)
+{
+ void __iomem *ocm_base = zynq_pm_remap_ocm();
+
+ if (!ocm_base) {
+ pr_warn("%s: Unable to map OCM.\n", __func__);
+ } else {
+ /*
+ * Copy code to suspend system into OCM. The suspend code
+ * needs to run from OCM as DRAM may no longer be available
+ * when the PLL is stopped.
+ */
+ zynq_suspend_ptr = fncpy((__force void *)ocm_base,
+ (__force void *)&zynq_sys_suspend,
+ zynq_sys_suspend_sz);
+ }
+
+ suspend_set_ops(&zynq_pm_ops);
+}
+#else /* CONFIG_SUSPEND */
+static void zynq_pm_suspend_init(void) { };
+#endif /* CONFIG_SUSPEND */
+
/**
* zynq_pm_ioremap() - Create IO mappings
* @comp: DT compatible string
@@ -68,4 +235,7 @@ void __init zynq_pm_late_init(void)
reg |= DDRC_CLOCKSTOP_MASK;
writel(reg, ddrc_base + DDRC_DRAM_PARAM_REG3_OFFS);
}
+
+ /* set up suspend */
+ zynq_pm_suspend_init();
}
diff --git a/arch/arm/mach-zynq/slcr.c b/arch/arm/mach-zynq/slcr.c
index 37707614885a..18a36c48db2e 100644
--- a/arch/arm/mach-zynq/slcr.c
+++ b/arch/arm/mach-zynq/slcr.c
@@ -16,10 +16,13 @@
/* register offsets */
#define SLCR_UNLOCK_OFFSET 0x8 /* SCLR unlock register */
#define SLCR_PS_RST_CTRL_OFFSET 0x200 /* PS Software Reset Control */
+#define SLCR_FPGA_RST_CTRL_OFFSET 0x240 /* FPGA Software Reset Control */
#define SLCR_A9_CPU_RST_CTRL_OFFSET 0x244 /* CPU Software Reset Control */
#define SLCR_REBOOT_STATUS_OFFSET 0x258 /* PS Reboot Status */
#define SLCR_PSS_IDCODE 0x530 /* PS IDCODE */
#define SLCR_L2C_RAM 0xA1C /* L2C_RAM in AR#54190 */
+#define SLCR_LVL_SHFTR_EN_OFFSET 0x900 /* Level Shifters Enable */
+#define SLCR_OCM_CFG_OFFSET 0x910 /* OCM Address Mapping */
#define SLCR_UNLOCK_MAGIC 0xDF0D
#define SLCR_A9_CPU_CLKSTOP 0x10
@@ -27,7 +30,7 @@
#define SLCR_PSS_IDCODE_DEVICE_SHIFT 12
#define SLCR_PSS_IDCODE_DEVICE_MASK 0x1F
-static void __iomem *zynq_slcr_base;
+void __iomem *zynq_slcr_base;
static struct regmap *zynq_slcr_regmap;
/**
@@ -116,6 +119,48 @@ static struct notifier_block zynq_slcr_restart_nb = {
};
/**
+ * zynq_slcr_get_ocm_config - Get SLCR OCM config
+ *
+ * return: OCM config bits
+ */
+u32 zynq_slcr_get_ocm_config(void)
+{
+ u32 ret;
+
+ zynq_slcr_read(&ret, SLCR_OCM_CFG_OFFSET);
+ return ret;
+}
+
+/**
+ * zynq_slcr_init_preload_fpga - Disable communication from the PL to PS.
+ */
+void zynq_slcr_init_preload_fpga(void)
+{
+ /* Assert FPGA top level output resets */
+ zynq_slcr_write(0xF, SLCR_FPGA_RST_CTRL_OFFSET);
+
+ /* Disable level shifters */
+ zynq_slcr_write(0, SLCR_LVL_SHFTR_EN_OFFSET);
+
+ /* Enable output level shifters */
+ zynq_slcr_write(0xA, SLCR_LVL_SHFTR_EN_OFFSET);
+}
+EXPORT_SYMBOL(zynq_slcr_init_preload_fpga);
+
+/**
+ * zynq_slcr_init_postload_fpga - Re-enable communication from the PL to PS.
+ */
+void zynq_slcr_init_postload_fpga(void)
+{
+ /* Enable level shifters */
+ zynq_slcr_write(0xf, SLCR_LVL_SHFTR_EN_OFFSET);
+
+ /* Deassert AXI interface resets */
+ zynq_slcr_write(0, SLCR_FPGA_RST_CTRL_OFFSET);
+}
+EXPORT_SYMBOL(zynq_slcr_init_postload_fpga);
+
+/**
* zynq_slcr_cpu_start - Start cpu
* @cpu: cpu number
*/
diff --git a/arch/arm/mach-zynq/suspend.S b/arch/arm/mach-zynq/suspend.S
new file mode 100644
index 000000000000..f3f8440e8018
--- /dev/null
+++ b/arch/arm/mach-zynq/suspend.S
@@ -0,0 +1,185 @@
+/*
+ * Suspend support for Zynq
+ *
+ * Copyright (C) 2012 Xilinx
+ *
+ * Soren Brinkmann <soren.brinkmann@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+
+#define ARMPLL_CTRL_OFFS 0x100
+#define DDRPLL_CTRL_OFFS 0x104
+#define PLLSTATUS_OFFS 0x10c
+#define DDR_CLK_CTRL_OFFS 0x124
+#define DCI_CLK_CTRL_OFFS 0x128
+#define MODE_STS_OFFS 0x54
+
+#define PLL_RESET_MASK 1
+#define PLL_PWRDWN_MASK (1 << 1)
+#define PLL_BYPASS_MASK (1 << 4)
+#define DCICLK_ENABLE_MASK 1
+#define DDRCLK_ENABLE_MASK 3
+#define ARM_LOCK_MASK (1 << 0)
+#define DDR_LOCK_MASK (1 << 1)
+#define DDRC_STATUS_MASK 7
+
+#define DDRC_OPMODE_SR 3
+#define MAXTRIES 100
+
+ .text
+ .align 3
+
+/**
+ * zynq_sys_suspend - Enter suspend
+ * @ddrc_base: Base address of the DDRC
+ * @slcr_base: Base address of the SLCR
+ * Returns -1 if DRAM subsystem is not gated off, 0 otherwise.
+ *
+ * This function is moved into OCM and finishes the suspend operation. I.e. DDR
+ * related clocks are gated off and the DDR PLL is bypassed.
+ */
+ENTRY(zynq_sys_suspend)
+ push {r4 - r7}
+
+ /* Check DDRC is in self-refresh mode */
+ ldr r2, [r0, #MODE_STS_OFFS]
+ and r2, #DDRC_STATUS_MASK
+ cmp r2, #DDRC_OPMODE_SR
+ movweq r3, #0xff00
+ bne suspend
+
+ mov r3, #MAXTRIES
+ movw r4, #0xfff0
+ movt r4, #0x1f
+ /* Wait for command queue empty */
+1: subs r3, #1
+ movweq r3, #0xff00
+ beq suspend
+ dsb sy
+ ldr r2, [r0, #MODE_STS_OFFS]
+ ands r2, r4
+ bne 1b
+
+ dsb sy
+
+ /*
+ * Wait for DDRC pipeline/queues to drain.
+ * We should wait ~40 DDR cycles. DDR is still at full speed while the
+ * CPU might already run in PLL bypass mode. The fastest speed the CPU
+ * runs at is ~1 GHz ~ 2 * DDR speed.
+ */
+ mov r3, #160
+1: nop
+ subs r3, #1
+ bne 1b
+
+ dsb sy
+
+ /* read back CAM status once more */
+ ldr r2, [r0, #MODE_STS_OFFS]
+ ands r2, r4
+ movwne r3, #0xff00
+ bne suspend
+
+ /* Stop DDR clocks */
+ ldr r2, [r1, #DDR_CLK_CTRL_OFFS]
+ bic r2, #DDRCLK_ENABLE_MASK
+ str r2, [r1, #DDR_CLK_CTRL_OFFS]
+
+ dmb st
+
+ ldr r2, [r1, #DCI_CLK_CTRL_OFFS]
+ bic r2, #DCICLK_ENABLE_MASK
+ str r2, [r1, #DCI_CLK_CTRL_OFFS]
+
+ dmb st
+
+ /* Bypass and powerdown DDR PLL */
+ ldr r2, [r1, #DDRPLL_CTRL_OFFS]
+ orr r2, #PLL_BYPASS_MASK
+ str r2, [r1, #DDRPLL_CTRL_OFFS]
+ orr r2, #(PLL_PWRDWN_MASK | PLL_RESET_MASK)
+ str r2, [r1, #DDRPLL_CTRL_OFFS]
+
+ /* Bypass and powerdown ARM PLL */
+ ldr r2, [r1, #ARMPLL_CTRL_OFFS]
+ orr r2, #PLL_BYPASS_MASK
+ str r2, [r1, #ARMPLL_CTRL_OFFS]
+ orr r2, #(PLL_PWRDWN_MASK | PLL_RESET_MASK)
+ str r2, [r1, #ARMPLL_CTRL_OFFS]
+
+suspend:
+ dsb sy
+ wfi
+ dsb sy
+ cmp r3, #0xff00
+ moveq r0, #-1
+ beq exit
+
+ /* Power up ARM PLL */
+ ldr r2, [r1, #ARMPLL_CTRL_OFFS]
+ bic r2, #(PLL_PWRDWN_MASK | PLL_RESET_MASK)
+ str r2, [r1, #ARMPLL_CTRL_OFFS]
+ /* wait for lock */
+1: ldr r2, [r1, #PLLSTATUS_OFFS]
+ ands r2, #ARM_LOCK_MASK
+ beq 1b
+
+ dsb sy
+
+ /* Disable ARM PLL bypass */
+ ldr r2, [r1, #ARMPLL_CTRL_OFFS]
+ bic r2, #PLL_BYPASS_MASK
+ str r2, [r1, #ARMPLL_CTRL_OFFS]
+
+ dmb st
+
+ /* Power up DDR PLL */
+ ldr r2, [r1, #DDRPLL_CTRL_OFFS]
+ bic r2, #(PLL_PWRDWN_MASK | PLL_RESET_MASK)
+ str r2, [r1, #DDRPLL_CTRL_OFFS]
+ /* wait for lock */
+1: ldr r2, [r1, #PLLSTATUS_OFFS]
+ ands r2, #DDR_LOCK_MASK
+ beq 1b
+
+ dsb sy
+
+ /* Disable DDR PLL bypass */
+ ldr r2, [r1, #DDRPLL_CTRL_OFFS]
+ bic r2, #PLL_BYPASS_MASK
+ str r2, [r1, #DDRPLL_CTRL_OFFS]
+
+ dmb st
+
+ /* Start DDR clocks */
+ ldr r2, [r1, #DCI_CLK_CTRL_OFFS]
+ orr r2, #DCICLK_ENABLE_MASK
+ str r2, [r1, #DCI_CLK_CTRL_OFFS]
+
+ dmb st
+
+ ldr r2, [r1, #DDR_CLK_CTRL_OFFS]
+ orr r2, #DDRCLK_ENABLE_MASK
+ str r2, [r1, #DDR_CLK_CTRL_OFFS]
+
+ dsb sy
+
+ mov r0, #0
+exit: pop {r4 - r7}
+ bx lr
+
+ENTRY(zynq_sys_suspend_sz)
+ .word . - zynq_sys_suspend
+
+ ENDPROC(zynq_sys_suspend)
diff --git a/arch/arm/mach-zynq/zynq_ocm.c b/arch/arm/mach-zynq/zynq_ocm.c
new file mode 100644
index 000000000000..324b7c125bf5
--- /dev/null
+++ b/arch/arm/mach-zynq/zynq_ocm.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright (C) 2013 Xilinx
+ *
+ * Based on "Generic on-chip SRAM allocation driver"
+ *
+ * Copyright (C) 2012 Philipp Zabel, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/genalloc.h>
+
+#include "common.h"
+
+#define ZYNQ_OCM_HIGHADDR 0xfffc0000
+#define ZYNQ_OCM_LOWADDR 0x0
+#define ZYNQ_OCM_BLOCK_SIZE 0x10000
+#define ZYNQ_OCM_BLOCKS 4
+#define ZYNQ_OCM_GRANULARITY 32
+
+#define ZYNQ_OCM_PARITY_CTRL 0x0
+#define ZYNQ_OCM_PARITY_ENABLE 0x1e
+
+#define ZYNQ_OCM_PARITY_ERRADDRESS 0x4
+
+#define ZYNQ_OCM_IRQ_STS 0x8
+#define ZYNQ_OCM_IRQ_STS_ERR_MASK 0x7
+
+struct zynq_ocm_dev {
+ void __iomem *base;
+ int irq;
+ struct gen_pool *pool;
+ struct resource res[ZYNQ_OCM_BLOCKS];
+};
+
+/**
+ * zynq_ocm_irq_handler - Interrupt service routine of the OCM controller
+ * @irq: IRQ number
+ * @data: Pointer to the zynq_ocm_dev structure
+ *
+ * Return: IRQ_HANDLED when handled; IRQ_NONE otherwise.
+ */
+static irqreturn_t zynq_ocm_irq_handler(int irq, void *data)
+{
+ u32 sts;
+ u32 err_addr;
+ struct zynq_ocm_dev *zynq_ocm = data;
+
+ /* check status */
+ sts = readl(zynq_ocm->base + ZYNQ_OCM_IRQ_STS);
+ if (sts & ZYNQ_OCM_IRQ_STS_ERR_MASK) {
+ /* check error address */
+ err_addr = readl(zynq_ocm->base + ZYNQ_OCM_PARITY_ERRADDRESS);
+ pr_err("%s: OCM err intr generated at 0x%04x (stat: 0x%08x).",
+ __func__, err_addr, sts & ZYNQ_OCM_IRQ_STS_ERR_MASK);
+ return IRQ_HANDLED;
+ }
+ pr_warn("%s: Interrupt generated by OCM, but no error is found.",
+ __func__);
+
+ return IRQ_NONE;
+}
+
+/**
+ * zynq_ocm_probe - Probe method for the OCM driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function initializes the driver data structures and the hardware.
+ *
+ * Return: 0 on success and error value on failure
+ */
+static int zynq_ocm_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct zynq_ocm_dev *zynq_ocm;
+ u32 i, ocm_config, curr;
+ struct resource *res;
+
+ ocm_config = zynq_slcr_get_ocm_config();
+
+ zynq_ocm = devm_kzalloc(&pdev->dev, sizeof(*zynq_ocm), GFP_KERNEL);
+ if (!zynq_ocm)
+ return -ENOMEM;
+
+ zynq_ocm->pool = devm_gen_pool_create(&pdev->dev,
+ ilog2(ZYNQ_OCM_GRANULARITY),
+ NUMA_NO_NODE, NULL);
+ if (!zynq_ocm->pool)
+ return -ENOMEM;
+
+ curr = 0; /* For storing current struct resource for OCM */
+ for (i = 0; i < ZYNQ_OCM_BLOCKS; i++) {
+ u32 base, start, end;
+
+ /* Setup base address for 64kB OCM block */
+ if (ocm_config & BIT(i))
+ base = ZYNQ_OCM_HIGHADDR;
+ else
+ base = ZYNQ_OCM_LOWADDR;
+
+ /* Calculate start and end block addresses */
+ start = i * ZYNQ_OCM_BLOCK_SIZE + base;
+ end = start + (ZYNQ_OCM_BLOCK_SIZE - 1);
+
+ /* Concatenate OCM blocks together to get bigger pool */
+ if (i > 0 && start == (zynq_ocm->res[curr - 1].end + 1)) {
+ zynq_ocm->res[curr - 1].end = end;
+ } else {
+#ifdef CONFIG_SMP
+ /*
+ * OCM block if placed at 0x0 has special meaning
+ * for SMP because jump trampoline is added there.
+ * Ensure that this address won't be allocated.
+ */
+ if (!base) {
+ u32 trampoline_code_size =
+ &zynq_secondary_trampoline_end -
+ &zynq_secondary_trampoline;
+ dev_dbg(&pdev->dev,
+ "Allocate reset vector table %dB\n",
+ trampoline_code_size);
+ /* postpone start offset */
+ start += trampoline_code_size;
+ }
+#endif
+ /* First resource is always initialized */
+ zynq_ocm->res[curr].start = start;
+ zynq_ocm->res[curr].end = end;
+ zynq_ocm->res[curr].flags = IORESOURCE_MEM;
+ curr++; /* Increment curr value */
+ }
+ dev_dbg(&pdev->dev, "OCM block %d, start %x, end %x\n",
+ i, start, end);
+ }
+
+ /*
+ * Separate pool allocation from OCM block detection to ensure
+ * the biggest possible pool.
+ */
+ for (i = 0; i < ZYNQ_OCM_BLOCKS; i++) {
+ unsigned long size;
+ void __iomem *virt_base;
+
+ /* Skip all zero size resources */
+ if (zynq_ocm->res[i].end == 0)
+ break;
+ dev_dbg(&pdev->dev, "OCM resources %d, start %x, end %x\n",
+ i, zynq_ocm->res[i].start, zynq_ocm->res[i].end);
+ size = resource_size(&zynq_ocm->res[i]);
+ virt_base = devm_ioremap_resource(&pdev->dev,
+ &zynq_ocm->res[i]);
+ if (IS_ERR(virt_base))
+ return PTR_ERR(virt_base);
+
+ ret = gen_pool_add_virt(zynq_ocm->pool,
+ (unsigned long)virt_base,
+ zynq_ocm->res[i].start, size, -1);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Gen pool failed\n");
+ return ret;
+ }
+ dev_info(&pdev->dev, "ZYNQ OCM pool: %ld KiB @ 0x%p\n",
+ size / 1024, virt_base);
+ }
+
+ /* Get OCM config space */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ zynq_ocm->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(zynq_ocm->base))
+ return PTR_ERR(zynq_ocm->base);
+
+ /* Allocate OCM parity IRQ */
+ zynq_ocm->irq = platform_get_irq(pdev, 0);
+ if (zynq_ocm->irq < 0) {
+ dev_err(&pdev->dev, "irq resource not found\n");
+ return zynq_ocm->irq;
+ }
+ ret = devm_request_irq(&pdev->dev, zynq_ocm->irq, zynq_ocm_irq_handler,
+ 0, pdev->name, zynq_ocm);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "request_irq failed\n");
+ return ret;
+ }
+
+ /* Enable parity errors */
+ writel(ZYNQ_OCM_PARITY_ENABLE, zynq_ocm->base + ZYNQ_OCM_PARITY_CTRL);
+
+ platform_set_drvdata(pdev, zynq_ocm);
+
+ return 0;
+}
+
+/**
+ * zynq_ocm_remove - Remove method for the OCM driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function is called if a device is physically removed from the system or
+ * if the driver module is being unloaded. It frees all resources allocated to
+ * the device.
+ *
+ * Return: 0 on success and error value on failure
+ */
+static int zynq_ocm_remove(struct platform_device *pdev)
+{
+ struct zynq_ocm_dev *zynq_ocm = platform_get_drvdata(pdev);
+
+ if (gen_pool_avail(zynq_ocm->pool) < gen_pool_size(zynq_ocm->pool))
+ dev_dbg(&pdev->dev, "removed while SRAM allocated\n");
+
+ return 0;
+}
+
+static struct of_device_id zynq_ocm_dt_ids[] = {
+ { .compatible = "xlnx,zynq-ocmc-1.0" },
+ { /* end of table */ }
+};
+
+static struct platform_driver zynq_ocm_driver = {
+ .driver = {
+ .name = "zynq-ocm",
+ .of_match_table = zynq_ocm_dt_ids,
+ },
+ .probe = zynq_ocm_probe,
+ .remove = zynq_ocm_remove,
+};
+
+static int __init zynq_ocm_init(void)
+{
+ return platform_driver_register(&zynq_ocm_driver);
+}
+
+arch_initcall(zynq_ocm_init);
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 3c7e310fd8bf..2cc0f8ebd687 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -300,6 +300,8 @@ config ARCH_ZX
config ARCH_ZYNQMP
bool "Xilinx ZynqMP Family"
+ select PINCTRL
+ select PINCTRL_ZYNQMP
help
This enables support for Xilinx ZynqMP Family
diff --git a/arch/arm64/boot/dts/xilinx/Makefile b/arch/arm64/boot/dts/xilinx/Makefile
index 60f5443f3ef4..c4dd722f4396 100644
--- a/arch/arm64/boot/dts/xilinx/Makefile
+++ b/arch/arm64/boot/dts/xilinx/Makefile
@@ -2,7 +2,6 @@
dtb-$(CONFIG_ARCH_ZYNQMP) += avnet-ultra96-rev1.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zc1232-revA.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zc1254-revA.dtb
-dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zc1275-revA.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zc1751-xm015-dc1.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zc1751-xm016-dc2.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zc1751-xm017-dc3.dtb
@@ -13,5 +12,11 @@ dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu102-revA.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu102-revB.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu102-rev1.0.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu104-revA.dtb
+dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu104-revC.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu106-revA.dtb
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu111-revA.dtb
+dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu1275-revA.dtb
+dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu1275-revB.dtb
+dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu1285-revA.dtb
+dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu208-revA.dtb
+dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-zcu216-revA.dtb
diff --git a/arch/arm64/boot/dts/xilinx/avnet-ultra96-rev1.dts b/arch/arm64/boot/dts/xilinx/avnet-ultra96-rev1.dts
index 88aa06fa78a8..ddb8febaece1 100644
--- a/arch/arm64/boot/dts/xilinx/avnet-ultra96-rev1.dts
+++ b/arch/arm64/boot/dts/xilinx/avnet-ultra96-rev1.dts
@@ -2,7 +2,7 @@
/*
* dts file for Avnet Ultra96 rev1
*
- * (C) Copyright 2018, Xilinx, Inc.
+ * (C) Copyright 2018 - 2020, Xilinx, Inc.
*
* Michal Simek <michal.simek@xilinx.com>
*/
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi
index 9868ca15dfc5..329d1c5dbca6 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi
@@ -2,13 +2,37 @@
/*
* Clock specification for Xilinx ZynqMP
*
- * (C) Copyright 2017 - 2019, Xilinx, Inc.
+ * (C) Copyright 2017 - 2020, Xilinx, Inc.
*
* Michal Simek <michal.simek@xilinx.com>
*/
#include <dt-bindings/clock/xlnx-zynqmp-clk.h>
/ {
+ fclk0: fclk0 {
+ status = "okay";
+ compatible = "xlnx,fclk";
+ clocks = <&zynqmp_clk PL0_REF>;
+ };
+
+ fclk1: fclk1 {
+ status = "okay";
+ compatible = "xlnx,fclk";
+ clocks = <&zynqmp_clk PL1_REF>;
+ };
+
+ fclk2: fclk2 {
+ status = "okay";
+ compatible = "xlnx,fclk";
+ clocks = <&zynqmp_clk PL2_REF>;
+ };
+
+ fclk3: fclk3 {
+ status = "okay";
+ compatible = "xlnx,fclk";
+ clocks = <&zynqmp_clk PL3_REF>;
+ };
+
pss_ref_clk: pss_ref_clk {
u-boot,dm-pre-reloc;
compatible = "fixed-clock";
@@ -43,6 +67,25 @@
#clock-cells = <0>;
clock-frequency = <27000000>;
};
+
+ dp_aclk: dp_aclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
+ clock-accuracy = <100>;
+ };
+};
+
+&zynqmp_firmware {
+ zynqmp_clk: clock-controller {
+ u-boot,dm-pre-reloc;
+ #clock-cells = <1>;
+ compatible = "xlnx,zynqmp-clk";
+ clocks = <&pss_ref_clk>, <&video_clk>, <&pss_alt_ref_clk>,
+ <&aux_ref_clk>, <&gt_crx_ref_clk>;
+ clock-names = "pss_ref_clk", "video_clk", "pss_alt_ref_clk",
+ "aux_ref_clk", "gt_crx_ref_clk";
+ };
};
&can0 {
@@ -89,6 +132,10 @@
clocks = <&zynqmp_clk GDMA_REF>, <&zynqmp_clk LPD_LSBUS>;
};
+&gpu {
+ clocks = <&zynqmp_clk GPU_REF>, <&zynqmp_clk GPU_PP0_REF>, <&zynqmp_clk GPU_PP1_REF>;
+};
+
&lpd_dma_chan1 {
clocks = <&zynqmp_clk ADMA_REF>, <&zynqmp_clk LPD_LSBUS>;
};
@@ -121,6 +168,10 @@
clocks = <&zynqmp_clk ADMA_REF>, <&zynqmp_clk LPD_LSBUS>;
};
+&nand0 {
+ clocks = <&zynqmp_clk NAND_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
&gem0 {
clocks = <&zynqmp_clk LPD_LSBUS>, <&zynqmp_clk GEM0_REF>,
<&zynqmp_clk GEM0_TX>, <&zynqmp_clk GEM0_RX>,
@@ -161,10 +212,30 @@
clocks = <&zynqmp_clk I2C1_REF>;
};
+&perf_monitor_ocm {
+ clocks = <&zynqmp_clk LPD_LSBUS>;
+};
+
+&perf_monitor_ddr {
+ clocks = <&zynqmp_clk TOPSW_LSBUS>;
+};
+
+&perf_monitor_cci {
+ clocks = <&zynqmp_clk TOPSW_LSBUS>;
+};
+
+&perf_monitor_lpd {
+ clocks = <&zynqmp_clk LPD_LSBUS>;
+};
+
&pcie {
clocks = <&zynqmp_clk PCIE_REF>;
};
+&qspi {
+ clocks = <&zynqmp_clk QSPI_REF>, <&zynqmp_clk LPD_LSBUS>;
+};
+
&sata {
clocks = <&zynqmp_clk SATA_REF>;
};
@@ -220,3 +291,27 @@
&watchdog0 {
clocks = <&zynqmp_clk WDT>;
};
+
+&lpd_watchdog {
+ clocks = <&zynqmp_clk LPD_WDT>;
+};
+
+&xilinx_ams {
+ clocks = <&zynqmp_clk AMS_REF>;
+};
+
+&zynqmp_dpsub {
+ clocks = <&dp_aclk>, <&zynqmp_clk DP_AUDIO_REF>, <&zynqmp_clk DP_VIDEO_REF>;
+};
+
+&xlnx_dpdma {
+ clocks = <&zynqmp_clk DPDMA_REF>;
+};
+
+&zynqmp_dp_snd_codec0 {
+ clocks = <&zynqmp_clk DP_AUDIO_REF>;
+};
+
+&zynqmp_pcap {
+ clocks = <&zynqmp_clk PCAP>;
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-clk.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp-clk.dtsi
new file mode 100644
index 000000000000..5087c4980088
--- /dev/null
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-clk.dtsi
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Clock specification for Xilinx ZynqMP
+ *
+ * (C) Copyright 2015 - 2020, Xilinx, Inc.
+ *
+ * Michal Simek <michal.simek@xilinx.com>
+ */
+
+/ {
+ clk100: clk100 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
+ u-boot,dm-pre-reloc;
+ };
+
+ clk125: clk125 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <125000000>;
+ };
+
+ clk200: clk200 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <200000000>;
+ u-boot,dm-pre-reloc;
+ };
+
+ clk250: clk250 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <250000000>;
+ };
+
+ clk300: clk300 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <300000000>;
+ u-boot,dm-pre-reloc;
+ };
+
+ clk600: clk600 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <600000000>;
+ };
+
+ dp_aclk: clock0 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
+ clock-accuracy = <100>;
+ };
+
+ dp_aud_clk: clock1 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24576000>;
+ clock-accuracy = <100>;
+ };
+
+ dpdma_clk: dpdma-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0x0>;
+ clock-frequency = <533000000>;
+ };
+
+ drm_clock: drm-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0x0>;
+ clock-frequency = <262750000>;
+ clock-accuracy = <0x64>;
+ };
+};
+
+&can0 {
+ clocks = <&clk100 &clk100>;
+};
+
+&can1 {
+ clocks = <&clk100 &clk100>;
+};
+
+&fpd_dma_chan1 {
+ clocks = <&clk600>, <&clk100>;
+};
+
+&fpd_dma_chan2 {
+ clocks = <&clk600>, <&clk100>;
+};
+
+&fpd_dma_chan3 {
+ clocks = <&clk600>, <&clk100>;
+};
+
+&fpd_dma_chan4 {
+ clocks = <&clk600>, <&clk100>;
+};
+
+&fpd_dma_chan5 {
+ clocks = <&clk600>, <&clk100>;
+};
+
+&fpd_dma_chan6 {
+ clocks = <&clk600>, <&clk100>;
+};
+
+&fpd_dma_chan7 {
+ clocks = <&clk600>, <&clk100>;
+};
+
+&fpd_dma_chan8 {
+ clocks = <&clk600>, <&clk100>;
+};
+
+&lpd_dma_chan1 {
+ clocks = <&clk600>, <&clk100>;
+};
+
+&lpd_dma_chan2 {
+ clocks = <&clk600>, <&clk100>;
+};
+
+&lpd_dma_chan3 {
+ clocks = <&clk600>, <&clk100>;
+};
+
+&lpd_dma_chan4 {
+ clocks = <&clk600>, <&clk100>;
+};
+
+&lpd_dma_chan5 {
+ clocks = <&clk600>, <&clk100>;
+};
+
+&lpd_dma_chan6 {
+ clocks = <&clk600>, <&clk100>;
+};
+
+&lpd_dma_chan7 {
+ clocks = <&clk600>, <&clk100>;
+};
+
+&lpd_dma_chan8 {
+ clocks = <&clk600>, <&clk100>;
+};
+
+&nand0 {
+ clocks = <&clk100 &clk100>;
+};
+
+&gem0 {
+ clocks = <&clk125>, <&clk125>, <&clk125>;
+};
+
+&gem1 {
+ clocks = <&clk125>, <&clk125>, <&clk125>;
+};
+
+&gem2 {
+ clocks = <&clk125>, <&clk125>, <&clk125>;
+};
+
+&gem3 {
+ clocks = <&clk125>, <&clk125>, <&clk125>;
+};
+
+&gpio {
+ clocks = <&clk100>;
+};
+
+&i2c0 {
+ clocks = <&clk100>;
+};
+
+&i2c1 {
+ clocks = <&clk100>;
+};
+
+&perf_monitor_ocm {
+ clocks = <&clk100>;
+};
+
+&perf_monitor_ddr {
+ clocks = <&clk100>;
+};
+
+&perf_monitor_cci {
+ clocks = <&clk100>;
+};
+
+&perf_monitor_lpd {
+ clocks = <&clk100>;
+};
+
+&qspi {
+ clocks = <&clk300 &clk300>;
+};
+
+&sata {
+ clocks = <&clk250>;
+};
+
+&sdhci0 {
+ clocks = <&clk200 &clk200>;
+};
+
+&sdhci1 {
+ clocks = <&clk200 &clk200>;
+};
+
+&spi0 {
+ clocks = <&clk200 &clk200>;
+};
+
+&spi1 {
+ clocks = <&clk200 &clk200>;
+};
+
+&uart0 {
+ clocks = <&clk100 &clk100>;
+};
+
+&uart1 {
+ clocks = <&clk100 &clk100>;
+};
+
+&usb0 {
+ clocks = <&clk250>, <&clk250>;
+};
+
+&usb1 {
+ clocks = <&clk250>, <&clk250>;
+};
+
+&watchdog0 {
+ clocks = <&clk100>;
+};
+
+&lpd_watchdog {
+ clocks = <&clk250>;
+};
+
+&zynqmp_dpsub {
+ clocks = <&dp_aclk>, <&dp_aud_clk>, <&drm_clock>;
+};
+
+&xlnx_dpdma {
+ clocks = <&dpdma_clk>;
+};
+
+&zynqmp_dp_snd_codec0 {
+ clocks = <&dp_aud_clk>;
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1232-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1232-revA.dts
index 2e05fa416955..afb3e96520b8 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1232-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1232-revA.dts
@@ -2,7 +2,7 @@
/*
* dts file for Xilinx ZynqMP ZC1232
*
- * (C) Copyright 2017 - 2019, Xilinx, Inc.
+ * (C) Copyright 2017 - 2020, Xilinx, Inc.
*
* Michal Simek <michal.simek@xilinx.com>
*/
@@ -11,6 +11,7 @@
#include "zynqmp.dtsi"
#include "zynqmp-clk-ccf.dtsi"
+#include <dt-bindings/phy/phy.h>
/ {
model = "ZynqMP ZC1232 RevA";
@@ -19,6 +20,7 @@
aliases {
serial0 = &uart0;
serial1 = &dcc;
+ spi0 = &qspi;
};
chosen {
@@ -36,6 +38,35 @@
status = "okay";
};
+&qspi {
+ status = "okay";
+ flash@0 {
+ compatible = "m25p80", "jedec,spi-nor"; /* 32MB FIXME */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@0 { /* for testing purpose */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@100000 { /* for testing purpose */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@600000 { /* for testing purpose */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@620000 { /* for testing purpose */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
&sata {
status = "okay";
/* SATA OOB timing settings */
@@ -47,6 +78,8 @@
ceva,p1-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
ceva,p1-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
ceva,p1-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ phy-names = "sata-phy";
+ phys = <&lane0 PHY_TYPE_SATA 0 0 125000000>, <&lane1 PHY_TYPE_SATA 1 1 125000000>;
};
&uart0 {
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1254-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1254-revA.dts
index 3d0aaa02f184..9cc1c0c6c5a7 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1254-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1254-revA.dts
@@ -2,7 +2,7 @@
/*
* dts file for Xilinx ZynqMP ZC1254
*
- * (C) Copyright 2015 - 2019, Xilinx, Inc.
+ * (C) Copyright 2015 - 2020, Xilinx, Inc.
*
* Michal Simek <michal.simek@xilinx.com>
* Siva Durga Prasad Paladugu <sivadur@xilinx.com>
@@ -20,6 +20,7 @@
aliases {
serial0 = &uart0;
serial1 = &dcc;
+ spi0 = &qspi;
};
chosen {
@@ -37,6 +38,35 @@
status = "okay";
};
+&qspi {
+ status = "okay";
+ flash@0 {
+ compatible = "m25p80", "jedec,spi-nor"; /* 32MB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>; /* FIXME also DUAL configuration possible */
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@0 { /* for testing purpose */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@100000 { /* for testing purpose */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@600000 { /* for testing purpose */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@620000 { /* for testing purpose */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
&uart0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1275-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1275-revA.dts
deleted file mode 100644
index 66a90483b004..000000000000
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1275-revA.dts
+++ /dev/null
@@ -1,42 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * dts file for Xilinx ZynqMP ZC1275
- *
- * (C) Copyright 2017 - 2019, Xilinx, Inc.
- *
- * Michal Simek <michal.simek@xilinx.com>
- * Siva Durga Prasad Paladugu <sivadur@xilinx.com>
- */
-
-/dts-v1/;
-
-#include "zynqmp.dtsi"
-#include "zynqmp-clk-ccf.dtsi"
-
-/ {
- model = "ZynqMP ZC1275 RevA";
- compatible = "xlnx,zynqmp-zc1275-revA", "xlnx,zynqmp-zc1275", "xlnx,zynqmp";
-
- aliases {
- serial0 = &uart0;
- serial1 = &dcc;
- };
-
- chosen {
- bootargs = "earlycon";
- stdout-path = "serial0:115200n8";
- };
-
- memory@0 {
- device_type = "memory";
- reg = <0x0 0x0 0x0 0x80000000>;
- };
-};
-
-&dcc {
- status = "okay";
-};
-
-&uart0 {
- status = "okay";
-};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm015-dc1.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm015-dc1.dts
index 69f6e4610739..da7aab0801fe 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm015-dc1.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm015-dc1.dts
@@ -2,7 +2,7 @@
/*
* dts file for Xilinx ZynqMP zc1751-xm015-dc1
*
- * (C) Copyright 2015 - 2019, Xilinx, Inc.
+ * (C) Copyright 2015 - 2020, Xilinx, Inc.
*
* Michal Simek <michal.simek@xilinx.com>
*/
@@ -11,7 +11,9 @@
#include "zynqmp.dtsi"
#include "zynqmp-clk-ccf.dtsi"
+#include <dt-bindings/phy/phy.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
/ {
model = "ZynqMP zc1751-xm015-dc1 RevA";
@@ -19,11 +21,14 @@
aliases {
ethernet0 = &gem3;
+ gpio0 = &gpio;
i2c0 = &i2c1;
mmc0 = &sdhci0;
mmc1 = &sdhci1;
rtc0 = &rtc;
serial0 = &uart0;
+ spi0 = &qspi;
+ usb0 = &usb0;
};
chosen {
@@ -73,6 +78,8 @@
status = "okay";
phy-handle = <&phy0>;
phy-mode = "rgmii-id";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gem3_default>;
phy0: ethernet-phy@0 {
reg = <0>;
};
@@ -80,12 +87,22 @@
&gpio {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_default>;
};
+&gpu {
+ status = "okay";
+};
&i2c1 {
status = "okay";
clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1_default>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio 36 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 37 GPIO_ACTIVE_HIGH>;
eeprom: eeprom@55 {
compatible = "atmel,24c64"; /* 24AA64 */
@@ -93,6 +110,245 @@
};
};
+&pinctrl0 {
+ status = "okay";
+ pinctrl_i2c1_default: i2c1-default {
+ mux {
+ groups = "i2c1_9_grp";
+ function = "i2c1";
+ };
+
+ conf {
+ groups = "i2c1_9_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_gpio: i2c1-gpio {
+ mux {
+ groups = "gpio0_36_grp", "gpio0_37_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_36_grp", "gpio0_37_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_uart0_default: uart0-default {
+ mux {
+ groups = "uart0_8_grp";
+ function = "uart0";
+ };
+
+ conf {
+ groups = "uart0_8_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO34";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO35";
+ bias-disable;
+ };
+ };
+
+ pinctrl_usb0_default: usb0-default {
+ mux {
+ groups = "usb0_0_grp";
+ function = "usb0";
+ };
+
+ conf {
+ groups = "usb0_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO52", "MIO53", "MIO55";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO54", "MIO56", "MIO57", "MIO58", "MIO59",
+ "MIO60", "MIO61", "MIO62", "MIO63";
+ bias-disable;
+ };
+ };
+
+ pinctrl_gem3_default: gem3-default {
+ mux {
+ function = "ethernet3";
+ groups = "ethernet3_0_grp";
+ };
+
+ conf {
+ groups = "ethernet3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO70", "MIO71", "MIO72", "MIO73", "MIO74",
+ "MIO75";
+ bias-high-impedance;
+ low-power-disable;
+ };
+
+ conf-tx {
+ pins = "MIO64", "MIO65", "MIO66", "MIO67", "MIO68",
+ "MIO69";
+ bias-disable;
+ low-power-enable;
+ };
+
+ mux-mdio {
+ function = "mdio3";
+ groups = "mdio3_0_grp";
+ };
+
+ conf-mdio {
+ groups = "mdio3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+ };
+
+ pinctrl_sdhci0_default: sdhci0-default {
+ mux {
+ groups = "sdio0_0_grp";
+ function = "sdio0";
+ };
+
+ conf {
+ groups = "sdio0_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+
+ mux-cd {
+ groups = "sdio0_cd_0_grp";
+ function = "sdio0_cd";
+ };
+
+ conf-cd {
+ groups = "sdio0_cd_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-wp {
+ groups = "sdio0_wp_0_grp";
+ function = "sdio0_wp";
+ };
+
+ conf-wp {
+ groups = "sdio0_wp_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_sdhci1_default: sdhci1-default {
+ mux {
+ groups = "sdio1_0_grp";
+ function = "sdio1";
+ };
+
+ conf {
+ groups = "sdio1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+
+ mux-cd {
+ groups = "sdio1_cd_0_grp";
+ function = "sdio1_cd";
+ };
+
+ conf-cd {
+ groups = "sdio1_cd_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-wp {
+ groups = "sdio1_wp_0_grp";
+ function = "sdio1_wp";
+ };
+
+ conf-wp {
+ groups = "sdio1_wp_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_gpio_default: gpio-default {
+ mux {
+ function = "gpio0";
+ groups = "gpio0_38_grp";
+ };
+
+ conf {
+ groups = "gpio0_38_grp";
+ bias-disable;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+};
+
+&qspi {
+ status = "okay";
+ flash@0 {
+ compatible = "m25p80", "jedec,spi-nor"; /* Micron MT25QU512ABB8ESF */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@0 { /* for testing purpose */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@100000 { /* for testing purpose */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@600000 { /* for testing purpose */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@620000 { /* for testing purpose */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
&rtc {
status = "okay";
};
@@ -108,25 +364,80 @@
ceva,p1-comwake-params = /bits/ 8 <0x06 0x19 0x08 0x0E>;
ceva,p1-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
ceva,p1-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ phy-names = "sata-phy";
+ phys = <&lane3 PHY_TYPE_SATA 1 3 150000000>;
};
/* eMMC */
&sdhci0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci0_default>;
bus-width = <8>;
+ xlnx,mio_bank = <0>;
};
/* SD1 with level shifter */
&sdhci1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci1_default>;
+ /*
+ * This property should be removed for supporting UHS mode
+ */
+ no-1-8-v;
+ xlnx,mio_bank = <1>;
+};
+
+&serdes {
+ status = "okay";
};
&uart0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0_default>;
};
/* ULPI SMSC USB3320 */
&usb0 {
status = "okay";
dr_mode = "host";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb0_default>;
+};
+
+&dwc3_0 {
+ status = "okay";
+ dr_mode = "host";
+ snps,usb3_lpm_capable;
+ phy-names = "usb3-phy";
+ phys = <&lane2 PHY_TYPE_USB3 0 2 26000000>;
+};
+
+&zynqmp_dpsub {
+ status = "okay";
+ phy-names = "dp-phy0", "dp-phy1";
+ phys = <&lane1 PHY_TYPE_DP 0 0 27000000>,
+ <&lane0 PHY_TYPE_DP 1 1 27000000>;
+};
+
+&zynqmp_dp_snd_pcm0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_pcm1 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_card0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_codec0 {
+ status = "okay";
+};
+
+&xlnx_dpdma {
+ status = "okay";
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts
index 4a86efa32d68..0a4b651cb87e 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts
@@ -2,7 +2,7 @@
/*
* dts file for Xilinx ZynqMP zc1751-xm016-dc2
*
- * (C) Copyright 2015 - 2019, Xilinx, Inc.
+ * (C) Copyright 2015 - 2020, Xilinx, Inc.
*
* Michal Simek <michal.simek@xilinx.com>
*/
@@ -12,6 +12,7 @@
#include "zynqmp.dtsi"
#include "zynqmp-clk-ccf.dtsi"
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
/ {
model = "ZynqMP zc1751-xm016-dc2 RevA";
@@ -21,12 +22,14 @@
can0 = &can0;
can1 = &can1;
ethernet0 = &gem2;
+ gpio0 = &gpio;
i2c0 = &i2c0;
rtc0 = &rtc;
serial0 = &uart0;
serial1 = &uart1;
spi0 = &spi0;
spi1 = &spi1;
+ usb0 = &usb1;
};
chosen {
@@ -42,10 +45,14 @@
&can0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can0_default>;
};
&can1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can1_default>;
};
&fpd_dma_chan1 {
@@ -84,6 +91,8 @@
status = "okay";
phy-handle = <&phy0>;
phy-mode = "rgmii-id";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gem2_default>;
phy0: ethernet-phy@5 {
reg = <5>;
ti,rx-internal-delay = <0x8>;
@@ -100,6 +109,11 @@
&i2c0 {
status = "okay";
clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c0_default>;
+ pinctrl-1 = <&pinctrl_i2c0_gpio>;
+ scl-gpios = <&gpio 6 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 7 GPIO_ACTIVE_HIGH>;
tca6416_u26: gpio@20 {
compatible = "ti,tca6416";
@@ -115,6 +129,353 @@
};
};
+&nand0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_nand0_default>;
+ arasan,has-mdma;
+
+ nand@0 {
+ reg = <0x0>;
+ #address-cells = <0x2>;
+ #size-cells = <0x1>;
+
+ partition@0 { /* for testing purpose */
+ label = "nand-fsbl-uboot";
+ reg = <0x0 0x0 0x400000>;
+ };
+ partition@1 { /* for testing purpose */
+ label = "nand-linux";
+ reg = <0x0 0x400000 0x1400000>;
+ };
+ partition@2 { /* for testing purpose */
+ label = "nand-device-tree";
+ reg = <0x0 0x1800000 0x400000>;
+ };
+ partition@3 { /* for testing purpose */
+ label = "nand-rootfs";
+ reg = <0x0 0x1c00000 0x1400000>;
+ };
+ partition@4 { /* for testing purpose */
+ label = "nand-bitstream";
+ reg = <0x0 0x3000000 0x400000>;
+ };
+ partition@5 { /* for testing purpose */
+ label = "nand-misc";
+ reg = <0x0 0x3400000 0xfcc00000>;
+ };
+ };
+ nand@1 {
+ reg = <0x1>;
+ #address-cells = <0x2>;
+ #size-cells = <0x1>;
+
+ partition@0 { /* for testing purpose */
+ label = "nand1-fsbl-uboot";
+ reg = <0x0 0x0 0x400000>;
+ };
+ partition@1 { /* for testing purpose */
+ label = "nand1-linux";
+ reg = <0x0 0x400000 0x1400000>;
+ };
+ partition@2 { /* for testing purpose */
+ label = "nand1-device-tree";
+ reg = <0x0 0x1800000 0x400000>;
+ };
+ partition@3 { /* for testing purpose */
+ label = "nand1-rootfs";
+ reg = <0x0 0x1c00000 0x1400000>;
+ };
+ partition@4 { /* for testing purpose */
+ label = "nand1-bitstream";
+ reg = <0x0 0x3000000 0x400000>;
+ };
+ partition@5 { /* for testing purpose */
+ label = "nand1-misc";
+ reg = <0x0 0x3400000 0xfcc00000>;
+ };
+ };
+};
+
+&pinctrl0 {
+ status = "okay";
+ pinctrl_can0_default: can0-default {
+ mux {
+ function = "can0";
+ groups = "can0_9_grp";
+ };
+
+ conf {
+ groups = "can0_9_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO38";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO39";
+ bias-disable;
+ };
+ };
+
+ pinctrl_can1_default: can1-default {
+ mux {
+ function = "can1";
+ groups = "can1_8_grp";
+ };
+
+ conf {
+ groups = "can1_8_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO33";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO32";
+ bias-disable;
+ };
+ };
+
+ pinctrl_i2c0_default: i2c0-default {
+ mux {
+ groups = "i2c0_1_grp";
+ function = "i2c0";
+ };
+
+ conf {
+ groups = "i2c0_1_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c0_gpio: i2c0-gpio {
+ mux {
+ groups = "gpio0_6_grp", "gpio0_7_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_6_grp", "gpio0_7_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_uart0_default: uart0-default {
+ mux {
+ groups = "uart0_10_grp";
+ function = "uart0";
+ };
+
+ conf {
+ groups = "uart0_10_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO42";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO43";
+ bias-disable;
+ };
+ };
+
+ pinctrl_uart1_default: uart1-default {
+ mux {
+ groups = "uart1_10_grp";
+ function = "uart1";
+ };
+
+ conf {
+ groups = "uart1_10_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO41";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO40";
+ bias-disable;
+ };
+ };
+
+ pinctrl_usb1_default: usb1-default {
+ mux {
+ groups = "usb1_0_grp";
+ function = "usb1";
+ };
+
+ conf {
+ groups = "usb1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO64", "MIO65", "MIO67";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO66", "MIO68", "MIO69", "MIO70", "MIO71",
+ "MIO72", "MIO73", "MIO74", "MIO75";
+ bias-disable;
+ };
+ };
+
+ pinctrl_gem2_default: gem2-default {
+ mux {
+ function = "ethernet2";
+ groups = "ethernet2_0_grp";
+ };
+
+ conf {
+ groups = "ethernet2_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO58", "MIO59", "MIO60", "MIO61", "MIO62",
+ "MIO63";
+ bias-high-impedance;
+ low-power-disable;
+ };
+
+ conf-tx {
+ pins = "MIO52", "MIO53", "MIO54", "MIO55", "MIO56",
+ "MIO57";
+ bias-disable;
+ low-power-enable;
+ };
+
+ mux-mdio {
+ function = "mdio2";
+ groups = "mdio2_0_grp";
+ };
+
+ conf-mdio {
+ groups = "mdio2_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+ };
+
+ pinctrl_nand0_default: nand0-default {
+ mux {
+ groups = "nand0_0_grp";
+ function = "nand0";
+ };
+
+ conf {
+ groups = "nand0_0_grp";
+ bias-pull-up;
+ };
+
+ mux-ce {
+ groups = "nand0_ce_0_grp";
+ function = "nand0_ce";
+ };
+
+ conf-ce {
+ groups = "nand0_ce_0_grp";
+ bias-pull-up;
+ };
+
+ mux-rb {
+ groups = "nand0_rb_0_grp";
+ function = "nand0_rb";
+ };
+
+ conf-rb {
+ groups = "nand0_rb_0_grp";
+ bias-pull-up;
+ };
+
+ mux-dqs {
+ groups = "nand0_dqs_0_grp";
+ function = "nand0_dqs";
+ };
+
+ conf-dqs {
+ groups = "nand0_dqs_0_grp";
+ bias-pull-up;
+ };
+ };
+
+ pinctrl_spi0_default: spi0-default {
+ mux {
+ groups = "spi0_0_grp";
+ function = "spi0";
+ };
+
+ conf {
+ groups = "spi0_0_grp";
+ bias-disable;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-cs {
+ groups = "spi0_ss_0_grp", "spi0_ss_1_grp",
+ "spi0_ss_2_grp";
+ function = "spi0_ss";
+ };
+
+ conf-cs {
+ groups = "spi0_ss_0_grp", "spi0_ss_1_grp",
+ "spi0_ss_2_grp";
+ bias-disable;
+ };
+ };
+
+ pinctrl_spi1_default: spi1-default {
+ mux {
+ groups = "spi1_3_grp";
+ function = "spi1";
+ };
+
+ conf {
+ groups = "spi1_3_grp";
+ bias-disable;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-cs {
+ groups = "spi1_ss_9_grp", "spi1_ss_10_grp",
+ "spi1_ss_11_grp";
+ function = "spi1_ss";
+ };
+
+ conf-cs {
+ groups = "spi1_ss_9_grp", "spi1_ss_10_grp",
+ "spi1_ss_11_grp";
+ bias-disable;
+ };
+ };
+};
+
&rtc {
status = "okay";
};
@@ -122,6 +483,8 @@
&spi0 {
status = "okay";
num-cs = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi0_default>;
spi0_flash0: flash@0 {
#address-cells = <1>;
@@ -131,7 +494,7 @@
reg = <0>;
partition@0 {
- label = "data";
+ label = "spi0-data";
reg = <0x0 0x100000>;
};
};
@@ -140,6 +503,8 @@
&spi1 {
status = "okay";
num-cs = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi1_default>;
spi1_flash0: flash@0 {
#address-cells = <1>;
@@ -149,7 +514,7 @@
reg = <0>;
partition@0 {
- label = "data";
+ label = "spi1-data";
reg = <0x0 0x84000>;
};
};
@@ -159,12 +524,23 @@
&usb1 {
status = "okay";
dr_mode = "host";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb1_default>;
+};
+
+&dwc3_1 {
+ status = "okay";
+ dr_mode = "host";
};
&uart0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0_default>;
};
&uart1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_default>;
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm017-dc3.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm017-dc3.dts
index 4ea6ef5a7f2b..c7de59e1e986 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm017-dc3.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm017-dc3.dts
@@ -2,7 +2,7 @@
/*
* dts file for Xilinx ZynqMP zc1751-xm017-dc3
*
- * (C) Copyright 2016 - 2019, Xilinx, Inc.
+ * (C) Copyright 2016 - 2020, Xilinx, Inc.
*
* Michal Simek <michal.simek@xilinx.com>
*/
@@ -18,12 +18,15 @@
aliases {
ethernet0 = &gem0;
+ gpio0 = &gpio;
i2c0 = &i2c0;
i2c1 = &i2c1;
mmc0 = &sdhci1;
rtc0 = &rtc;
serial0 = &uart0;
serial1 = &uart1;
+ usb0 = &usb0;
+ usb1 = &usb1;
};
chosen {
@@ -107,6 +110,63 @@
clock-frequency = <400000>;
};
+/* MT29F64G08AECDBJ4-6 */
+&nand0 {
+ status = "okay";
+ arasan,has-mdma;
+ num-cs = <2>;
+
+ partition@0 { /* for testing purpose */
+ label = "nand-fsbl-uboot";
+ reg = <0x0 0x0 0x400000>;
+ };
+ partition@1 { /* for testing purpose */
+ label = "nand-linux";
+ reg = <0x0 0x400000 0x1400000>;
+ };
+ partition@2 { /* for testing purpose */
+ label = "nand-device-tree";
+ reg = <0x0 0x1800000 0x400000>;
+ };
+ partition@3 { /* for testing purpose */
+ label = "nand-rootfs";
+ reg = <0x0 0x1C00000 0x1400000>;
+ };
+ partition@4 { /* for testing purpose */
+ label = "nand-bitstream";
+ reg = <0x0 0x3000000 0x400000>;
+ };
+ partition@5 { /* for testing purpose */
+ label = "nand-misc";
+ reg = <0x0 0x3400000 0xFCC00000>;
+ };
+
+ partition@6 { /* for testing purpose */
+ label = "nand1-fsbl-uboot";
+ reg = <0x1 0x0 0x400000>;
+ };
+ partition@7 { /* for testing purpose */
+ label = "nand1-linux";
+ reg = <0x1 0x400000 0x1400000>;
+ };
+ partition@8 { /* for testing purpose */
+ label = "nand1-device-tree";
+ reg = <0x1 0x1800000 0x400000>;
+ };
+ partition@9 { /* for testing purpose */
+ label = "nand1-rootfs";
+ reg = <0x1 0x1C00000 0x1400000>;
+ };
+ partition@10 { /* for testing purpose */
+ label = "nand1-bitstream";
+ reg = <0x1 0x3000000 0x400000>;
+ };
+ partition@11 { /* for testing purpose */
+ label = "nand1-misc";
+ reg = <0x1 0x3400000 0xFCC00000>;
+ };
+};
+
&rtc {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm018-dc4.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm018-dc4.dts
index 2366cd9f091a..9b38b8b919e2 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm018-dc4.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm018-dc4.dts
@@ -2,7 +2,7 @@
/*
* dts file for Xilinx ZynqMP zc1751-xm018-dc4
*
- * (C) Copyright 2015 - 2019, Xilinx, Inc.
+ * (C) Copyright 2015 - 2020, Xilinx, Inc.
*
* Michal Simek <michal.simek@xilinx.com>
*/
@@ -17,15 +17,19 @@
compatible = "xlnx,zynqmp-zc1751", "xlnx,zynqmp";
aliases {
+ can0 = &can0;
+ can1 = &can1;
ethernet0 = &gem0;
ethernet1 = &gem1;
ethernet2 = &gem2;
ethernet3 = &gem3;
+ gpio0 = &gpio;
i2c0 = &i2c0;
i2c1 = &i2c1;
rtc0 = &rtc;
serial0 = &uart0;
serial1 = &uart1;
+ spi0 = &qspi;
};
chosen {
@@ -111,6 +115,14 @@
status = "okay";
};
+&zynqmp_dpsub {
+ status = "okay";
+};
+
+&xlnx_dpdma {
+ status = "okay";
+};
+
&gem0 {
status = "okay";
phy-mode = "rgmii-id";
@@ -151,6 +163,10 @@
status = "okay";
};
+&gpu {
+ status = "okay";
+};
+
&i2c0 {
clock-frequency = <400000>;
status = "okay";
@@ -161,6 +177,35 @@
status = "okay";
};
+&qspi {
+ status = "okay";
+ flash@0 {
+ compatible = "m25p80", "jedec,spi-nor"; /* 32MB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>; /* also DUAL configuration possible */
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@0 { /* for testing purpose */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@100000 { /* for testing purpose */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@600000 { /* for testing purpose */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@620000 { /* for testing purpose */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
&rtc {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm019-dc5.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm019-dc5.dts
index 41934e3525c6..9427bd57aa1b 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm019-dc5.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm019-dc5.dts
@@ -2,7 +2,7 @@
/*
* dts file for Xilinx ZynqMP zc1751-xm019-dc5
*
- * (C) Copyright 2015 - 2019, Xilinx, Inc.
+ * (C) Copyright 2015 - 2020, Xilinx, Inc.
*
* Siva Durga Prasad <siva.durga.paladugu@xilinx.com>
* Michal Simek <michal.simek@xilinx.com>
@@ -13,6 +13,7 @@
#include "zynqmp.dtsi"
#include "zynqmp-clk-ccf.dtsi"
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
/ {
model = "ZynqMP zc1751-xm019-dc5 RevA";
@@ -20,6 +21,7 @@
aliases {
ethernet0 = &gem1;
+ gpio0 = &gpio;
i2c0 = &i2c0;
i2c1 = &i2c1;
mmc0 = &sdhci0;
@@ -74,6 +76,8 @@
status = "okay";
phy-handle = <&phy0>;
phy-mode = "rgmii-id";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gem1_default>;
phy0: ethernet-phy@0 {
reg = <0>;
};
@@ -85,41 +89,366 @@
&i2c0 {
status = "okay";
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c0_default>;
+ pinctrl-1 = <&pinctrl_i2c0_gpio>;
+ scl-gpios = <&gpio 74 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 75 GPIO_ACTIVE_HIGH>;
};
&i2c1 {
status = "okay";
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1_default>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio 76 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 77 GPIO_ACTIVE_HIGH>;
+
+};
+
+&pinctrl0 {
+ status = "okay";
+ pinctrl_i2c0_default: i2c0-default {
+ mux {
+ groups = "i2c0_18_grp";
+ function = "i2c0";
+ };
+
+ conf {
+ groups = "i2c0_18_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c0_gpio: i2c0-gpio {
+ mux {
+ groups = "gpio0_74_grp", "gpio0_75_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_74_grp", "gpio0_75_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_default: i2c1-default {
+ mux {
+ groups = "i2c1_19_grp";
+ function = "i2c1";
+ };
+
+ conf {
+ groups = "i2c1_19_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_gpio: i2c1-gpio {
+ mux {
+ groups = "gpio0_76_grp", "gpio0_77_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_76_grp", "gpio0_77_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_uart0_default: uart0-default {
+ mux {
+ groups = "uart0_17_grp";
+ function = "uart0";
+ };
+
+ conf {
+ groups = "uart0_17_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO70";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO71";
+ bias-disable;
+ };
+ };
+
+ pinctrl_uart1_default: uart1-default {
+ mux {
+ groups = "uart1_18_grp";
+ function = "uart1";
+ };
+
+ conf {
+ groups = "uart1_18_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO73";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO72";
+ bias-disable;
+ };
+ };
+
+ pinctrl_gem1_default: gem1-default {
+ mux {
+ function = "ethernet1";
+ groups = "ethernet1_0_grp";
+ };
+
+ conf {
+ groups = "ethernet1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO44", "MIO45", "MIO46", "MIO47", "MIO48",
+ "MIO49";
+ bias-high-impedance;
+ low-power-disable;
+ };
+
+ conf-tx {
+ pins = "MIO38", "MIO39", "MIO40", "MIO41", "MIO42",
+ "MIO43";
+ bias-disable;
+ low-power-enable;
+ };
+
+ mux-mdio {
+ function = "mdio1";
+ groups = "mdio1_0_grp";
+ };
+
+ conf-mdio {
+ groups = "mdio1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+ };
+
+ pinctrl_sdhci0_default: sdhci0-default {
+ mux {
+ groups = "sdio0_0_grp";
+ function = "sdio0";
+ };
+
+ conf {
+ groups = "sdio0_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+
+ mux-cd {
+ groups = "sdio0_cd_0_grp";
+ function = "sdio0_cd";
+ };
+
+ conf-cd {
+ groups = "sdio0_cd_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-wp {
+ groups = "sdio0_wp_0_grp";
+ function = "sdio0_wp";
+ };
+
+ conf-wp {
+ groups = "sdio0_wp_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_watchdog0_default: watchdog0-default {
+ mux-clk {
+ groups = "swdt0_clk_1_grp";
+ function = "swdt0_clk";
+ };
+
+ conf-clk {
+ groups = "swdt0_clk_1_grp";
+ bias-pull-up;
+ };
+
+ mux-rst {
+ groups = "swdt0_rst_1_grp";
+ function = "swdt0_rst";
+ };
+
+ conf-rst {
+ groups = "swdt0_rst_1_grp";
+ bias-disable;
+ slew-rate = <SLEW_RATE_SLOW>;
+ };
+ };
+
+ pinctrl_ttc0_default: ttc0-default {
+ mux-clk {
+ groups = "ttc0_clk_0_grp";
+ function = "ttc0_clk";
+ };
+
+ conf-clk {
+ groups = "ttc0_clk_0_grp";
+ bias-pull-up;
+ };
+
+ mux-wav {
+ groups = "ttc0_wav_0_grp";
+ function = "ttc0_wav";
+ };
+
+ conf-wav {
+ groups = "ttc0_wav_0_grp";
+ bias-disable;
+ slew-rate = <SLEW_RATE_SLOW>;
+ };
+ };
+
+ pinctrl_ttc1_default: ttc1-default {
+ mux-clk {
+ groups = "ttc1_clk_0_grp";
+ function = "ttc1_clk";
+ };
+
+ conf-clk {
+ groups = "ttc1_clk_0_grp";
+ bias-pull-up;
+ };
+
+ mux-wav {
+ groups = "ttc1_wav_0_grp";
+ function = "ttc1_wav";
+ };
+
+ conf-wav {
+ groups = "ttc1_wav_0_grp";
+ bias-disable;
+ slew-rate = <SLEW_RATE_SLOW>;
+ };
+ };
+
+ pinctrl_ttc2_default: ttc2-default {
+ mux-clk {
+ groups = "ttc2_clk_0_grp";
+ function = "ttc2_clk";
+ };
+
+ conf-clk {
+ groups = "ttc2_clk_0_grp";
+ bias-pull-up;
+ };
+
+ mux-wav {
+ groups = "ttc2_wav_0_grp";
+ function = "ttc2_wav";
+ };
+
+ conf-wav {
+ groups = "ttc2_wav_0_grp";
+ bias-disable;
+ slew-rate = <SLEW_RATE_SLOW>;
+ };
+ };
+
+ pinctrl_ttc3_default: ttc3-default {
+ mux-clk {
+ groups = "ttc3_clk_0_grp";
+ function = "ttc3_clk";
+ };
+
+ conf-clk {
+ groups = "ttc3_clk_0_grp";
+ bias-pull-up;
+ };
+
+ mux-wav {
+ groups = "ttc3_wav_0_grp";
+ function = "ttc3_wav";
+ };
+
+ conf-wav {
+ groups = "ttc3_wav_0_grp";
+ bias-disable;
+ slew-rate = <SLEW_RATE_SLOW>;
+ };
+ };
};
&sdhci0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci0_default>;
no-1-8-v;
+ xlnx,mio_bank = <0>;
};
&ttc0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ttc0_default>;
};
&ttc1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ttc1_default>;
};
&ttc2 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ttc2_default>;
};
&ttc3 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ttc3_default>;
};
&uart0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0_default>;
};
&uart1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_default>;
};
&watchdog0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_watchdog0_default>;
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
index d60110ad8367..4838ee538d69 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
@@ -2,7 +2,7 @@
/*
* dts file for Xilinx ZynqMP ZCU100 revC
*
- * (C) Copyright 2016 - 2019, Xilinx, Inc.
+ * (C) Copyright 2016 - 2020, Xilinx, Inc.
*
* Michal Simek <michal.simek@xilinx.com>
* Nathalie Chan King Choy
@@ -15,12 +15,15 @@
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
+#include <dt-bindings/phy/phy.h>
/ {
model = "ZynqMP ZCU100 RevC";
compatible = "xlnx,zynqmp-zcu100-revC", "xlnx,zynqmp-zcu100", "xlnx,zynqmp";
aliases {
+ gpio0 = &gpio;
i2c0 = &i2c1;
rtc0 = &rtc;
serial0 = &uart1;
@@ -28,6 +31,8 @@
serial2 = &dcc;
spi0 = &spi0;
spi1 = &spi1;
+ usb0 = &usb0;
+ usb1 = &usb1;
mmc0 = &sdhci0;
mmc1 = &sdhci1;
};
@@ -54,6 +59,15 @@
};
};
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&xilinx_ams 0>, <&xilinx_ams 1>, <&xilinx_ams 2>,
+ <&xilinx_ams 3>, <&xilinx_ams 4>, <&xilinx_ams 5>,
+ <&xilinx_ams 6>, <&xilinx_ams 7>, <&xilinx_ams 8>,
+ <&xilinx_ams 9>, <&xilinx_ams 10>,
+ <&xilinx_ams 11>, <&xilinx_ams 12>;
+ };
+
leds {
compatible = "gpio-leds";
ds2 {
@@ -89,6 +103,15 @@
};
};
+ ltc2954: ltc2954 { /* U7 */
+ compatible = "lltc,ltc2954", "lltc,ltc2952";
+ status = "disabled";
+ trigger-gpios = <&gpio 26 GPIO_ACTIVE_LOW>; /* INT line - input */
+ /* If there is HW watchdog on mezzanine this signal should be connected there */
+ watchdog-gpios = <&gpio 35 GPIO_ACTIVE_HIGH>; /* MIO on PAD */
+ kill-gpios = <&gpio 34 GPIO_ACTIVE_LOW>; /* KILL signal - output */
+ };
+
wmmcsdio_fixed: fixedregulator-mmcsdio {
compatible = "regulator-fixed";
regulator-name = "wmmcsdio_fixed";
@@ -145,8 +168,17 @@
"", "", "", "";
};
+&gpu {
+ status = "okay";
+};
+
&i2c1 {
status = "okay";
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1_default>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio 4 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 5 GPIO_ACTIVE_HIGH>;
clock-frequency = <100000>;
i2c-mux@75 { /* u11 */
compatible = "nxp,pca9548";
@@ -224,6 +256,221 @@
};
};
+&pinctrl0 {
+ status = "okay";
+ pinctrl_i2c1_default: i2c1-default {
+ mux {
+ groups = "i2c1_1_grp";
+ function = "i2c1";
+ };
+
+ conf {
+ groups = "i2c1_1_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_gpio: i2c1-gpio {
+ mux {
+ groups = "gpio0_4_grp", "gpio0_5_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_4_grp", "gpio0_5_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_sdhci0_default: sdhci0-default {
+ mux {
+ groups = "sdio0_3_grp";
+ function = "sdio0";
+ };
+
+ conf {
+ groups = "sdio0_3_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+
+ mux-cd {
+ groups = "sdio0_cd_0_grp";
+ function = "sdio0_cd";
+ };
+
+ conf-cd {
+ groups = "sdio0_cd_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_sdhci1_default: sdhci1-default {
+ mux {
+ groups = "sdio1_2_grp";
+ function = "sdio1";
+ };
+
+ conf {
+ groups = "sdio1_2_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+ };
+
+ pinctrl_spi0_default: spi0-default {
+ mux {
+ groups = "spi0_3_grp";
+ function = "spi0";
+ };
+
+ conf {
+ groups = "spi0_3_grp";
+ bias-disable;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-cs {
+ groups = "spi0_ss_9_grp";
+ function = "spi0_ss";
+ };
+
+ conf-cs {
+ groups = "spi0_ss_9_grp";
+ bias-disable;
+ };
+
+ };
+
+ pinctrl_spi1_default: spi1-default {
+ mux {
+ groups = "spi1_0_grp";
+ function = "spi1";
+ };
+
+ conf {
+ groups = "spi1_0_grp";
+ bias-disable;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-cs {
+ groups = "spi1_ss_0_grp";
+ function = "spi1_ss";
+ };
+
+ conf-cs {
+ groups = "spi1_ss_0_grp";
+ bias-disable;
+ };
+
+ };
+
+ pinctrl_uart0_default: uart0-default {
+ mux {
+ groups = "uart0_0_grp";
+ function = "uart0";
+ };
+
+ conf {
+ groups = "uart0_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO3";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO2";
+ bias-disable;
+ };
+ };
+
+ pinctrl_uart1_default: uart1-default {
+ mux {
+ groups = "uart1_0_grp";
+ function = "uart1";
+ };
+
+ conf {
+ groups = "uart1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO1";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO0";
+ bias-disable;
+ };
+ };
+
+ pinctrl_usb0_default: usb0-default {
+ mux {
+ groups = "usb0_0_grp";
+ function = "usb0";
+ };
+
+ conf {
+ groups = "usb0_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO52", "MIO53", "MIO55";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO54", "MIO56", "MIO57", "MIO58", "MIO59",
+ "MIO60", "MIO61", "MIO62", "MIO63";
+ bias-disable;
+ };
+ };
+
+ pinctrl_usb1_default: usb1-default {
+ mux {
+ groups = "usb1_0_grp";
+ function = "usb1";
+ };
+
+ conf {
+ groups = "usb1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO64", "MIO65", "MIO67";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO66", "MIO68", "MIO69", "MIO70", "MIO71",
+ "MIO72", "MIO73", "MIO74", "MIO75";
+ bias-disable;
+ };
+ };
+};
+
&rtc {
status = "okay";
};
@@ -233,11 +480,17 @@
status = "okay";
no-1-8-v;
disable-wp;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci0_default>;
+ xlnx,mio_bank = <0>;
};
&sdhci1 {
status = "okay";
bus-width = <0x4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci1_default>;
+ xlnx,mio_bank = <0>;
non-removable;
disable-wp;
cap-power-off-card;
@@ -253,20 +506,30 @@
};
};
+&serdes {
+ status = "okay";
+};
+
&spi0 { /* Low Speed connector */
status = "okay";
label = "LS-SPI0";
num-cs = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi0_default>;
};
&spi1 { /* High Speed connector */
status = "okay";
label = "HS-SPI1";
num-cs = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi1_default>;
};
&uart0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0_default>;
bluetooth {
compatible = "ti,wl1831-st";
enable-gpios = <&gpio 8 GPIO_ACTIVE_HIGH>;
@@ -275,6 +538,8 @@
&uart1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_default>;
};
@@ -282,14 +547,69 @@
&usb0 {
status = "okay";
dr_mode = "peripheral";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb0_default>;
+};
+
+&dwc3_0 {
+ status = "okay";
+ dr_mode = "peripheral";
+ phy-names = "usb3-phy";
+ phys = <&lane2 PHY_TYPE_USB3 0 0 26000000>;
+ maximum-speed = "super-speed";
};
/* ULPI SMSC USB3320 */
&usb1 {
status = "okay";
dr_mode = "host";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb1_default>;
+};
+
+&dwc3_1 {
+ status = "okay";
+ dr_mode = "host";
+ phy-names = "usb3-phy";
+ phys = <&lane3 PHY_TYPE_USB3 1 0 26000000>;
+ maximum-speed = "super-speed";
};
&watchdog0 {
status = "okay";
};
+
+&zynqmp_dpsub {
+ status = "okay";
+ phy-names = "dp-phy0", "dp-phy1";
+ phys = <&lane1 PHY_TYPE_DP 0 1 27000000>,
+ <&lane0 PHY_TYPE_DP 1 1 27000000>;
+};
+
+&zynqmp_dp_snd_pcm0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_pcm1 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_card0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_codec0 {
+ status = "okay";
+};
+
+&xlnx_dpdma {
+ status = "okay";
+};
+
+&xilinx_ams {
+ status = "okay";
+};
+
+&ams_ps {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-rev1.0.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-rev1.0.dts
index 6647e97edba3..d508f3359943 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-rev1.0.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-rev1.0.dts
@@ -2,7 +2,7 @@
/*
* dts file for Xilinx ZynqMP ZCU102 Rev1.0
*
- * (C) Copyright 2016 - 2018, Xilinx, Inc.
+ * (C) Copyright 2016 - 2020, Xilinx, Inc.
*
* Michal Simek <michal.simek@xilinx.com>
*/
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
index 4f801721564f..729782c48a3d 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
@@ -2,7 +2,7 @@
/*
* dts file for Xilinx ZynqMP ZCU102 RevA
*
- * (C) Copyright 2015 - 2019, Xilinx, Inc.
+ * (C) Copyright 2015 - 2020, Xilinx, Inc.
*
* Michal Simek <michal.simek@xilinx.com>
*/
@@ -13,6 +13,8 @@
#include "zynqmp-clk-ccf.dtsi"
#include <dt-bindings/input/input.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
+#include <dt-bindings/phy/phy.h>
/ {
model = "ZynqMP ZCU102 RevA";
@@ -20,6 +22,7 @@
aliases {
ethernet0 = &gem3;
+ gpio0 = &gpio;
i2c0 = &i2c0;
i2c1 = &i2c1;
mmc0 = &sdhci1;
@@ -27,11 +30,14 @@
serial0 = &uart0;
serial1 = &uart1;
serial2 = &dcc;
+ spi0 = &qspi;
+ usb0 = &usb0;
};
chosen {
bootargs = "earlycon";
stdout-path = "serial0:115200n8";
+ xlnx,eeprom = &eeprom;
};
memory@0 {
@@ -136,6 +142,8 @@
&can1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can1_default>;
};
&dcc {
@@ -178,22 +186,36 @@
status = "okay";
phy-handle = <&phy0>;
phy-mode = "rgmii-id";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gem3_default>;
phy0: ethernet-phy@21 {
reg = <21>;
ti,rx-internal-delay = <0x8>;
ti,tx-internal-delay = <0xa>;
ti,fifo-depth = <0x1>;
ti,dp83867-rxctrl-strap-quirk;
+ /* reset-gpios = <&tca6416_u97 6 GPIO_ACTIVE_LOW>; */
};
};
&gpio {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_default>;
+};
+
+&gpu {
+ status = "okay";
};
&i2c0 {
status = "okay";
clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c0_default>;
+ pinctrl-1 = <&pinctrl_i2c0_gpio>;
+ scl-gpios = <&gpio 14 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 15 GPIO_ACTIVE_HIGH>;
tca6416_u97: gpio@20 {
compatible = "ti,tca6416";
@@ -437,7 +459,6 @@
status = "disabled"; /* unreachable */
reg = <0x20>;
};
-
max20751@72 { /* u95 */
compatible = "maxim,max20751";
reg = <0x72>;
@@ -454,6 +475,11 @@
&i2c1 {
status = "okay";
clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1_default>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio 16 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
/* PL i2c via PCA9306 - u45 */
i2c-mux@74 { /* u34 */
@@ -483,6 +509,7 @@
#size-cells = <0>;
reg = <1>;
si5341: clock-generator@36 { /* SI5341 - u69 */
+ compatible = "silabs,si5341";
reg = <0x36>;
};
@@ -520,6 +547,7 @@
#size-cells = <0>;
reg = <4>;
si5328: clock-generator@69 {/* SI5328 - u20 */
+ compatible = "silabs,si5328";
reg = <0x69>;
/*
* Chip has interrupt present connected to PL
@@ -588,10 +616,303 @@
};
};
+&pinctrl0 {
+ status = "okay";
+ pinctrl_i2c0_default: i2c0-default {
+ mux {
+ groups = "i2c0_3_grp";
+ function = "i2c0";
+ };
+
+ conf {
+ groups = "i2c0_3_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c0_gpio: i2c0-gpio {
+ mux {
+ groups = "gpio0_14_grp", "gpio0_15_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_14_grp", "gpio0_15_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_default: i2c1-default {
+ mux {
+ groups = "i2c1_4_grp";
+ function = "i2c1";
+ };
+
+ conf {
+ groups = "i2c1_4_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_gpio: i2c1-gpio {
+ mux {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_uart0_default: uart0-default {
+ mux {
+ groups = "uart0_4_grp";
+ function = "uart0";
+ };
+
+ conf {
+ groups = "uart0_4_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO18";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO19";
+ bias-disable;
+ };
+ };
+
+ pinctrl_uart1_default: uart1-default {
+ mux {
+ groups = "uart1_5_grp";
+ function = "uart1";
+ };
+
+ conf {
+ groups = "uart1_5_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO21";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO20";
+ bias-disable;
+ };
+ };
+
+ pinctrl_usb0_default: usb0-default {
+ mux {
+ groups = "usb0_0_grp";
+ function = "usb0";
+ };
+
+ conf {
+ groups = "usb0_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO52", "MIO53", "MIO55";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO54", "MIO56", "MIO57", "MIO58", "MIO59",
+ "MIO60", "MIO61", "MIO62", "MIO63";
+ bias-disable;
+ };
+ };
+
+ pinctrl_gem3_default: gem3-default {
+ mux {
+ function = "ethernet3";
+ groups = "ethernet3_0_grp";
+ };
+
+ conf {
+ groups = "ethernet3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO70", "MIO71", "MIO72", "MIO73", "MIO74",
+ "MIO75";
+ bias-high-impedance;
+ low-power-disable;
+ };
+
+ conf-tx {
+ pins = "MIO64", "MIO65", "MIO66", "MIO67", "MIO68",
+ "MIO69";
+ bias-disable;
+ low-power-enable;
+ };
+
+ mux-mdio {
+ function = "mdio3";
+ groups = "mdio3_0_grp";
+ };
+
+ conf-mdio {
+ groups = "mdio3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+ };
+
+ pinctrl_can1_default: can1-default {
+ mux {
+ function = "can1";
+ groups = "can1_6_grp";
+ };
+
+ conf {
+ groups = "can1_6_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO25";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO24";
+ bias-disable;
+ };
+ };
+
+ pinctrl_sdhci1_default: sdhci1-default {
+ mux {
+ groups = "sdio1_0_grp";
+ function = "sdio1";
+ };
+
+ conf {
+ groups = "sdio1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+
+ mux-cd {
+ groups = "sdio1_cd_0_grp";
+ function = "sdio1_cd";
+ };
+
+ conf-cd {
+ groups = "sdio1_cd_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-wp {
+ groups = "sdio1_wp_0_grp";
+ function = "sdio1_wp";
+ };
+
+ conf-wp {
+ groups = "sdio1_wp_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_gpio_default: gpio-default {
+ mux-sw {
+ function = "gpio0";
+ groups = "gpio0_22_grp", "gpio0_23_grp";
+ };
+
+ conf-sw {
+ groups = "gpio0_22_grp", "gpio0_23_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-msp {
+ function = "gpio0";
+ groups = "gpio0_13_grp", "gpio0_38_grp";
+ };
+
+ conf-msp {
+ groups = "gpio0_13_grp", "gpio0_38_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-pull-up {
+ pins = "MIO22", "MIO23";
+ bias-pull-up;
+ };
+
+ conf-pull-none {
+ pins = "MIO13", "MIO38";
+ bias-disable;
+ };
+ };
+};
+
&pcie {
status = "okay";
};
+&qspi {
+ status = "okay";
+ is-dual = <1>;
+ flash@0 {
+ compatible = "m25p80", "jedec,spi-nor"; /* 32MB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>; /* FIXME also DUAL configuration possible */
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@0 { /* for testing purpose */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@100000 { /* for testing purpose */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@600000 { /* for testing purpose */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@620000 { /* for testing purpose */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
&rtc {
status = "okay";
};
@@ -607,28 +928,94 @@
ceva,p1-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
ceva,p1-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
ceva,p1-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ phy-names = "sata-phy";
+ phys = <&lane3 PHY_TYPE_SATA 1 1 125000000>;
};
/* SD1 with level shifter */
&sdhci1 {
+ /*
+ * 1.0 revision has level shifter and this property should be
+ * removed for supporting UHS mode
+ */
status = "okay";
no-1-8-v;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci1_default>;
+ xlnx,mio_bank = <1>;
+};
+
+&serdes {
+ status = "okay";
};
&uart0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0_default>;
};
&uart1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_default>;
};
/* ULPI SMSC USB3320 */
&usb0 {
status = "okay";
dr_mode = "host";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb0_default>;
+};
+
+&dwc3_0 {
+ status = "okay";
+ dr_mode = "host";
+ snps,usb3_lpm_capable;
+ phy-names = "usb3-phy";
+ phys = <&lane2 PHY_TYPE_USB3 0 2 26000000>;
+ maximum-speed = "super-speed";
};
&watchdog0 {
status = "okay";
};
+
+&xilinx_ams {
+ status = "okay";
+};
+
+&ams_ps {
+ status = "okay";
+};
+
+&ams_pl {
+ status = "okay";
+};
+
+&zynqmp_dpsub {
+ status = "okay";
+ phy-names = "dp-phy0";
+ phys = <&lane1 PHY_TYPE_DP 0 3 27000000>;
+};
+
+&zynqmp_dp_snd_codec0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_pcm0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_pcm1 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_card0 {
+ status = "okay";
+};
+
+&xlnx_dpdma {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revB.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revB.dts
index d9ad8a4b20d3..2422558b7484 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revB.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revB.dts
@@ -2,7 +2,7 @@
/*
* dts file for Xilinx ZynqMP ZCU102 RevB
*
- * (C) Copyright 2016 - 2018, Xilinx, Inc.
+ * (C) Copyright 2016 - 2020, Xilinx, Inc.
*
* Michal Simek <michal.simek@xilinx.com>
*/
@@ -22,6 +22,7 @@
ti,tx-internal-delay = <0xa>;
ti,fifo-depth = <0x1>;
ti,dp83867-rxctrl-strap-quirk;
+ /* reset-gpios = <&tca6416_u97 6 GPIO_ACTIVE_LOW>; */
};
/* Cleanup from RevA */
/delete-node/ ethernet-phy@21;
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts
index 7a4614e3f5fa..791093eeedb1 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts
@@ -2,7 +2,7 @@
/*
* dts file for Xilinx ZynqMP ZCU104
*
- * (C) Copyright 2017 - 2019, Xilinx, Inc.
+ * (C) Copyright 2017 - 2020, Xilinx, Inc.
*
* Michal Simek <michal.simek@xilinx.com>
*/
@@ -12,6 +12,8 @@
#include "zynqmp.dtsi"
#include "zynqmp-clk-ccf.dtsi"
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
+#include <dt-bindings/phy/phy.h>
/ {
model = "ZynqMP ZCU104 RevA";
@@ -19,12 +21,15 @@
aliases {
ethernet0 = &gem3;
+ gpio0 = &gpio;
i2c0 = &i2c1;
mmc0 = &sdhci1;
rtc0 = &rtc;
serial0 = &uart0;
serial1 = &uart1;
serial2 = &dcc;
+ spi0 = &qspi;
+ usb0 = &usb0;
};
chosen {
@@ -40,16 +45,52 @@
&can1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can1_default>;
};
&dcc {
status = "okay";
};
+&fpd_dma_chan1 {
+ status = "okay";
+};
+
+&fpd_dma_chan2 {
+ status = "okay";
+};
+
+&fpd_dma_chan3 {
+ status = "okay";
+};
+
+&fpd_dma_chan4 {
+ status = "okay";
+};
+
+&fpd_dma_chan5 {
+ status = "okay";
+};
+
+&fpd_dma_chan6 {
+ status = "okay";
+};
+
+&fpd_dma_chan7 {
+ status = "okay";
+};
+
+&fpd_dma_chan8 {
+ status = "okay";
+};
+
&gem3 {
status = "okay";
phy-handle = <&phy0>;
phy-mode = "rgmii-id";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gem3_default>;
phy0: ethernet-phy@c {
reg = <0xc>;
ti,rx-internal-delay = <0x8>;
@@ -63,9 +104,18 @@
status = "okay";
};
+&gpu {
+ status = "okay";
+};
+
&i2c1 {
status = "okay";
clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1_default>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio 16 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
/* Another connection to this bus via PL i2c via PCA9306 - u45 */
i2c-mux@74 { /* u34 */
@@ -85,7 +135,7 @@
* 512B - 768B address 0x56
* 768B - 1024B address 0x57
*/
- eeprom@54 { /* u23 */
+ eeprom: eeprom@54 { /* u23 */
compatible = "atmel,24c08";
reg = <0x54>;
#address-cells = <1>;
@@ -98,6 +148,7 @@
#size-cells = <0>;
reg = <1>;
clock_8t49n287: clock-generator@6c { /* 8T49N287 - u182 */
+ compatible = "idt,8t49n287";
reg = <0x6c>;
};
};
@@ -106,11 +157,13 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <2>;
- irps5401_43: irps54012@43 { /* IRPS5401 - u175 */
- reg = <0x43>;
+ irps5401_43: irps5401@43 { /* IRPS5401 - u175 */
+ compatible = "infineon,irps5401";
+ reg = <0x43>; /* pmbus / i2c 0x13 */
};
- irps5401_4d: irps54012@4d { /* IRPS5401 - u180 */
- reg = <0x4d>;
+ irps5401_44: irps5401@44 { /* IRPS5401 - u180 */
+ compatible = "infineon,irps5401";
+ reg = <0x44>; /* pmbus / i2c 0x14 */
};
};
@@ -154,6 +207,233 @@
};
};
+&pinctrl0 {
+ status = "okay";
+
+ pinctrl_can1_default: can1-default {
+ mux {
+ function = "can1";
+ groups = "can1_6_grp";
+ };
+
+ conf {
+ groups = "can1_6_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+
+ conf-rx {
+ pins = "MIO25";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO24";
+ bias-disable;
+ };
+ };
+
+ pinctrl_i2c1_default: i2c1-default {
+ mux {
+ groups = "i2c1_4_grp";
+ function = "i2c1";
+ };
+
+ conf {
+ groups = "i2c1_4_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+ };
+
+ pinctrl_i2c1_gpio: i2c1-gpio {
+ mux {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+ };
+
+ pinctrl_gem3_default: gem3-default {
+ mux {
+ function = "ethernet3";
+ groups = "ethernet3_0_grp";
+ };
+
+ conf {
+ groups = "ethernet3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+
+ conf-rx {
+ pins = "MIO70", "MIO71", "MIO72", "MIO73", "MIO74",
+ "MIO75";
+ bias-high-impedance;
+ low-power-disable;
+ };
+
+ conf-tx {
+ pins = "MIO64", "MIO65", "MIO66", "MIO67", "MIO68",
+ "MIO69";
+ bias-disable;
+ low-power-enable;
+ };
+
+ mux-mdio {
+ function = "mdio3";
+ groups = "mdio3_0_grp";
+ };
+
+ conf-mdio {
+ groups = "mdio3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+ };
+
+ pinctrl_sdhci1_default: sdhci1-default {
+ mux {
+ groups = "sdio1_0_grp";
+ function = "sdio1";
+ };
+
+ conf {
+ groups = "sdio1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ drive-strength = <12>;
+ };
+
+ mux-cd {
+ groups = "sdio1_cd_0_grp";
+ function = "sdio1_cd";
+ };
+
+ conf-cd {
+ groups = "sdio1_cd_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_uart0_default: uart0-default {
+ mux {
+ groups = "uart0_4_grp";
+ function = "uart0";
+ };
+
+ conf {
+ groups = "uart0_4_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+
+ conf-rx {
+ pins = "MIO18";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO19";
+ bias-disable;
+ };
+ };
+
+ pinctrl_uart1_default: uart1-default {
+ mux {
+ groups = "uart1_5_grp";
+ function = "uart1";
+ };
+
+ conf {
+ groups = "uart1_5_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+
+ conf-rx {
+ pins = "MIO21";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO20";
+ bias-disable;
+ };
+ };
+
+ pinctrl_usb0_default: usb0-default {
+ mux {
+ groups = "usb0_0_grp";
+ function = "usb0";
+ };
+
+ conf {
+ groups = "usb0_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+
+ conf-rx {
+ pins = "MIO52", "MIO53", "MIO55";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO54", "MIO56", "MIO57", "MIO58", "MIO59",
+ "MIO60", "MIO61", "MIO62", "MIO63";
+ bias-disable;
+ };
+ };
+};
+
+&qspi {
+ status = "okay";
+ flash@0 {
+ compatible = "m25p80", "jedec,spi-nor"; /* n25q512a 128MiB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@0 { /* for testing purpose */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@100000 { /* for testing purpose */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@600000 { /* for testing purpose */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@620000 { /* for testing purpose */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
&rtc {
status = "okay";
};
@@ -169,29 +449,91 @@
ceva,p1-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
ceva,p1-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
ceva,p1-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ phy-names = "sata-phy";
+ phys = <&lane3 PHY_TYPE_SATA 1 1 125000000>;
};
/* SD1 with level shifter */
&sdhci1 {
status = "okay";
no-1-8-v;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci1_default>;
+ xlnx,mio_bank = <1>;
disable-wp;
};
+&serdes {
+ status = "okay";
+};
+
&uart0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0_default>;
};
&uart1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_default>;
};
/* ULPI SMSC USB3320 */
&usb0 {
status = "okay";
dr_mode = "host";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb0_default>;
+};
+
+&dwc3_0 {
+ status = "okay";
+ dr_mode = "host";
+ snps,usb3_lpm_capable;
+ phy-names = "usb3-phy";
+ phys = <&lane2 PHY_TYPE_USB3 0 2 26000000>;
+ maximum-speed = "super-speed";
};
&watchdog0 {
status = "okay";
};
+
+&xilinx_ams {
+ status = "okay";
+};
+
+&ams_ps {
+ status = "okay";
+};
+
+&ams_pl {
+ status = "okay";
+};
+
+&zynqmp_dpsub {
+ status = "okay";
+ phy-names = "dp-phy0", "dp-phy1";
+ phys = <&lane1 PHY_TYPE_DP 0 3 27000000>, <&lane0 PHY_TYPE_DP 1 3 27000000>;
+};
+
+&zynqmp_dp_snd_pcm0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_pcm1 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_card0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_codec0 {
+ status = "okay";
+};
+
+&xlnx_dpdma {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revC.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revC.dts
new file mode 100644
index 000000000000..c974ee80a0cc
--- /dev/null
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revC.dts
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * dts file for Xilinx ZynqMP ZCU104
+ *
+ * (C) Copyright 2017 - 2020, Xilinx, Inc.
+ *
+ * Michal Simek <michal.simek@xilinx.com>
+ */
+
+/dts-v1/;
+
+#include "zynqmp.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
+#include <dt-bindings/phy/phy.h>
+
+/ {
+ model = "ZynqMP ZCU104 RevC";
+ compatible = "xlnx,zynqmp-zcu104-revC", "xlnx,zynqmp-zcu104", "xlnx,zynqmp";
+
+ aliases {
+ ethernet0 = &gem3;
+ gpio0 = &gpio;
+ i2c0 = &i2c1;
+ mmc0 = &sdhci1;
+ rtc0 = &rtc;
+ serial0 = &uart0;
+ serial1 = &uart1;
+ serial2 = &dcc;
+ spi0 = &qspi;
+ usb0 = &usb0;
+ };
+
+ chosen {
+ bootargs = "earlycon";
+ stdout-path = "serial0:115200n8";
+ xlnx,eeprom = &eeprom;
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x80000000>;
+ };
+
+ ina226 {
+ compatible = "iio-hwmon";
+ io-channels = <&u183 0>, <&u183 1>, <&u183 2>, <&u183 3>;
+ };
+};
+
+&can1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can1_default>;
+};
+
+&dcc {
+ status = "okay";
+};
+
+&fpd_dma_chan1 {
+ status = "okay";
+};
+
+&fpd_dma_chan2 {
+ status = "okay";
+};
+
+&fpd_dma_chan3 {
+ status = "okay";
+};
+
+&fpd_dma_chan4 {
+ status = "okay";
+};
+
+&fpd_dma_chan5 {
+ status = "okay";
+};
+
+&fpd_dma_chan6 {
+ status = "okay";
+};
+
+&fpd_dma_chan7 {
+ status = "okay";
+};
+
+&fpd_dma_chan8 {
+ status = "okay";
+};
+
+&gem3 {
+ status = "okay";
+ phy-handle = <&phy0>;
+ phy-mode = "rgmii-id";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gem3_default>;
+ phy0: ethernet-phy@c {
+ reg = <0xc>;
+ ti,rx-internal-delay = <0x8>;
+ ti,tx-internal-delay = <0xa>;
+ ti,fifo-depth = <0x1>;
+ ti,dp83867-rxctrl-strap-quirk;
+ };
+};
+
+&gpio {
+ status = "okay";
+};
+
+&gpu {
+ status = "okay";
+};
+
+&i2c1 {
+ status = "okay";
+ clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1_default>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio 16 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
+
+ tca6416_u97: gpio@20 {
+ compatible = "ti,tca6416";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ /*
+ * IRQ not connected
+ * Lines:
+ * 0 - IRPS5401_ALERT_B
+ * 1 - HDMI_8T49N241_INT_ALM
+ * 2 - MAX6643_OT_B
+ * 3 - MAX6643_FANFAIL_B
+ * 5 - IIC_MUX_RESET_B
+ * 6 - GEM3_EXP_RESET_B
+ * 7 - FMC_LPC_PRSNT_M2C_B
+ * 4, 10 - 17 - not connected
+ */
+ };
+
+ /* Another connection to this bus via PL i2c via PCA9306 - u45 */
+ i2c-mux@74 { /* u34 */
+ compatible = "nxp,pca9548";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x74>;
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ /*
+ * IIC_EEPROM 1kB memory which uses 256B blocks
+ * where every block has different address.
+ * 0 - 256B address 0x54
+ * 256B - 512B address 0x55
+ * 512B - 768B address 0x56
+ * 768B - 1024B address 0x57
+ */
+ eeprom: eeprom@54 { /* u23 */
+ compatible = "atmel,24c08";
+ reg = <0x54>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+ };
+
+ i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ clock_8t49n287: clock-generator@6c { /* 8T49N287 - u182 */
+ compatible = "idt,8t49n287";
+ reg = <0x6c>;
+ };
+ };
+
+ i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ irps5401_43: irps5401@43 { /* IRPS5401 - u175 */
+ compatible = "infineon,irps5401";
+ reg = <0x43>; /* pmbus / i2c 0x13 */
+ };
+ irps5401_44: irps5401@44 { /* IRPS5401 - u180 */
+ compatible = "infineon,irps5401";
+ reg = <0x44>; /* pmbus / i2c 0x14 */
+ };
+ };
+
+ i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ u183: ina226@40 { /* u183 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ reg = <0x40>;
+ shunt-resistor = <5000>;
+ };
+ };
+
+ i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <5>;
+ };
+
+ i2c@7 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <7>;
+ };
+
+ /* 4, 6 not connected */
+ };
+};
+
+&pinctrl0 {
+ status = "okay";
+
+ pinctrl_can1_default: can1-default {
+ mux {
+ function = "can1";
+ groups = "can1_6_grp";
+ };
+
+ conf {
+ groups = "can1_6_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+
+ conf-rx {
+ pins = "MIO25";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO24";
+ bias-disable;
+ };
+ };
+
+ pinctrl_i2c1_default: i2c1-default {
+ mux {
+ groups = "i2c1_4_grp";
+ function = "i2c1";
+ };
+
+ conf {
+ groups = "i2c1_4_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+ };
+
+ pinctrl_i2c1_gpio: i2c1-gpio {
+ mux {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+ };
+
+ pinctrl_gem3_default: gem3-default {
+ mux {
+ function = "ethernet3";
+ groups = "ethernet3_0_grp";
+ };
+
+ conf {
+ groups = "ethernet3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+
+ conf-rx {
+ pins = "MIO70", "MIO71", "MIO72", "MIO73", "MIO74",
+ "MIO75";
+ bias-high-impedance;
+ low-power-disable;
+ };
+
+ conf-tx {
+ pins = "MIO64", "MIO65", "MIO66", "MIO67", "MIO68",
+ "MIO69";
+ bias-disable;
+ low-power-enable;
+ };
+
+ mux-mdio {
+ function = "mdio3";
+ groups = "mdio3_0_grp";
+ };
+
+ conf-mdio {
+ groups = "mdio3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+ };
+
+ pinctrl_sdhci1_default: sdhci1-default {
+ mux {
+ groups = "sdio1_0_grp";
+ function = "sdio1";
+ };
+
+ conf {
+ groups = "sdio1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ drive-strength = <12>;
+ };
+
+ mux-cd {
+ groups = "sdio1_cd_0_grp";
+ function = "sdio1_cd";
+ };
+
+ conf-cd {
+ groups = "sdio1_cd_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_uart0_default: uart0-default {
+ mux {
+ groups = "uart0_4_grp";
+ function = "uart0";
+ };
+
+ conf {
+ groups = "uart0_4_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+
+ conf-rx {
+ pins = "MIO18";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO19";
+ bias-disable;
+ };
+ };
+
+ pinctrl_uart1_default: uart1-default {
+ mux {
+ groups = "uart1_5_grp";
+ function = "uart1";
+ };
+
+ conf {
+ groups = "uart1_5_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+
+ conf-rx {
+ pins = "MIO21";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO20";
+ bias-disable;
+ };
+ };
+
+ pinctrl_usb0_default: usb0-default {
+ mux {
+ groups = "usb0_0_grp";
+ function = "usb0";
+ };
+
+ conf {
+ groups = "usb0_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ drive-strength = <12>;
+ };
+
+ conf-rx {
+ pins = "MIO52", "MIO53", "MIO55";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO54", "MIO56", "MIO57", "MIO58", "MIO59",
+ "MIO60", "MIO61", "MIO62", "MIO63";
+ bias-disable;
+ };
+ };
+};
+
+&qspi {
+ status = "okay";
+ flash@0 {
+ compatible = "m25p80", "jedec,spi-nor"; /* n25q512a 128MiB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@0 { /* for testing purpose */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@100000 { /* for testing purpose */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@600000 { /* for testing purpose */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@620000 { /* for testing purpose */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
+&rtc {
+ status = "okay";
+};
+
+&sata {
+ status = "okay";
+ /* SATA OOB timing settings */
+ ceva,p0-cominit-params = /bits/ 8 <0x18 0x40 0x18 0x28>;
+ ceva,p0-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
+ ceva,p0-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
+ ceva,p0-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ ceva,p1-cominit-params = /bits/ 8 <0x18 0x40 0x18 0x28>;
+ ceva,p1-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
+ ceva,p1-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
+ ceva,p1-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ phy-names = "sata-phy";
+ phys = <&lane3 PHY_TYPE_SATA 1 1 125000000>;
+};
+
+/* SD1 with level shifter */
+&sdhci1 {
+ status = "okay";
+ no-1-8-v;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci1_default>;
+ xlnx,mio_bank = <1>;
+ disable-wp;
+};
+
+&serdes {
+ status = "okay";
+};
+
+&uart0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0_default>;
+};
+
+&uart1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_default>;
+};
+
+/* ULPI SMSC USB3320 */
+&usb0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb0_default>;
+};
+
+&dwc3_0 {
+ status = "okay";
+ dr_mode = "host";
+ snps,usb3_lpm_capable;
+ phy-names = "usb3-phy";
+ phys = <&lane2 PHY_TYPE_USB3 0 2 26000000>;
+ maximum-speed = "super-speed";
+};
+
+&watchdog0 {
+ status = "okay";
+};
+
+&xilinx_ams {
+ status = "okay";
+};
+
+&ams_ps {
+ status = "okay";
+};
+
+&ams_pl {
+ status = "okay";
+};
+
+&zynqmp_dpsub {
+ status = "okay";
+ phy-names = "dp-phy0", "dp-phy1";
+ phys = <&lane1 PHY_TYPE_DP 0 3 27000000>, <&lane0 PHY_TYPE_DP 1 3 27000000>;
+};
+
+&zynqmp_dp_snd_pcm0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_pcm1 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_card0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_codec0 {
+ status = "okay";
+};
+
+&xlnx_dpdma {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts
index 6e9efe233838..f3ec31325419 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts
@@ -2,7 +2,7 @@
/*
* dts file for Xilinx ZynqMP ZCU106
*
- * (C) Copyright 2016 - 2019, Xilinx, Inc.
+ * (C) Copyright 2016 - 2020, Xilinx, Inc.
*
* Michal Simek <michal.simek@xilinx.com>
*/
@@ -13,6 +13,8 @@
#include "zynqmp-clk-ccf.dtsi"
#include <dt-bindings/input/input.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
+#include <dt-bindings/phy/phy.h>
/ {
model = "ZynqMP ZCU106 RevA";
@@ -20,6 +22,7 @@
aliases {
ethernet0 = &gem3;
+ gpio0 = &gpio;
i2c0 = &i2c0;
i2c1 = &i2c1;
mmc0 = &sdhci1;
@@ -27,11 +30,14 @@
serial0 = &uart0;
serial1 = &uart1;
serial2 = &dcc;
+ spi0 = &qspi;
+ usb0 = &usb0;
};
chosen {
bootargs = "earlycon";
stdout-path = "serial0:115200n8";
+ xlnx,eeprom = &eeprom;
};
memory@0 {
@@ -136,13 +142,14 @@
&can1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can1_default>;
};
&dcc {
status = "okay";
};
-/* fpd_dma clk 667MHz, lpd_dma 500MHz */
&fpd_dma_chan1 {
status = "okay";
};
@@ -179,6 +186,8 @@
status = "okay";
phy-handle = <&phy0>;
phy-mode = "rgmii-id";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gem3_default>;
phy0: ethernet-phy@c {
reg = <0xc>;
ti,rx-internal-delay = <0x8>;
@@ -190,11 +199,22 @@
&gpio {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_default>;
+};
+
+&gpu {
+ status = "okay";
};
&i2c0 {
status = "okay";
clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c0_default>;
+ pinctrl-1 = <&pinctrl_i2c0_gpio>;
+ scl-gpios = <&gpio 14 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 15 GPIO_ACTIVE_HIGH>;
tca6416_u97: gpio@20 {
compatible = "ti,tca6416";
@@ -453,6 +473,11 @@
&i2c1 {
status = "okay";
clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1_default>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio 16 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
/* PL i2c via PCA9306 - u45 */
i2c-mux@74 { /* u34 */
@@ -482,6 +507,7 @@
#size-cells = <0>;
reg = <1>;
si5341: clock-generator@36 { /* SI5341 - u69 */
+ compatible = "silabs,si5341";
reg = <0x36>;
};
@@ -519,6 +545,7 @@
#size-cells = <0>;
reg = <4>;
si5328: clock-generator@69 {/* SI5328 - u20 */
+ compatible = "silabs,si5328";
reg = <0x69>;
};
};
@@ -591,6 +618,299 @@
};
};
+&pinctrl0 {
+ status = "okay";
+ pinctrl_i2c0_default: i2c0-default {
+ mux {
+ groups = "i2c0_3_grp";
+ function = "i2c0";
+ };
+
+ conf {
+ groups = "i2c0_3_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c0_gpio: i2c0-gpio {
+ mux {
+ groups = "gpio0_14_grp", "gpio0_15_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_14_grp", "gpio0_15_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_default: i2c1-default {
+ mux {
+ groups = "i2c1_4_grp";
+ function = "i2c1";
+ };
+
+ conf {
+ groups = "i2c1_4_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_gpio: i2c1-gpio {
+ mux {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_uart0_default: uart0-default {
+ mux {
+ groups = "uart0_4_grp";
+ function = "uart0";
+ };
+
+ conf {
+ groups = "uart0_4_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO18";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO19";
+ bias-disable;
+ };
+ };
+
+ pinctrl_uart1_default: uart1-default {
+ mux {
+ groups = "uart1_5_grp";
+ function = "uart1";
+ };
+
+ conf {
+ groups = "uart1_5_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO21";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO20";
+ bias-disable;
+ };
+ };
+
+ pinctrl_usb0_default: usb0-default {
+ mux {
+ groups = "usb0_0_grp";
+ function = "usb0";
+ };
+
+ conf {
+ groups = "usb0_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO52", "MIO53", "MIO55";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO54", "MIO56", "MIO57", "MIO58", "MIO59",
+ "MIO60", "MIO61", "MIO62", "MIO63";
+ bias-disable;
+ };
+ };
+
+ pinctrl_gem3_default: gem3-default {
+ mux {
+ function = "ethernet3";
+ groups = "ethernet3_0_grp";
+ };
+
+ conf {
+ groups = "ethernet3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO70", "MIO71", "MIO72", "MIO73", "MIO74",
+ "MIO75";
+ bias-high-impedance;
+ low-power-disable;
+ };
+
+ conf-tx {
+ pins = "MIO64", "MIO65", "MIO66", "MIO67", "MIO68",
+ "MIO69";
+ bias-disable;
+ low-power-enable;
+ };
+
+ mux-mdio {
+ function = "mdio3";
+ groups = "mdio3_0_grp";
+ };
+
+ conf-mdio {
+ groups = "mdio3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+ };
+
+ pinctrl_can1_default: can1-default {
+ mux {
+ function = "can1";
+ groups = "can1_6_grp";
+ };
+
+ conf {
+ groups = "can1_6_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO25";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO24";
+ bias-disable;
+ };
+ };
+
+ pinctrl_sdhci1_default: sdhci1-default {
+ mux {
+ groups = "sdio1_0_grp";
+ function = "sdio1";
+ };
+
+ conf {
+ groups = "sdio1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+
+ mux-cd {
+ groups = "sdio1_cd_0_grp";
+ function = "sdio1_cd";
+ };
+
+ conf-cd {
+ groups = "sdio1_cd_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-wp {
+ groups = "sdio1_wp_0_grp";
+ function = "sdio1_wp";
+ };
+
+ conf-wp {
+ groups = "sdio1_wp_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_gpio_default: gpio-default {
+ mux {
+ function = "gpio0";
+ groups = "gpio0_22_grp", "gpio0_23_grp";
+ };
+
+ conf {
+ groups = "gpio0_22_grp", "gpio0_23_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-msp {
+ function = "gpio0";
+ groups = "gpio0_13_grp", "gpio0_38_grp";
+ };
+
+ conf-msp {
+ groups = "gpio0_13_grp", "gpio0_38_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-pull-up {
+ pins = "MIO22";
+ bias-pull-up;
+ };
+
+ conf-pull-none {
+ pins = "MIO13", "MIO23", "MIO38";
+ bias-disable;
+ };
+ };
+};
+
+&qspi {
+ status = "okay";
+ is-dual = <1>;
+ flash@0 {
+ compatible = "m25p80", "jedec,spi-nor"; /* 32MB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>; /* FIXME also DUAL configuration possible */
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@0 { /* for testing purpose */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@100000 { /* for testing purpose */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@600000 { /* for testing purpose */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@620000 { /* for testing purpose */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
&rtc {
status = "okay";
};
@@ -606,28 +926,80 @@
ceva,p1-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
ceva,p1-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
ceva,p1-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ phy-names = "sata-phy";
+ phys = <&lane3 PHY_TYPE_SATA 1 1 125000000>;
};
/* SD1 with level shifter */
&sdhci1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci1_default>;
+ /*
+ * This property should be removed for supporting UHS mode
+ */
no-1-8-v;
+ xlnx,mio_bank = <1>;
+};
+
+&serdes {
+ status = "okay";
};
&uart0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0_default>;
};
&uart1 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_default>;
};
/* ULPI SMSC USB3320 */
&usb0 {
status = "okay";
dr_mode = "host";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb0_default>;
+};
+
+&dwc3_0 {
+ status = "okay";
+ dr_mode = "host";
+ snps,usb3_lpm_capable;
+ phy-names = "usb3-phy";
+ phys = <&lane2 PHY_TYPE_USB3 0 2 26000000>;
};
&watchdog0 {
status = "okay";
};
+
+&zynqmp_dpsub {
+ status = "okay";
+ phy-names = "dp-phy0", "dp-phy1";
+ phys = <&lane1 PHY_TYPE_DP 0 3 27000000>, <&lane0 PHY_TYPE_DP 1 3 27000000>;
+};
+
+&zynqmp_dp_snd_pcm0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_pcm1 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_card0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_codec0 {
+ status = "okay";
+};
+
+&xlnx_dpdma {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts
index 2e92634c77f9..df1c1e47f051 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts
@@ -2,7 +2,7 @@
/*
* dts file for Xilinx ZynqMP ZCU111
*
- * (C) Copyright 2017 - 2019, Xilinx, Inc.
+ * (C) Copyright 2017 - 2020, Xilinx, Inc.
*
* Michal Simek <michal.simek@xilinx.com>
*/
@@ -13,6 +13,8 @@
#include "zynqmp-clk-ccf.dtsi"
#include <dt-bindings/input/input.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
+#include <dt-bindings/phy/phy.h>
/ {
model = "ZynqMP ZCU111 RevA";
@@ -20,17 +22,21 @@
aliases {
ethernet0 = &gem3;
+ gpio0 = &gpio;
i2c0 = &i2c0;
i2c1 = &i2c1;
mmc0 = &sdhci1;
rtc0 = &rtc;
serial0 = &uart0;
serial1 = &dcc;
+ spi0 = &qspi;
+ usb0 = &usb0;
};
chosen {
bootargs = "earlycon";
stdout-path = "serial0:115200n8";
+ xlnx,eeprom = &eeprom;
};
memory@0 {
@@ -158,6 +164,8 @@
status = "okay";
phy-handle = <&phy0>;
phy-mode = "rgmii-id";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gem3_default>;
phy0: ethernet-phy@c {
reg = <0xc>;
ti,rx-internal-delay = <0x8>;
@@ -169,11 +177,22 @@
&gpio {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_default>;
+};
+
+&gpu {
+ status = "okay";
};
&i2c0 {
status = "okay";
clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c0_default>;
+ pinctrl-1 = <&pinctrl_i2c0_gpio>;
+ scl-gpios = <&gpio 14 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 15 GPIO_ACTIVE_HIGH>;
tca6416_u22: gpio@20 {
compatible = "ti,tca6416";
@@ -318,13 +337,16 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <2>;
- irps5401_43: irps54012@43 { /* IRPS5401 - u53 check these */
+ irps5401_43: irps5401@43 { /* IRPS5401 - u53 check these */
+ compatible = "infineon,irps5401";
reg = <0x43>;
};
- irps5401_44: irps54012@44 { /* IRPS5401 - u55 */
+ irps5401_44: irps5401@44 { /* IRPS5401 - u55 */
+ compatible = "infineon,irps5401";
reg = <0x44>;
};
- irps5401_45: irps54012@45 { /* IRPS5401 - u57 */
+ irps5401_45: irps5401@45 { /* IRPS5401 - u57 */
+ compatible = "infineon,irps5401";
reg = <0x45>;
};
/* u68 IR38064 +0 */
@@ -346,6 +368,11 @@
&i2c1 {
status = "okay";
clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1_default>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio 16 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
i2c-mux@74 { /* u26 */
compatible = "nxp,pca9548";
@@ -374,6 +401,7 @@
#size-cells = <0>;
reg = <1>;
si5341: clock-generator@36 { /* SI5341 - u46 */
+ compatible = "si5341";
reg = <0x36>;
};
@@ -411,6 +439,7 @@
#size-cells = <0>;
reg = <4>;
si5328: clock-generator@69 { /* SI5328 - u48 */
+ compatible = "silabs,si5328";
reg = <0x69>;
};
};
@@ -497,6 +526,240 @@
};
};
+&pinctrl0 {
+ status = "okay";
+ pinctrl_i2c0_default: i2c0-default {
+ mux {
+ groups = "i2c0_3_grp";
+ function = "i2c0";
+ };
+
+ conf {
+ groups = "i2c0_3_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c0_gpio: i2c0-gpio {
+ mux {
+ groups = "gpio0_14_grp", "gpio0_15_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_14_grp", "gpio0_15_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_default: i2c1-default {
+ mux {
+ groups = "i2c1_4_grp";
+ function = "i2c1";
+ };
+
+ conf {
+ groups = "i2c1_4_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_gpio: i2c1-gpio {
+ mux {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_uart0_default: uart0-default {
+ mux {
+ groups = "uart0_4_grp";
+ function = "uart0";
+ };
+
+ conf {
+ groups = "uart0_4_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO18";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO19";
+ bias-disable;
+ };
+ };
+
+ pinctrl_usb0_default: usb0-default {
+ mux {
+ groups = "usb0_0_grp";
+ function = "usb0";
+ };
+
+ conf {
+ groups = "usb0_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO52", "MIO53", "MIO55";
+ bias-high-impedance;
+ };
+
+ conf-tx {
+ pins = "MIO54", "MIO56", "MIO57", "MIO58", "MIO59",
+ "MIO60", "MIO61", "MIO62", "MIO63";
+ bias-disable;
+ };
+ };
+
+ pinctrl_gem3_default: gem3-default {
+ mux {
+ function = "ethernet3";
+ groups = "ethernet3_0_grp";
+ };
+
+ conf {
+ groups = "ethernet3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-rx {
+ pins = "MIO70", "MIO71", "MIO72", "MIO73", "MIO74",
+ "MIO75";
+ bias-high-impedance;
+ low-power-disable;
+ };
+
+ conf-tx {
+ pins = "MIO64", "MIO65", "MIO66", "MIO67", "MIO68",
+ "MIO69";
+ bias-disable;
+ low-power-enable;
+ };
+
+ mux-mdio {
+ function = "mdio3";
+ groups = "mdio3_0_grp";
+ };
+
+ conf-mdio {
+ groups = "mdio3_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+ };
+
+ pinctrl_sdhci1_default: sdhci1-default {
+ mux {
+ groups = "sdio1_0_grp";
+ function = "sdio1";
+ };
+
+ conf {
+ groups = "sdio1_0_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ bias-disable;
+ };
+
+ mux-cd {
+ groups = "sdio1_cd_0_grp";
+ function = "sdio1_cd";
+ };
+
+ conf-cd {
+ groups = "sdio1_cd_0_grp";
+ bias-high-impedance;
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_gpio_default: gpio-default {
+ mux {
+ function = "gpio0";
+ groups = "gpio0_22_grp", "gpio0_23_grp";
+ };
+
+ conf {
+ groups = "gpio0_22_grp", "gpio0_23_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ mux-msp {
+ function = "gpio0";
+ groups = "gpio0_13_grp", "gpio0_38_grp";
+ };
+
+ conf-msp {
+ groups = "gpio0_13_grp", "gpio0_38_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+
+ conf-pull-up {
+ pins = "MIO22";
+ bias-pull-up;
+ };
+
+ conf-pull-none {
+ pins = "MIO13", "MIO23", "MIO38";
+ bias-disable;
+ };
+ };
+};
+
+&qspi {
+ status = "okay";
+ is-dual = <1>;
+ flash@0 {
+ compatible = "m25p80", "jedec,spi-nor"; /* 32MB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>; /* FIXME also DUAL configuration possible */
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@0 { /* for testing purpose */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@100000 { /* for testing purpose */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@600000 { /* for testing purpose */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@620000 { /* for testing purpose */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
&rtc {
status = "okay";
};
@@ -512,20 +775,71 @@
ceva,p1-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
ceva,p1-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
ceva,p1-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ phy-names = "sata-phy";
+ phys = <&lane3 PHY_TYPE_SATA 1 3 125000000>;
};
/* SD1 with level shifter */
&sdhci1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdhci1_default>;
+ disable-wp;
+ /*
+ * This property should be removed for supporting UHS mode
+ */
status = "okay";
no-1-8-v;
+ xlnx,mio_bank = <1>;
+};
+
+&serdes {
+ status = "okay";
};
&uart0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0_default>;
};
/* ULPI SMSC USB3320 */
&usb0 {
status = "okay";
dr_mode = "host";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb0_default>;
+};
+
+&dwc3_0 {
+ status = "okay";
+ dr_mode = "host";
+ snps,usb3_lpm_capable;
+ phy-names = "usb3-phy";
+ phys = <&lane2 PHY_TYPE_USB3 0 2 26000000>;
+};
+
+&zynqmp_dpsub {
+ status = "okay";
+ phy-names = "dp-phy0", "dp-phy1";
+ phys = <&lane1 PHY_TYPE_DP 0 1 27000000>, <&lane0 PHY_TYPE_DP 1 1 27000000>;
+};
+
+&zynqmp_dp_snd_pcm0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_pcm1 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_card0 {
+ status = "okay";
+};
+
+&zynqmp_dp_snd_codec0 {
+ status = "okay";
+};
+
+&xlnx_dpdma {
+ status = "okay";
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu1275-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu1275-revA.dts
new file mode 100644
index 000000000000..cdd5c341878f
--- /dev/null
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu1275-revA.dts
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * dts file for Xilinx ZynqMP ZCU1275
+ *
+ * (C) Copyright 2017 - 2020, Xilinx, Inc.
+ *
+ * Michal Simek <michal.simek@xilinx.com>
+ * Siva Durga Prasad Paladugu <sivadur@xilinx.com>
+ */
+
+/dts-v1/;
+
+#include "zynqmp.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
+
+/ {
+ model = "ZynqMP ZCU1275 RevA";
+ compatible = "xlnx,zynqmp-zcu1275-revA", "xlnx,zynqmp-zcu1275",
+ "xlnx,zynqmp";
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &dcc;
+ spi0 = &qspi;
+ };
+
+ chosen {
+ bootargs = "earlycon";
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x80000000>;
+ };
+};
+
+&dcc {
+ status = "okay";
+};
+
+&qspi {
+ status = "okay";
+ flash@0 {
+ compatible = "m25p80", "jedec,spi-nor"; /* 32MB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>; /* FIXME also DUAL configuration possible */
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@0 { /* for testing purpose */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@100000 { /* for testing purpose */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@600000 { /* for testing purpose */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@620000 { /* for testing purpose */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu1275-revB.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu1275-revB.dts
new file mode 100644
index 000000000000..b33cbdae9fb7
--- /dev/null
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu1275-revB.dts
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dts file for Xilinx ZynqMP ZCU1275 RevB
+ *
+ * (C) Copyright 2018 - 2020, Xilinx, Inc.
+ *
+ * Michal Simek <michal.simek@xilinx.com>
+ * Siva Durga Prasad Paladugu <sivadur@xilinx.com>
+ */
+
+/dts-v1/;
+
+#include "zynqmp.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
+
+/ {
+ model = "ZynqMP ZCU1275 RevB";
+ compatible = "xlnx,zynqmp-zcu1275-revB", "xlnx,zynqmp-zcu1275",
+ "xlnx,zynqmp";
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &dcc;
+ spi0 = &qspi;
+ mmc0 = &sdhci1;
+ ethernet0 = &gem1;
+ };
+
+ chosen {
+ bootargs = "earlycon";
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x80000000>;
+ };
+};
+
+&dcc {
+ status = "okay";
+};
+
+&gem1 {
+ status = "okay";
+ /* U-Boot gmii-to-rgmii bridge */
+ phy-mode = "gmii";
+ phy-handle = <&gmiitorgmii>;
+ phy: ethernet-phy@0 {
+ reg = <0x0>;
+ };
+ gmiitorgmii: gmiitorgmii@8 {
+ compatible = "xlnx,gmii-to-rgmii-1.0";
+ reg = <8>;
+ phy-handle = <&phy>;
+ };
+
+ /* Linux gmii-to-rgmii bridge */
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy1: ethernet-phy@1 {
+ reg = <1>; /* KSZ9031RNXIC on AES-FMC-NETW1-G */
+ rxc-skew-ps = <1800>; /* Skew control of RX_CLK pad output */
+ txc-skew-ps = <1800>; /* Skew control of GTX_CLK pad input */
+ txen-skew-ps = <900>; /* Skew control of TX_CTL pad input */
+ rxdv-skew-ps = <0>; /* Skew control of RX_CTL pad output */
+ rxd0-skew-ps = <0>; /* Skew control of RXD0 pad output */
+ rxd1-skew-ps = <0>; /* Skew control of RXD1 pad output */
+ rxd2-skew-ps = <0>; /* Skew control of RXD2 pad output */
+ rxd3-skew-ps = <0>; /* Skew control of RXD3 pad output */
+ txd0-skew-ps = <900>; /* Skew control of TXD0 pad input */
+ txd1-skew-ps = <900>; /* Skew control of TXD1 pad input */
+ txd2-skew-ps = <900>; /* Skew control of TXD2 pad input */
+ txd3-skew-ps = <900>; /* Skew control of TXD3 pad input */
+ };
+ };
+};
+
+&qspi {
+ status = "okay";
+ flash@0 {
+ compatible = "m25p80", "jedec,spi-nor"; /* 32MB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <1>;
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ partition@0 { /* for testing purpose */
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@100000 { /* for testing purpose */
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@600000 { /* for testing purpose */
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@620000 { /* for testing purpose */
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5E0000>;
+ };
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&sdhci1 {
+ status = "okay";
+ /*
+ * 1.0 revision has level shifter and this property should be
+ * removed for supporting UHS mode
+ */
+ no-1-8-v;
+ xlnx,mio_bank = <1>;
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu1285-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu1285-revA.dts
new file mode 100644
index 000000000000..db85950611ae
--- /dev/null
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu1285-revA.dts
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dts file for Xilinx ZynqMP ZCU1285 RevA
+ *
+ * (C) Copyright 2018 - 2020, Xilinx, Inc.
+ *
+ * Michal Simek <michal.simek@xilinx.com>
+ * Siva Durga Prasad Paladugu <sivadur@xilinx.com>
+ */
+
+/dts-v1/;
+
+#include "zynqmp.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
+
+/ {
+ model = "ZynqMP ZCU1285 RevA";
+ compatible = "xlnx,zynqmp-zcu1285-revA", "xlnx,zynqmp-zcu1285",
+ "xlnx,zynqmp";
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &dcc;
+ spi0 = &qspi;
+ mmc0 = &sdhci1;
+ ethernet0 = &gem1; /* EMIO */
+ i2c = &i2c0; /* EMIO */
+ };
+
+ chosen {
+ bootargs = "earlycon";
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x80000000>;
+ };
+
+ ina226-u60 {
+ compatible = "iio-hwmon";
+ io-channels = <&u60 0>, <&u60 1>, <&u60 2>, <&u60 3>;
+ };
+ ina226-u61 {
+ compatible = "iio-hwmon";
+ io-channels = <&u61 0>, <&u61 1>, <&u61 2>, <&u61 3>;
+ };
+ ina226-u63 {
+ compatible = "iio-hwmon";
+ io-channels = <&u63 0>, <&u63 1>, <&u63 2>, <&u63 3>;
+ };
+ ina226-u65 {
+ compatible = "iio-hwmon";
+ io-channels = <&u65 0>, <&u65 1>, <&u65 2>, <&u65 3>;
+ };
+ ina226-u64 {
+ compatible = "iio-hwmon";
+ io-channels = <&u64 0>, <&u64 1>, <&u64 2>, <&u64 3>;
+ };
+};
+
+&dcc {
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ i2c-mux@75 {
+ compatible = "nxp,pca9548"; /* u22 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x75>;
+
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ /* PMBUS */
+ max20751@74 { /* u23 */
+ compatible = "maxim,max20751";
+ reg = <0x74>;
+ };
+ max20751@70 { /* u89 */
+ compatible = "maxim,max20751";
+ reg = <0x70>;
+ };
+ max15301@a { /* u28 */
+ compatible = "maxim,max15301";
+ reg = <0xa>;
+ };
+ max15303@b { /* u48 */
+ compatible = "maxim,max15303";
+ reg = <0xb>;
+ };
+ max15303@d { /* u27 */
+ compatible = "maxim,max15303";
+ reg = <0xd>;
+ };
+ max15303@e { /* u11 */
+ compatible = "maxim,max15303";
+ reg = <0xe>;
+ };
+ max15303@f { /* u96 */
+ compatible = "maxim,max15303";
+ reg = <0xf>;
+ };
+ max15303@11 { /* u47 */
+ compatible = "maxim,max15303";
+ reg = <0x11>;
+ };
+ max15303@12 { /* u24 */
+ compatible = "maxim,max15303";
+ reg = <0x12>;
+ };
+ max15301@13 { /* u29 */
+ compatible = "maxim,max15301";
+ reg = <0x13>;
+ };
+ max15303@14 { /* u51 */
+ compatible = "maxim,max15303";
+ reg = <0x14>;
+ };
+ max15303@15 { /* u30 */
+ compatible = "maxim,max15303";
+ reg = <0x15>;
+ };
+ max15303@16 { /* u102 */
+ compatible = "maxim,max15303";
+ reg = <0x16>;
+ };
+ max15301@17 { /* u50 */
+ compatible = "maxim,max15301";
+ reg = <0x17>;
+ };
+ max15301@18 { /* u31 */
+ compatible = "maxim,max15301";
+ reg = <0x18>;
+ };
+ };
+ i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ /* CM_I2C */
+ };
+ i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ /* SYS_EEPROM */
+ eeprom: eeprom@54 { /* u101 */
+ compatible = "atmel,24c32"; /* 24LC32A */
+ reg = <0x54>;
+ };
+ };
+ i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ /* FMC1 */
+ };
+ i2c@4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <4>;
+ /* FMC2 */
+ };
+ i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <5>;
+ /* ANALOG_PMBUS */
+ u60: ina226@40 { /* u60 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-u60";
+ reg = <0x40>;
+ shunt-resistor = <1000>;
+ };
+ u61: ina226@41 { /* u61 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-u61";
+ reg = <0x41>;
+ shunt-resistor = <1000>;
+ };
+ u63: ina226@42 { /* u63 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-u63";
+ reg = <0x42>;
+ shunt-resistor = <1000>;
+ };
+ u65: ina226@43 { /* u65 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-u65";
+ reg = <0x43>;
+ shunt-resistor = <1000>;
+ };
+ u64: ina226@44 { /* u64 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-u64";
+ reg = <0x44>;
+ shunt-resistor = <1000>;
+ };
+ };
+ i2c@6 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <6>;
+ /* ANALOG_CM_I2C */
+ };
+ i2c@7 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <7>;
+ /* FMC3 */
+ };
+ };
+};
+
+&gem1 {
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy1: ethernet-phy@1 {
+ reg = <1>; /* KSZ9031RNXIC on AES-FMC-NETW1-G */
+ rxc-skew-ps = <1800>; /* Skew control of RX_CLK pad output */
+ txc-skew-ps = <1800>; /* Skew control of GTX_CLK pad input */
+ txen-skew-ps = <900>; /* Skew control of TX_CTL pad input */
+ rxdv-skew-ps = <0>; /* Skew control of RX_CTL pad output */
+ rxd0-skew-ps = <0>; /* Skew control of RXD0 pad output */
+ rxd1-skew-ps = <0>; /* Skew control of RXD1 pad output */
+ rxd2-skew-ps = <0>; /* Skew control of RXD2 pad output */
+ rxd3-skew-ps = <0>; /* Skew control of RXD3 pad output */
+ txd0-skew-ps = <900>; /* Skew control of TXD0 pad input */
+ txd1-skew-ps = <900>; /* Skew control of TXD1 pad input */
+ txd2-skew-ps = <900>; /* Skew control of TXD2 pad input */
+ txd3-skew-ps = <900>; /* Skew control of TXD3 pad input */
+ };
+ };
+};
+
+&qspi {
+ status = "okay";
+ flash@0 {
+ compatible = "m25p80", "jedec,spi-nor"; /* 32MB */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <1>;
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&sdhci1 {
+ status = "okay";
+ /*
+ * This property should be removed for supporting UHS mode
+ */
+ no-1-8-v;
+ xlnx,mio_bank = <1>;
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu208-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu208-revA.dts
new file mode 100644
index 000000000000..1cb8afc81285
--- /dev/null
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu208-revA.dts
@@ -0,0 +1,658 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * dts file for Xilinx ZynqMP ZCU208
+ *
+ * (C) Copyright 2017 - 2020, Xilinx, Inc.
+ *
+ * Michal Simek <michal.simek@xilinx.com>
+ */
+
+/dts-v1/;
+
+#include "zynqmp.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
+#include <dt-bindings/phy/phy.h>
+
+/ {
+ model = "ZynqMP ZCU208 RevA";
+ compatible = "xlnx,zynqmp-zcu208-revA", "xlnx,zynqmp-zcu208", "xlnx,zynqmp";
+
+ aliases {
+ ethernet0 = &gem3;
+ gpio0 = &gpio;
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ mmc0 = &sdhci1;
+ rtc0 = &rtc;
+ serial0 = &uart0;
+ serial1 = &dcc;
+ spi0 = &qspi;
+ usb0 = &usb0;
+ };
+
+ chosen {
+ bootargs = "earlycon";
+ stdout-path = "serial0:115200n8";
+ xlnx,eeprom = &eeprom;
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x80000000>, <0x8 0x00000000 0x0 0x80000000>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ autorepeat;
+ sw19 {
+ label = "sw19";
+ gpios = <&gpio 22 GPIO_ACTIVE_HIGH>;
+ linux,code = <KEY_DOWN>;
+ wakeup-source;
+ autorepeat;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ heartbeat_led {
+ label = "heartbeat";
+ gpios = <&gpio 23 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ ina226-vccint {
+ compatible = "iio-hwmon";
+ io-channels = <&vccint 0>, <&vccint 1>, <&vccint 2>, <&vccint 3>;
+ };
+ ina226-vccint-io-bram-ps {
+ compatible = "iio-hwmon";
+ io-channels = <&vccint_io_bram_ps 0>, <&vccint_io_bram_ps 1>, <&vccint_io_bram_ps 2>, <&vccint_io_bram_ps 3>;
+ };
+ ina226-vcc1v8 {
+ compatible = "iio-hwmon";
+ io-channels = <&vcc1v8 0>, <&vcc1v8 1>, <&vcc1v8 2>, <&vcc1v8 3>;
+ };
+ ina226-vcc1v2 {
+ compatible = "iio-hwmon";
+ io-channels = <&vcc1v2 0>, <&vcc1v2 1>, <&vcc1v2 2>, <&vcc1v2 3>;
+ };
+ ina226-vadj-fmc {
+ compatible = "iio-hwmon";
+ io-channels = <&vadj_fmc 0>, <&vadj_fmc 1>, <&vadj_fmc 2>, <&vadj_fmc 3>;
+ };
+ ina226-mgtavcc {
+ compatible = "iio-hwmon";
+ io-channels = <&mgtavcc 0>, <&mgtavcc 1>, <&mgtavcc 2>, <&mgtavcc 3>;
+ };
+ ina226-mgt1v2 {
+ compatible = "iio-hwmon";
+ io-channels = <&mgt1v2 0>, <&mgt1v2 1>, <&mgt1v2 2>, <&mgt1v2 3>;
+ };
+ ina226-mgt1v8 {
+ compatible = "iio-hwmon";
+ io-channels = <&mgt1v8 0>, <&mgt1v8 1>, <&mgt1v8 2>, <&mgt1v8 3>;
+ };
+ ina226-vccint-ams {
+ compatible = "iio-hwmon";
+ io-channels = <&vccint_ams 0>, <&vccint_ams 1>, <&vccint_ams 2>, <&vccint_ams 3>;
+ };
+ ina226-dac-avtt {
+ compatible = "iio-hwmon";
+ io-channels = <&dac_avtt 0>, <&dac_avtt 1>, <&dac_avtt 2>, <&dac_avtt 3>;
+ };
+ ina226-dac-avccaux {
+ compatible = "iio-hwmon";
+ io-channels = <&dac_avccaux 0>, <&dac_avccaux 1>, <&dac_avccaux 2>, <&dac_avccaux 3>;
+ };
+ ina226-adc-avcc {
+ compatible = "iio-hwmon";
+ io-channels = <&adc_avcc 0>, <&adc_avcc 1>, <&adc_avcc 2>, <&adc_avcc 3>;
+ };
+ ina226-adc-avccaux {
+ compatible = "iio-hwmon";
+ io-channels = <&adc_avccaux 0>, <&adc_avccaux 1>, <&adc_avccaux 2>, <&adc_avccaux 3>;
+ };
+ ina226-dac-avcc {
+ compatible = "iio-hwmon";
+ io-channels = <&dac_avcc 0>, <&dac_avcc 1>, <&dac_avcc 2>, <&dac_avcc 3>;
+ };
+};
+
+&dcc {
+ status = "okay";
+};
+
+&fpd_dma_chan1 {
+ status = "okay";
+};
+
+&fpd_dma_chan2 {
+ status = "okay";
+};
+
+&fpd_dma_chan3 {
+ status = "okay";
+};
+
+&fpd_dma_chan4 {
+ status = "okay";
+};
+
+&fpd_dma_chan5 {
+ status = "okay";
+};
+
+&fpd_dma_chan6 {
+ status = "okay";
+};
+
+&fpd_dma_chan7 {
+ status = "okay";
+};
+
+&fpd_dma_chan8 {
+ status = "okay";
+};
+
+&gem3 {
+ status = "okay";
+ phy-handle = <&phy0>;
+ phy-mode = "rgmii-id";
+ phy0: ethernet-phy@c {
+ reg = <0xc>;
+ ti,rx-internal-delay = <0x8>;
+ ti,tx-internal-delay = <0xa>;
+ ti,fifo-depth = <0x1>;
+ ti,dp83867-rxctrl-strap-quirk;
+ };
+};
+
+&gpio {
+ status = "okay";
+ gpio-line-names = "QSPI_LWR_CLK", "QSPI_LWR_DQ1", "QSPI_LWR_DQ2", "QSPI_LWR_DQ3", "QSPI_LWR_DQ0", /* 0 - 4 */
+ "QSPI_LWR_CS_B", "", "QSPI_UPR_CS_B", "QSPI_UPR_DQ0", "QSPI_UPR_DQ1", /* 5 - 9 */
+ "QSPI_UPR_DQ2", "QSPI_UPR_DQ3", "QSPI_UPR_CLK", "PS_GPIO2", "I2C0_SCL", /* 10 - 14 */
+ "I2C0_SDA", "I2C1_SCL", "I2C1_SDA", "UART0_TXD", "UART0_RXD", /* 15 - 19 */
+ "", "", "BUTTON", "LED", "", /* 20 - 24 */
+ "", "PMU_INPUT", "", "", "", /* 25 - 29 */
+ "", "", "PMU_GPO0", "PMU_GPO1", "PMU_GPO2", /* 30 - 34 */
+ "PMU_GPO3", "PMU_GPO4", "PMU_GPO5", "PS_GPIO1", "SDIO_SEL", /* 35 - 39 */
+ "SDIO_DIR_CMD", "SDIO_DIR_DAT0", "SDIO_DIR_DAT1", "", "", /* 40 - 44 */
+ "SDIO_DETECT", "SDIO_DAT0", "SDIO_DAT1", "SDIO_DAT2", "SDIO_DAT3", /* 45 - 49 */
+ "SDIO_CMD", "SDIO_CLK", "USB_CLK", "USB_DIR", "USB_DATA2", /* 50 - 54 */
+ "USB_NXT", "USB_DATA0", "USB_DATA1", "USB_STP", "USB_DATA3", /* 55 - 59 */
+ "USB_DATA4", "USB_DATA5", "USB_DATA6", "USB_DATA7", "ENET_TX_CLK", /* 60 - 64 */
+ "ENET_TX_D0", "ENET_TX_D1", "ENET_TX_D2", "ENET_TX_D3", "ENET_TX_CTRL", /* 65 - 69 */
+ "ENET_RX_CLK", "ENET_RX_D0", "ENET_RX_D1", "ENET_RX_D2", "ENET_RX_D3", /* 70 - 74 */
+ "ENET_RX_CTRL", "ENET_MDC", "ENET_MDIO", /* 75 - 77, MIO end and EMIO start */
+ "", "", /* 78 - 79 */
+ "", "", "", "", "", /* 80 - 84 */
+ "", "", "", "", "", /* 85 -89 */
+ "", "", "", "", "", /* 90 - 94 */
+ "", "", "", "", "", /* 95 - 99 */
+ "", "", "", "", "", /* 100 - 104 */
+ "", "", "", "", "", /* 105 - 109 */
+ "", "", "", "", "", /* 110 - 114 */
+ "", "", "", "", "", /* 115 - 119 */
+ "", "", "", "", "", /* 120 - 124 */
+ "", "", "", "", "", /* 125 - 129 */
+ "", "", "", "", "", /* 130 - 134 */
+ "", "", "", "", "", /* 135 - 139 */
+ "", "", "", "", "", /* 140 - 144 */
+ "", "", "", "", "", /* 145 - 149 */
+ "", "", "", "", "", /* 150 - 154 */
+ "", "", "", "", "", /* 155 - 159 */
+ "", "", "", "", "", /* 160 - 164 */
+ "", "", "", "", "", /* 165 - 169 */
+ "", "", "", ""; /* 170 - 174 */
+};
+
+&i2c0 {
+ status = "okay";
+ clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c0_default>;
+ pinctrl-1 = <&pinctrl_i2c0_gpio>;
+ scl-gpios = <&gpio 14 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 15 GPIO_ACTIVE_HIGH>;
+
+ tca6416_u15: gpio@20 { /* u15 */
+ compatible = "ti,tca6416";
+ reg = <0x20>;
+ gpio-controller; /* interrupt not connected */
+ #gpio-cells = <2>;
+ gpio-line-names = "MAX6643_OT_B", "MAX6643_FANFAIL_B", "MIO26_PMU_INPUT_LS", "DAC_AVTT_VOUT_SEL", /* 0 - 3 */
+ "", "IIC_MUX_RESET_B", "GEM3_EXP_RESET_B", "MAX6643_FULL_SPEED", /* 4 - 7 */
+ "FMCP_HSPC_PRSNT_M2C_B", "", "", "VCCINT_VRHOT_B", /* 10 - 13 */
+ "", "8A34001_EXP_RST_B", "IRPS5401_ALERT_B", "INA226_PMBUS_ALERT"; /* 14 - 17 */
+ };
+
+ i2c-mux@75 { /* u17 */
+ compatible = "nxp,pca9544";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x75>;
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ /* PS_PMBUS */
+ /* PMBUS_ALERT done via pca9544 */
+ vccint: ina226@40 { /* u65 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-vccint";
+ reg = <0x40>;
+ shunt-resistor = <5000>;
+ };
+ vccint_io_bram_ps: ina226@41 { /* u57 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-vccint-io-bram-ps";
+ reg = <0x41>;
+ shunt-resistor = <5000>;
+ };
+ vcc1v8: ina226@42 { /* u60 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-vcc1v8";
+ reg = <0x42>;
+ shunt-resistor = <2000>;
+ };
+ vcc1v2: ina226@43 { /* u58 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-vcc1v2";
+ reg = <0x43>;
+ shunt-resistor = <5000>;
+ };
+ vadj_fmc: ina226@45 { /* u62 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-vadj-fmc";
+ reg = <0x45>;
+ shunt-resistor = <5000>;
+ };
+ mgtavcc: ina226@46 { /* u67 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-mgtavcc";
+ reg = <0x46>;
+ shunt-resistor = <2000>;
+ };
+ mgt1v2: ina226@47 { /* u63 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-mgt1v2";
+ reg = <0x47>;
+ shunt-resistor = <5000>;
+ };
+ mgt1v8: ina226@48 { /* u64 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-mgt1v8";
+ reg = <0x48>;
+ shunt-resistor = <5000>;
+ };
+ vccint_ams: ina226@49 { /* u61 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-vccint-ams";
+ reg = <0x49>;
+ shunt-resistor = <5000>;
+ };
+ dac_avtt: ina226@4a { /* u59 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-dac-avtt";
+ reg = <0x4a>;
+ shunt-resistor = <5000>;
+ };
+ dac_avccaux: ina226@4b { /* u124 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-dac-avccaux";
+ reg = <0x4b>;
+ shunt-resistor = <5000>;
+ };
+ adc_avcc: ina226@4c { /* u75 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-adc-avcc";
+ reg = <0x4c>;
+ shunt-resistor = <5000>;
+ };
+ adc_avccaux: ina226@4d { /* u71 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-adc-avccaux";
+ reg = <0x4d>;
+ shunt-resistor = <5000>;
+ };
+ dac_avcc: ina226@4e { /* u77 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-dac-avcc";
+ reg = <0x4e>;
+ shunt-resistor = <5000>;
+ };
+ };
+ i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ /* NC */
+ };
+ i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ /* u104 - ir35215 0x10/0x40 */
+ /* u127 - ir38164 0x1b/0x4b */
+ /* u112 - ir38164 0x13/0x43 */
+ /* u123 - ir38164 0x1c/0x4c */
+
+ irps5401_44: irps5401@44 { /* IRPS5401 - u53 */
+ compatible = "infineon,irps5401";
+ reg = <0x44>; /* i2c addr 0x14 */
+ };
+ irps5401_45: irps5401@45 { /* IRPS5401 - u55 */
+ compatible = "infineon,irps5401";
+ reg = <0x45>; /* i2c addr 0x15 */
+ };
+ /* J21 header too */
+
+ };
+ i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ /* SYSMON */
+ };
+ };
+ /* u38 MSP430 */
+};
+
+&i2c1 {
+ status = "okay";
+ clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1_default>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio 16 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
+
+ i2c-mux@74 {
+ compatible = "nxp,pca9548"; /* u20 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x74>;
+ /* FIXME reset-gpios = <&tca6416_u15 SYSCTLR_IIC_MUX0_RESET_B GPIO_ACTIVE_HIGH>; */
+ i2c_eeprom: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ /*
+ * IIC_EEPROM 1kB memory which uses 256B blocks
+ * where every block has different address.
+ * 0 - 256B address 0x54
+ * 256B - 512B address 0x55
+ * 512B - 768B address 0x56
+ * 768B - 1024B address 0x57
+ */
+ eeprom: eeprom@54 { /* u21 */
+ compatible = "atmel,24c128";
+ reg = <0x54>;
+ };
+ };
+ i2c_si5341: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ si5341: clock-generator@36 { /* SI5341 - u43 */
+ compatible = "si5341";
+ reg = <0x36>;
+ };
+
+ };
+ i2c_si570_user_c0: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ si570_1: clock-generator@5d { /* USER C0 SI570 - u47 */
+ #clock-cells = <0>;
+ compatible = "silabs,si570";
+ reg = <0x5d>;
+ temperature-stability = <50>;
+ factory-fout = <300000000>;
+ clock-frequency = <300000000>;
+ clock-output-names = "si570_user_c0";
+ };
+ };
+ i2c_si570_mgt: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ si570_2: clock-generator@5d { /* USER MGT SI570 - u48 */
+ #clock-cells = <0>;
+ compatible = "silabs,si570";
+ reg = <0x5d>;
+ temperature-stability = <50>;
+ factory-fout = <156250000>;
+ clock-frequency = <148500000>;
+ clock-output-names = "si570_mgt";
+ };
+ };
+ i2c_8a34001: i2c@4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <4>;
+ /* U409B - 8a34001 */
+ };
+ i2c_clk104: i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <5>;
+ /* CLK104_SDA */
+ };
+ i2c@6 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <6>;
+ /* RFMCP connector */
+ };
+ /* 7 NC */
+ };
+
+ i2c-mux@75 {
+ compatible = "nxp,pca9548"; /* u22 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x75>;
+ /* FIXME reset-gpios = <&tca6416_u15 SYSCTLR_IIC_MUX0_RESET_B GPIO_ACTIVE_HIGH>; */
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ /* FMCP_HSPC_IIC */
+ };
+ i2c_si570_user_c1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ si570_3: clock-generator@5d { /* USER C1 SI570 - u130 */
+ #clock-cells = <0>;
+ compatible = "silabs,si570";
+ reg = <0x5d>;
+ temperature-stability = <50>;
+ factory-fout = <300000000>;
+ clock-frequency = <300000000>;
+ clock-output-names = "si570_user_c1";
+ };
+ };
+ i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ /* SYSMON */
+ };
+ i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ /* DDR4 SODIMM */
+ };
+ i2c@4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <4>;
+ /* SFP3 */
+ };
+ i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <5>;
+ /* SFP2 */
+ };
+ i2c@6 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <6>;
+ /* SFP1 */
+ };
+ i2c@7 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <7>;
+ /* SFP0 */
+ };
+ };
+ /* MSP430 */
+};
+
+&pinctrl0 {
+ status = "okay";
+ pinctrl_i2c0_default: i2c0-default {
+ mux {
+ groups = "i2c0_3_grp";
+ function = "i2c0";
+ };
+
+ conf {
+ groups = "i2c0_3_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c0_gpio: i2c0-gpio {
+ mux {
+ groups = "gpio0_14_grp", "gpio0_15_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_14_grp", "gpio0_15_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_default: i2c1-default {
+ mux {
+ groups = "i2c1_4_grp";
+ function = "i2c1";
+ };
+
+ conf {
+ groups = "i2c1_4_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_gpio: i2c1-gpio {
+ mux {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+};
+
+&qspi {
+ status = "okay";
+ is-dual = <1>;
+ flash@0 {
+ compatible = "m25p80", "jedec,spi-nor"; /* U11 and U12 MT25QU02GCBBE12 1Gb */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>; /* FIXME also DUAL configuration possible */
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ };
+};
+
+&rtc {
+ status = "okay";
+};
+
+&sata {
+ status = "okay";
+ /* SATA OOB timing settings */
+ ceva,p0-cominit-params = /bits/ 8 <0x18 0x40 0x18 0x28>;
+ ceva,p0-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
+ ceva,p0-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
+ ceva,p0-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ ceva,p1-cominit-params = /bits/ 8 <0x18 0x40 0x18 0x28>;
+ ceva,p1-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
+ ceva,p1-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
+ ceva,p1-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ phy-names = "sata-phy";
+ phys = <&lane3 PHY_TYPE_SATA 1 3 125000000>;
+};
+
+/* SD1 with level shifter */
+&sdhci1 {
+ status = "okay";
+ disable-wp;
+ /*
+ * This property should be removed for supporting UHS mode
+ */
+ no-1-8-v;
+ xlnx,mio_bank = <1>;
+};
+
+&serdes {
+ status = "okay";
+};
+
+&uart0 {
+ status = "okay";
+};
+
+/* ULPI SMSC USB3320 */
+&usb0 {
+ status = "okay";
+};
+
+&dwc3_0 {
+ status = "okay";
+ dr_mode = "host";
+ snps,usb3_lpm_capable;
+ phy-names = "usb3-phy";
+ phys = <&lane2 PHY_TYPE_USB3 0 2 26000000>;
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu216-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu216-revA.dts
new file mode 100644
index 000000000000..045cf715b708
--- /dev/null
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu216-revA.dts
@@ -0,0 +1,662 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * dts file for Xilinx ZynqMP ZCU216
+ *
+ * (C) Copyright 2017 - 2020, Xilinx, Inc.
+ *
+ * Michal Simek <michal.simek@xilinx.com>
+ */
+
+/dts-v1/;
+
+#include "zynqmp.dtsi"
+#include "zynqmp-clk-ccf.dtsi"
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
+#include <dt-bindings/phy/phy.h>
+
+/ {
+ model = "ZynqMP ZCU216 RevA";
+ compatible = "xlnx,zynqmp-zcu216-revA", "xlnx,zynqmp-zcu216", "xlnx,zynqmp";
+
+ aliases {
+ ethernet0 = &gem3;
+ gpio0 = &gpio;
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ mmc0 = &sdhci1;
+ rtc0 = &rtc;
+ serial0 = &uart0;
+ serial1 = &dcc;
+ spi0 = &qspi;
+ usb0 = &usb0;
+ };
+
+ chosen {
+ bootargs = "earlycon";
+ stdout-path = "serial0:115200n8";
+ xlnx,eeprom = <&eeprom>;
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x80000000>, <0x8 0x00000000 0x0 0x80000000>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ autorepeat;
+ sw19 {
+ label = "sw19";
+ gpios = <&gpio 22 GPIO_ACTIVE_HIGH>;
+ linux,code = <KEY_DOWN>;
+ wakeup-source;
+ autorepeat;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ heartbeat_led {
+ label = "heartbeat";
+ gpios = <&gpio 23 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ ina226-vccint {
+ compatible = "iio-hwmon";
+ io-channels = <&vccint 0>, <&vccint 1>, <&vccint 2>, <&vccint 3>;
+ };
+ ina226-vccint-io-bram-ps {
+ compatible = "iio-hwmon";
+ io-channels = <&vccint_io_bram_ps 0>, <&vccint_io_bram_ps 1>, <&vccint_io_bram_ps 2>, <&vccint_io_bram_ps 3>;
+ };
+ ina226-vcc1v8 {
+ compatible = "iio-hwmon";
+ io-channels = <&vcc1v8 0>, <&vcc1v8 1>, <&vcc1v8 2>, <&vcc1v8 3>;
+ };
+ ina226-vcc1v2 {
+ compatible = "iio-hwmon";
+ io-channels = <&vcc1v2 0>, <&vcc1v2 1>, <&vcc1v2 2>, <&vcc1v2 3>;
+ };
+ ina226-vadj-fmc {
+ compatible = "iio-hwmon";
+ io-channels = <&vadj_fmc 0>, <&vadj_fmc 1>, <&vadj_fmc 2>, <&vadj_fmc 3>;
+ };
+ ina226-mgtavcc {
+ compatible = "iio-hwmon";
+ io-channels = <&mgtavcc 0>, <&mgtavcc 1>, <&mgtavcc 2>, <&mgtavcc 3>;
+ };
+ ina226-mgt1v2 {
+ compatible = "iio-hwmon";
+ io-channels = <&mgt1v2 0>, <&mgt1v2 1>, <&mgt1v2 2>, <&mgt1v2 3>;
+ };
+ ina226-mgt1v8 {
+ compatible = "iio-hwmon";
+ io-channels = <&mgt1v8 0>, <&mgt1v8 1>, <&mgt1v8 2>, <&mgt1v8 3>;
+ };
+ ina226-vccint-ams {
+ compatible = "iio-hwmon";
+ io-channels = <&vccint_ams 0>, <&vccint_ams 1>, <&vccint_ams 2>, <&vccint_ams 3>;
+ };
+ ina226-dac-avtt {
+ compatible = "iio-hwmon";
+ io-channels = <&dac_avtt 0>, <&dac_avtt 1>, <&dac_avtt 2>, <&dac_avtt 3>;
+ };
+ ina226-dac-avccaux {
+ compatible = "iio-hwmon";
+ io-channels = <&dac_avccaux 0>, <&dac_avccaux 1>, <&dac_avccaux 2>, <&dac_avccaux 3>;
+ };
+ ina226-adc-avcc {
+ compatible = "iio-hwmon";
+ io-channels = <&adc_avcc 0>, <&adc_avcc 1>, <&adc_avcc 2>, <&adc_avcc 3>;
+ };
+ ina226-adc-avccaux {
+ compatible = "iio-hwmon";
+ io-channels = <&adc_avccaux 0>, <&adc_avccaux 1>, <&adc_avccaux 2>, <&adc_avccaux 3>;
+ };
+ ina226-dac-avcc {
+ compatible = "iio-hwmon";
+ io-channels = <&dac_avcc 0>, <&dac_avcc 1>, <&dac_avcc 2>, <&dac_avcc 3>;
+ };
+};
+
+&dcc {
+ status = "okay";
+};
+
+&fpd_dma_chan1 {
+ status = "okay";
+};
+
+&fpd_dma_chan2 {
+ status = "okay";
+};
+
+&fpd_dma_chan3 {
+ status = "okay";
+};
+
+&fpd_dma_chan4 {
+ status = "okay";
+};
+
+&fpd_dma_chan5 {
+ status = "okay";
+};
+
+&fpd_dma_chan6 {
+ status = "okay";
+};
+
+&fpd_dma_chan7 {
+ status = "okay";
+};
+
+&fpd_dma_chan8 {
+ status = "okay";
+};
+
+&gem3 {
+ status = "okay";
+ phy-handle = <&phy0>;
+ phy-mode = "rgmii-id";
+ phy0: ethernet-phy@c {
+ reg = <0xc>;
+ ti,rx-internal-delay = <0x8>;
+ ti,tx-internal-delay = <0xa>;
+ ti,fifo-depth = <0x1>;
+ ti,dp83867-rxctrl-strap-quirk;
+ };
+};
+
+&gpio {
+ status = "okay";
+ gpio-line-names = "QSPI_LWR_CLK", "QSPI_LWR_DQ1", "QSPI_LWR_DQ2", "QSPI_LWR_DQ3", "QSPI_LWR_DQ0", /* 0 - 4 */
+ "QSPI_LWR_CS_B", "", "QSPI_UPR_CS_B", "QSPI_UPR_DQ0", "QSPI_UPR_DQ1", /* 5 - 9 */
+ "QSPI_UPR_DQ2", "QSPI_UPR_DQ3", "QSPI_UPR_CLK", "PS_GPIO2", "I2C0_SCL", /* 10 - 14 */
+ "I2C0_SDA", "I2C1_SCL", "I2C1_SDA", "UART0_TXD", "UART0_RXD", /* 15 - 19 */
+ "", "", "BUTTON", "LED", "", /* 20 - 24 */
+ "", "PMU_INPUT", "", "", "", /* 25 - 29 */
+ "", "", "PMU_GPO0", "PMU_GPO1", "PMU_GPO2", /* 30 - 34 */
+ "PMU_GPO3", "PMU_GPO4", "PMU_GPO5", "PS_GPIO1", "SDIO_SEL", /* 35 - 39 */
+ "SDIO_DIR_CMD", "SDIO_DIR_DAT0", "SDIO_DIR_DAT1", "", "", /* 40 - 44 */
+ "SDIO_DETECT", "SDIO_DAT0", "SDIO_DAT1", "SDIO_DAT2", "SDIO_DAT3", /* 45 - 49 */
+ "SDIO_CMD", "SDIO_CLK", "USB_CLK", "USB_DIR", "USB_DATA2", /* 50 - 54 */
+ "USB_NXT", "USB_DATA0", "USB_DATA1", "USB_STP", "USB_DATA3", /* 55 - 59 */
+ "USB_DATA4", "USB_DATA5", "USB_DATA6", "USB_DATA7", "ENET_TX_CLK", /* 60 - 64 */
+ "ENET_TX_D0", "ENET_TX_D1", "ENET_TX_D2", "ENET_TX_D3", "ENET_TX_CTRL", /* 65 - 69 */
+ "ENET_RX_CLK", "ENET_RX_D0", "ENET_RX_D1", "ENET_RX_D2", "ENET_RX_D3", /* 70 - 74 */
+ "ENET_RX_CTRL", "ENET_MDC", "ENET_MDIO", /* 75 - 77, MIO end and EMIO start */
+ "", "", /* 78 - 79 */
+ "", "", "", "", "", /* 80 - 84 */
+ "", "", "", "", "", /* 85 -89 */
+ "", "", "", "", "", /* 90 - 94 */
+ "", "", "", "", "", /* 95 - 99 */
+ "", "", "", "", "", /* 100 - 104 */
+ "", "", "", "", "", /* 105 - 109 */
+ "", "", "", "", "", /* 110 - 114 */
+ "", "", "", "", "", /* 115 - 119 */
+ "", "", "", "", "", /* 120 - 124 */
+ "", "", "", "", "", /* 125 - 129 */
+ "", "", "", "", "", /* 130 - 134 */
+ "", "", "", "", "", /* 135 - 139 */
+ "", "", "", "", "", /* 140 - 144 */
+ "", "", "", "", "", /* 145 - 149 */
+ "", "", "", "", "", /* 150 - 154 */
+ "", "", "", "", "", /* 155 - 159 */
+ "", "", "", "", "", /* 160 - 164 */
+ "", "", "", "", "", /* 165 - 169 */
+ "", "", "", ""; /* 170 - 174 */
+};
+
+&gpu {
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+ clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c0_default>;
+ pinctrl-1 = <&pinctrl_i2c0_gpio>;
+ scl-gpios = <&gpio 14 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 15 GPIO_ACTIVE_HIGH>;
+
+ tca6416_u15: gpio@20 { /* u15 */
+ compatible = "ti,tca6416";
+ reg = <0x20>;
+ gpio-controller; /* interrupt not connected */
+ #gpio-cells = <2>;
+ gpio-line-names = "MAX6643_OT_B", "MAX6643_FANFAIL_B", "MIO26_PMU_INPUT_LS", "", /* 0 - 3 */
+ "", "IIC_MUX_RESET_B", "GEM3_EXP_RESET_B", "MAX6643_FULL_SPEED", /* 4 - 7 */
+ "FMCP_HSPC_PRSNT_M2C_B", "", "", "VCCINT_VRHOT_B", /* 10 - 13 */
+ "", "8A34001_EXP_RST_B", "IRPS5401_ALERT_B", "INA226_PMBUS_ALERT"; /* 14 - 17 */
+ };
+
+ i2c-mux@75 { /* u17 */
+ compatible = "nxp,pca9544";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x75>;
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ /* PS_PMBUS */
+ /* PMBUS_ALERT done via pca9544 */
+ vccint: ina226@40 { /* u65 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-vccint";
+ reg = <0x40>;
+ shunt-resistor = <5000>;
+ };
+ vccint_io_bram_ps: ina226@41 { /* u57 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-vccint-io-bram-ps";
+ reg = <0x41>;
+ shunt-resistor = <5000>;
+ };
+ vcc1v8: ina226@42 { /* u60 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-vcc1v8";
+ reg = <0x42>;
+ shunt-resistor = <2000>;
+ };
+ vcc1v2: ina226@43 { /* u58 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-vcc1v2";
+ reg = <0x43>;
+ shunt-resistor = <5000>;
+ };
+ vadj_fmc: ina226@45 { /* u62 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-vadj-fmc";
+ reg = <0x45>;
+ shunt-resistor = <5000>;
+ };
+ mgtavcc: ina226@46 { /* u67 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-mgtavcc";
+ reg = <0x46>;
+ shunt-resistor = <2000>;
+ };
+ mgt1v2: ina226@47 { /* u63 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-mgt1v2";
+ reg = <0x47>;
+ shunt-resistor = <5000>;
+ };
+ mgt1v8: ina226@48 { /* u64 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-mgt1v8";
+ reg = <0x48>;
+ shunt-resistor = <5000>;
+ };
+ vccint_ams: ina226@49 { /* u61 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-vccint-ams";
+ reg = <0x49>;
+ shunt-resistor = <5000>;
+ };
+ dac_avtt: ina226@4a { /* u59 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-dac-avtt";
+ reg = <0x4a>;
+ shunt-resistor = <5000>;
+ };
+ dac_avccaux: ina226@4b { /* u124 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-dac-avccaux";
+ reg = <0x4b>;
+ shunt-resistor = <5000>;
+ };
+ adc_avcc: ina226@4c { /* u75 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-adc-avcc";
+ reg = <0x4c>;
+ shunt-resistor = <5000>;
+ };
+ adc_avccaux: ina226@4d { /* u71 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-adc-avccaux";
+ reg = <0x4d>;
+ shunt-resistor = <5000>;
+ };
+ dac_avcc: ina226@4e { /* u77 */
+ compatible = "ti,ina226";
+ #io-channel-cells = <1>;
+ label = "ina226-dac-avcc";
+ reg = <0x4e>;
+ shunt-resistor = <5000>;
+ };
+ };
+ i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ /* NC */
+ };
+ i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ /* u104 - ir35215 0x10/0x40 */
+ /* u127 - ir38164 0x1b/0x4b */
+ /* u112 - ir38164 0x13/0x43 */
+ /* u123 - ir38164 0x1c/0x4c */
+
+ irps5401_44: irps5401@44 { /* IRPS5401 - u53 */
+ compatible = "infineon,irps5401";
+ reg = <0x44>; /* i2c addr 0x14 */
+ };
+ irps5401_45: irps5401@45 { /* IRPS5401 - u55 */
+ compatible = "infineon,irps5401";
+ reg = <0x45>; /* i2c addr 0x15 */
+ };
+ /* J21 header too */
+
+ };
+ i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ /* SYSMON */
+ };
+ };
+ /* u38 MSP430 */
+};
+
+&i2c1 {
+ status = "okay";
+ clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c1_default>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio 16 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
+
+ i2c-mux@74 {
+ compatible = "nxp,pca9548"; /* u20 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x74>;
+ /* FIXME reset-gpios = <&tca6416_u15 SYSCTLR_IIC_MUX0_RESET_B GPIO_ACTIVE_HIGH>; */
+ i2c_eeprom: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ /*
+ * IIC_EEPROM 1kB memory which uses 256B blocks
+ * where every block has different address.
+ * 0 - 256B address 0x54
+ * 256B - 512B address 0x55
+ * 512B - 768B address 0x56
+ * 768B - 1024B address 0x57
+ */
+ eeprom: eeprom@54 { /* u21 */
+ compatible = "atmel,24c128";
+ reg = <0x54>;
+ };
+ };
+ i2c_si5341: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ si5341: clock-generator@36 { /* SI5341 - u43 */
+ compatible = "si5341";
+ reg = <0x36>;
+ };
+
+ };
+ i2c_si570_user_c0: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ si570_1: clock-generator@5d { /* USER C0 SI570 - u47 */
+ #clock-cells = <0>;
+ compatible = "silabs,si570";
+ reg = <0x5d>;
+ temperature-stability = <50>;
+ factory-fout = <300000000>;
+ clock-frequency = <300000000>;
+ clock-output-names = "si570_user_c0";
+ };
+ };
+ i2c_si570_mgt: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ si570_2: clock-generator@5d { /* USER MGT SI570 - u48 */
+ #clock-cells = <0>;
+ compatible = "silabs,si570";
+ reg = <0x5d>;
+ temperature-stability = <50>;
+ factory-fout = <156250000>;
+ clock-frequency = <148500000>;
+ clock-output-names = "si570_mgt";
+ };
+ };
+ i2c_8a34001: i2c@4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <4>;
+ /* U409B - 8a34001 */
+ };
+ i2c_clk104: i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <5>;
+ /* CLK104_SDA */
+ };
+ i2c@6 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <6>;
+ /* RFMCP connector */
+ };
+ /* 7 NC */
+ };
+
+ i2c-mux@75 {
+ compatible = "nxp,pca9548"; /* u22 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x75>;
+ /* FIXME reset-gpios = <&tca6416_u15 SYSCTLR_IIC_MUX0_RESET_B GPIO_ACTIVE_HIGH>; */
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ /* FMCP_HSPC_IIC */
+ };
+ i2c_si570_user_c1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ si570_3: clock-generator@5d { /* USER C1 SI570 - u130 */
+ #clock-cells = <0>;
+ compatible = "silabs,si570";
+ reg = <0x5d>;
+ temperature-stability = <50>;
+ factory-fout = <300000000>;
+ clock-frequency = <300000000>;
+ clock-output-names = "si570_user_c1";
+ };
+ };
+ i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ /* SYSMON */
+ };
+ i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ /* DDR4 SODIMM */
+ };
+ i2c@4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <4>;
+ /* SFP3 */
+ };
+ i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <5>;
+ /* SFP2 */
+ };
+ i2c@6 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <6>;
+ /* SFP1 */
+ };
+ i2c@7 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <7>;
+ /* SFP0 */
+ };
+ };
+ /* MSP430 */
+};
+
+&pinctrl0 {
+ status = "okay";
+ pinctrl_i2c0_default: i2c0-default {
+ mux {
+ groups = "i2c0_3_grp";
+ function = "i2c0";
+ };
+
+ conf {
+ groups = "i2c0_3_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c0_gpio: i2c0-gpio {
+ mux {
+ groups = "gpio0_14_grp", "gpio0_15_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_14_grp", "gpio0_15_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_default: i2c1-default {
+ mux {
+ groups = "i2c1_4_grp";
+ function = "i2c1";
+ };
+
+ conf {
+ groups = "i2c1_4_grp";
+ bias-pull-up;
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+
+ pinctrl_i2c1_gpio: i2c1-gpio {
+ mux {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ function = "gpio0";
+ };
+
+ conf {
+ groups = "gpio0_16_grp", "gpio0_17_grp";
+ slew-rate = <SLEW_RATE_SLOW>;
+ io-standard = <IO_STANDARD_LVCMOS18>;
+ };
+ };
+};
+
+&qspi {
+ status = "okay";
+ is-dual = <1>;
+ flash@0 {
+ compatible = "m25p80", "jedec,spi-nor"; /* U11 and U12 MT25QU02GCBBE12 1Gb */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>; /* FIXME also DUAL configuration possible */
+ spi-max-frequency = <108000000>; /* Based on DC1 spec */
+ };
+};
+
+&rtc {
+ status = "okay";
+};
+
+&sata {
+ status = "okay";
+ /* SATA OOB timing settings */
+ ceva,p0-cominit-params = /bits/ 8 <0x18 0x40 0x18 0x28>;
+ ceva,p0-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
+ ceva,p0-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
+ ceva,p0-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ ceva,p1-cominit-params = /bits/ 8 <0x18 0x40 0x18 0x28>;
+ ceva,p1-comwake-params = /bits/ 8 <0x06 0x14 0x08 0x0E>;
+ ceva,p1-burst-params = /bits/ 8 <0x13 0x08 0x4A 0x06>;
+ ceva,p1-retry-params = /bits/ 16 <0x96A4 0x3FFC>;
+ phy-names = "sata-phy";
+ phys = <&lane3 PHY_TYPE_SATA 1 3 125000000>;
+};
+
+/* SD1 with level shifter */
+&sdhci1 {
+ status = "okay";
+ disable-wp;
+ /*
+ * This property should be removed for supporting UHS mode
+ */
+ no-1-8-v;
+ xlnx,mio_bank = <1>;
+};
+
+&serdes {
+ status = "okay";
+};
+
+&uart0 {
+ status = "okay";
+};
+
+/* ULPI SMSC USB3320 */
+&usb0 {
+ status = "okay";
+};
+
+&dwc3_0 {
+ status = "okay";
+ dr_mode = "host";
+ snps,usb3_lpm_capable;
+ phy-names = "usb3-phy";
+ phys = <&lane2 PHY_TYPE_USB3 0 2 26000000>;
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index 26d926eb1431..59d393aa9bb3 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -2,7 +2,7 @@
/*
* dts file for Xilinx ZynqMP
*
- * (C) Copyright 2014 - 2019, Xilinx, Inc.
+ * (C) Copyright 2014 - 2020, Xilinx, Inc.
*
* Michal Simek <michal.simek@xilinx.com>
*
@@ -13,6 +13,7 @@
*/
#include <dt-bindings/power/xlnx-zynqmp-power.h>
+#include <dt-bindings/reset/xlnx-zynqmp-resets.h>
/ {
compatible = "xlnx,zynqmp";
@@ -98,9 +99,33 @@
};
};
+ zynqmp_ipi {
+ u-boot,dm-pre-reloc;
+ compatible = "xlnx,zynqmp-ipi-mailbox";
+ interrupt-parent = <&gic>;
+ interrupts = <0 35 4>;
+ xlnx,ipi-id = <0>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ ipi_mailbox_pmu1: mailbox@ff990400 {
+ u-boot,dm-pre-reloc;
+ reg = <0x0 0xff9905c0 0x0 0x20>,
+ <0x0 0xff9905e0 0x0 0x20>,
+ <0x0 0xff990e80 0x0 0x20>,
+ <0x0 0xff990ea0 0x0 0x20>;
+ reg-names = "local_request_region", "local_response_region",
+ "remote_request_region", "remote_response_region";
+ #mbox-cells = <1>;
+ xlnx,ipi-id = <4>;
+ };
+ };
+
dcc: dcc {
compatible = "arm,dcc";
status = "disabled";
+ u-boot,dm-pre-reloc;
};
pmu {
@@ -120,43 +145,32 @@
firmware {
zynqmp_firmware: zynqmp-firmware {
compatible = "xlnx,zynqmp-firmware";
- #power-domain-cells = <1>;
method = "smc";
+ #power-domain-cells = <0x1>;
+ u-boot,dm-pre-reloc;
zynqmp_power: zynqmp-power {
+ u-boot,dm-pre-reloc;
compatible = "xlnx,zynqmp-power";
interrupt-parent = <&gic>;
interrupts = <0 35 4>;
+ mboxes = <&ipi_mailbox_pmu1 0>,
+ <&ipi_mailbox_pmu1 1>;
+ mbox-names = "tx", "rx";
};
- zynqmp_clk: clock-controller {
- u-boot,dm-pre-reloc;
- #clock-cells = <1>;
- compatible = "xlnx,zynqmp-clk";
- clocks = <&pss_ref_clk>,
- <&video_clk>,
- <&pss_alt_ref_clk>,
- <&aux_ref_clk>,
- <&gt_crx_ref_clk>;
- clock-names = "pss_ref_clk",
- "video_clk",
- "pss_alt_ref_clk",
- "aux_ref_clk",
- "gt_crx_ref_clk";
+ zynqmp_pcap: pcap {
+ compatible = "xlnx,zynqmp-pcap-fpga";
};
- nvmem_firmware {
- compatible = "xlnx,zynqmp-nvmem-fw";
- #address-cells = <1>;
- #size-cells = <1>;
-
- soc_revision: soc_revision@0 {
- reg = <0x0 0x4>;
- };
+ zynqmp_reset: reset-controller {
+ compatible = "xlnx,zynqmp-reset";
+ #reset-cells = <1>;
};
- zynqmp_pcap: pcap {
- compatible = "xlnx,zynqmp-pcap-fpga";
+ pinctrl0: pinctrl {
+ compatible = "xlnx,zynqmp-pinctrl";
+ status = "disabled";
};
};
};
@@ -170,6 +184,10 @@
<1 10 0xf08>;
};
+ edac {
+ compatible = "arm,cortex-a53-edac";
+ };
+
fpga_full: fpga-full {
compatible = "fpga-region";
fpga-mgr = <&zynqmp_pcap>;
@@ -178,6 +196,77 @@
ranges;
};
+ nvmem_firmware {
+ compatible = "xlnx,zynqmp-nvmem-fw";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ soc_revision: soc_revision@0 {
+ reg = <0x0 0x4>;
+ };
+ /* efuse access */
+ efuse_dna: efuse_dna@c {
+ reg = <0xc 0xc>;
+ };
+ efuse_usr0: efuse_usr0@20 {
+ reg = <0x20 0x4>;
+ };
+ efuse_usr1: efuse_usr1@24 {
+ reg = <0x24 0x4>;
+ };
+ efuse_usr2: efuse_usr2@28 {
+ reg = <0x28 0x4>;
+ };
+ efuse_usr3: efuse_usr3@2c {
+ reg = <0x2c 0x4>;
+ };
+ efuse_usr4: efuse_usr4@30 {
+ reg = <0x30 0x4>;
+ };
+ efuse_usr5: efuse_usr5@34 {
+ reg = <0x34 0x4>;
+ };
+ efuse_usr6: efuse_usr6@38 {
+ reg = <0x38 0x4>;
+ };
+ efuse_usr7: efuse_usr7@3c {
+ reg = <0x3c 0x4>;
+ };
+ efuse_miscusr: efuse_miscusr@40 {
+ reg = <0x40 0x4>;
+ };
+ efuse_chash: efuse_chash@50 {
+ reg = <0x50 0x4>;
+ };
+ efuse_pufmisc: efuse_pufmisc@54 {
+ reg = <0x54 0x4>;
+ };
+ efuse_sec: efuse_sec@58 {
+ reg = <0x58 0x4>;
+ };
+ efuse_spkid: efuse_spkid@5c {
+ reg = <0x5c 0x4>;
+ };
+ efuse_ppk0hash: efuse_ppk0hash@a0 {
+ reg = <0xa0 0x30>;
+ };
+ efuse_ppk1hash: efuse_ppk1hash@d0 {
+ reg = <0xd0 0x30>;
+ };
+ };
+
+ xlnx_rsa: zynqmp_rsa {
+ compatible = "xlnx,zynqmp-rsa";
+ };
+
+ xlnx_keccak_384: sha384 {
+ compatible = "xlnx,zynqmp-keccak-384";
+ };
+
+ xlnx_aes: zynqmp_aes {
+ compatible = "xlnx,zynqmp-aes";
+ };
+
amba_apu: amba-apu@0 {
compatible = "simple-bus";
#address-cells = <2>;
@@ -185,7 +274,7 @@
ranges = <0 0 0 0 0xffffffff>;
gic: interrupt-controller@f9010000 {
- compatible = "arm,gic-400", "arm,cortex-a15-gic";
+ compatible = "arm,gic-400";
#interrupt-cells = <3>;
reg = <0x0 0xf9010000 0x10000>,
<0x0 0xf9020000 0x20000>,
@@ -197,8 +286,23 @@
};
};
+ smmu: smmu@fd800000 {
+ compatible = "arm,mmu-500";
+ reg = <0x0 0xfd800000 0x0 0x20000>;
+ #iommu-cells = <1>;
+ status = "disabled";
+ #global-interrupts = <1>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 155 4>,
+ <0 155 4>, <0 155 4>, <0 155 4>, <0 155 4>,
+ <0 155 4>, <0 155 4>, <0 155 4>, <0 155 4>,
+ <0 155 4>, <0 155 4>, <0 155 4>, <0 155 4>,
+ <0 155 4>, <0 155 4>, <0 155 4>, <0 155 4>;
+ };
+
amba: amba {
compatible = "simple-bus";
+ u-boot,dm-pre-reloc;
#address-cells = <2>;
#size-cells = <2>;
ranges;
@@ -255,6 +359,8 @@
interrupts = <0 124 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14e8>;
power-domains = <&zynqmp_firmware PD_GDMA>;
};
@@ -266,6 +372,8 @@
interrupts = <0 125 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14e9>;
power-domains = <&zynqmp_firmware PD_GDMA>;
};
@@ -277,6 +385,8 @@
interrupts = <0 126 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14ea>;
power-domains = <&zynqmp_firmware PD_GDMA>;
};
@@ -288,6 +398,8 @@
interrupts = <0 127 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14eb>;
power-domains = <&zynqmp_firmware PD_GDMA>;
};
@@ -299,6 +411,8 @@
interrupts = <0 128 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14ec>;
power-domains = <&zynqmp_firmware PD_GDMA>;
};
@@ -310,6 +424,8 @@
interrupts = <0 129 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14ed>;
power-domains = <&zynqmp_firmware PD_GDMA>;
};
@@ -321,6 +437,8 @@
interrupts = <0 130 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14ee>;
power-domains = <&zynqmp_firmware PD_GDMA>;
};
@@ -332,9 +450,22 @@
interrupts = <0 131 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <128>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x14ef>;
power-domains = <&zynqmp_firmware PD_GDMA>;
};
+ gpu: gpu@fd4b0000 {
+ status = "disabled";
+ compatible = "arm,mali-400", "arm,mali-utgard";
+ reg = <0x0 0xfd4b0000 0x0 0x10000>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 132 4>, <0 132 4>, <0 132 4>, <0 132 4>, <0 132 4>, <0 132 4>;
+ interrupt-names = "IRQGP", "IRQGPMMU", "IRQPP0", "IRQPPMMU0", "IRQPP1", "IRQPPMMU1";
+ clock-names = "gpu", "gpu_pp0", "gpu_pp1";
+ power-domains = <&zynqmp_firmware PD_GPU>;
+ };
+
+ /* LPDDMA default allows only secured access. In order to enable
* These dma channels, Users should ensure that these dma
* Channels are allowed for non secure access.
@@ -347,6 +478,8 @@
interrupts = <0 77 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ /* iommus = <&smmu 0x868>; */
power-domains = <&zynqmp_firmware PD_ADMA>;
};
@@ -358,6 +491,8 @@
interrupts = <0 78 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ /* iommus = <&smmu 0x869>; */
power-domains = <&zynqmp_firmware PD_ADMA>;
};
@@ -369,6 +504,8 @@
interrupts = <0 79 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ /* iommus = <&smmu 0x86a>; */
power-domains = <&zynqmp_firmware PD_ADMA>;
};
@@ -380,6 +517,8 @@
interrupts = <0 80 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ /* iommus = <&smmu 0x86b>; */
power-domains = <&zynqmp_firmware PD_ADMA>;
};
@@ -391,6 +530,8 @@
interrupts = <0 81 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ /* iommus = <&smmu 0x86c>; */
power-domains = <&zynqmp_firmware PD_ADMA>;
};
@@ -402,6 +543,8 @@
interrupts = <0 82 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ /* iommus = <&smmu 0x86d>; */
power-domains = <&zynqmp_firmware PD_ADMA>;
};
@@ -413,6 +556,8 @@
interrupts = <0 83 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ /* iommus = <&smmu 0x86e>; */
power-domains = <&zynqmp_firmware PD_ADMA>;
};
@@ -424,6 +569,8 @@
interrupts = <0 84 4>;
clock-names = "clk_main", "clk_apb";
xlnx,bus-width = <64>;
+ #stream-id-cells = <1>;
+ /* iommus = <&smmu 0x86f>; */
power-domains = <&zynqmp_firmware PD_ADMA>;
};
@@ -434,6 +581,85 @@
interrupts = <0 112 4>;
};
+ ocm: memory-controller@ff960000 {
+ compatible = "xlnx,zynqmp-ocmc-1.0";
+ reg = <0x0 0xff960000 0x0 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 10 4>;
+ };
+
+ perf_monitor_ocm: perf-monitor@ffa00000 {
+ compatible = "xlnx,axi-perf-monitor";
+ reg = <0x0 0xffa00000 0x0 0x10000>;
+ interrupts = <0 25 4>;
+ interrupt-parent = <&gic>;
+ xlnx,enable-profile = <0>;
+ xlnx,enable-trace = <0>;
+ xlnx,num-monitor-slots = <1>;
+ xlnx,enable-event-count = <1>;
+ xlnx,enable-event-log = <1>;
+ xlnx,have-sampled-metric-cnt = <1>;
+ xlnx,num-of-counters = <8>;
+ xlnx,metric-count-width = <32>;
+ xlnx,metrics-sample-count-width = <32>;
+ xlnx,global-count-width = <32>;
+ xlnx,metric-count-scale = <1>;
+ };
+
+ perf_monitor_ddr: perf-monitor@fd0b0000 {
+ compatible = "xlnx,axi-perf-monitor";
+ reg = <0x0 0xfd0b0000 0x0 0x10000>;
+ interrupts = <0 123 4>;
+ interrupt-parent = <&gic>;
+ xlnx,enable-profile = <0>;
+ xlnx,enable-trace = <0>;
+ xlnx,num-monitor-slots = <6>;
+ xlnx,enable-event-count = <1>;
+ xlnx,enable-event-log = <0>;
+ xlnx,have-sampled-metric-cnt = <1>;
+ xlnx,num-of-counters = <10>;
+ xlnx,metric-count-width = <32>;
+ xlnx,metrics-sample-count-width = <32>;
+ xlnx,global-count-width = <32>;
+ xlnx,metric-count-scale = <1>;
+ };
+
+ perf_monitor_cci: perf-monitor@fd490000 {
+ compatible = "xlnx,axi-perf-monitor";
+ reg = <0x0 0xfd490000 0x0 0x10000>;
+ interrupts = <0 123 4>;
+ interrupt-parent = <&gic>;
+ xlnx,enable-profile = <0>;
+ xlnx,enable-trace = <0>;
+ xlnx,num-monitor-slots = <1>;
+ xlnx,enable-event-count = <1>;
+ xlnx,enable-event-log = <0>;
+ xlnx,have-sampled-metric-cnt = <1>;
+ xlnx,num-of-counters = <8>;
+ xlnx,metric-count-width = <32>;
+ xlnx,metrics-sample-count-width = <32>;
+ xlnx,global-count-width = <32>;
+ xlnx,metric-count-scale = <1>;
+ };
+
+ perf_monitor_lpd: perf-monitor@ffa10000 {
+ compatible = "xlnx,axi-perf-monitor";
+ reg = <0x0 0xffa10000 0x0 0x10000>;
+ interrupts = <0 25 4>;
+ interrupt-parent = <&gic>;
+ xlnx,enable-profile = <0>;
+ xlnx,enable-trace = <0>;
+ xlnx,num-monitor-slots = <1>;
+ xlnx,enable-event-count = <1>;
+ xlnx,enable-event-log = <1>;
+ xlnx,have-sampled-metric-cnt = <1>;
+ xlnx,num-of-counters = <8>;
+ xlnx,metric-count-width = <32>;
+ xlnx,metrics-sample-count-width = <32>;
+ xlnx,global-count-width = <32>;
+ xlnx,metric-count-scale = <1>;
+ };
+
gem0: ethernet@ff0b0000 {
compatible = "cdns,zynqmp-gem", "cdns,gem";
status = "disabled";
@@ -443,6 +669,8 @@
clock-names = "pclk", "hclk", "tx_clk";
#address-cells = <1>;
#size-cells = <0>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x874>;
power-domains = <&zynqmp_firmware PD_ETH_0>;
};
@@ -455,6 +683,8 @@
clock-names = "pclk", "hclk", "tx_clk";
#address-cells = <1>;
#size-cells = <0>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x875>;
power-domains = <&zynqmp_firmware PD_ETH_1>;
};
@@ -467,6 +697,8 @@
clock-names = "pclk", "hclk", "tx_clk";
#address-cells = <1>;
#size-cells = <0>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x876>;
power-domains = <&zynqmp_firmware PD_ETH_2>;
};
@@ -479,6 +711,8 @@
clock-names = "pclk", "hclk", "tx_clk";
#address-cells = <1>;
#size-cells = <0>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x877>;
power-domains = <&zynqmp_firmware PD_ETH_3>;
};
@@ -517,6 +751,17 @@
power-domains = <&zynqmp_firmware PD_I2C_1>;
};
+ nand0: nand@ff100000 {
+ compatible = "arasan,nfc-v3p10";
+ status = "disabled";
+ reg = <0x0 0xff100000 0x0 0x1000>;
+ clock-names = "clk_sys", "clk_flash";
+ interrupt-parent = <&gic>;
+ interrupts = <0 14 4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
pcie: pcie@fd0e0000 {
compatible = "xlnx,nwl-pcie-2.11";
status = "disabled";
@@ -554,6 +799,23 @@
};
};
+ qspi: spi@ff0f0000 {
+ u-boot,dm-pre-reloc;
+ compatible = "xlnx,zynqmp-qspi-1.0";
+ status = "disabled";
+ clock-names = "ref_clk", "pclk";
+ interrupts = <0 15 4>;
+ interrupt-parent = <&gic>;
+ num-cs = <1>;
+ reg = <0x0 0xff0f0000 0x0 0x1000>,
+ <0x0 0xc0000000 0x0 0x8000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x873>;
+ power-domains = <&zynqmp_firmware PD_QSPI>;
+ };
+
rtc: rtc@ffa60000 {
compatible = "xlnx,zynqmp-rtc";
status = "disabled";
@@ -564,6 +826,44 @@
calibration = <0x8000>;
};
+ serdes: zynqmp_phy@fd400000 {
+ compatible = "xlnx,zynqmp-psgtr-v1.1";
+ status = "disabled";
+ reg = <0x0 0xfd400000 0x0 0x40000>,
+ <0x0 0xfd3d0000 0x0 0x1000>;
+ reg-names = "serdes", "siou";
+ nvmem-cells = <&soc_revision>;
+ nvmem-cell-names = "soc_revision";
+ resets = <&zynqmp_reset ZYNQMP_RESET_SATA>,
+ <&zynqmp_reset ZYNQMP_RESET_USB0_CORERESET>,
+ <&zynqmp_reset ZYNQMP_RESET_USB1_CORERESET>,
+ <&zynqmp_reset ZYNQMP_RESET_USB0_HIBERRESET>,
+ <&zynqmp_reset ZYNQMP_RESET_USB1_HIBERRESET>,
+ <&zynqmp_reset ZYNQMP_RESET_USB0_APB>,
+ <&zynqmp_reset ZYNQMP_RESET_USB1_APB>,
+ <&zynqmp_reset ZYNQMP_RESET_DP>,
+ <&zynqmp_reset ZYNQMP_RESET_GEM0>,
+ <&zynqmp_reset ZYNQMP_RESET_GEM1>,
+ <&zynqmp_reset ZYNQMP_RESET_GEM2>,
+ <&zynqmp_reset ZYNQMP_RESET_GEM3>;
+ reset-names = "sata_rst", "usb0_crst", "usb1_crst",
+ "usb0_hibrst", "usb1_hibrst", "usb0_apbrst",
+ "usb1_apbrst", "dp_rst", "gem0_rst",
+ "gem1_rst", "gem2_rst", "gem3_rst";
+ lane0: lane0 {
+ #phy-cells = <4>;
+ };
+ lane1: lane1 {
+ #phy-cells = <4>;
+ };
+ lane2: lane2 {
+ #phy-cells = <4>;
+ };
+ lane3: lane3 {
+ #phy-cells = <4>;
+ };
+ };
+
sata: ahci@fd0c0000 {
compatible = "ceva,ahci-1v84";
status = "disabled";
@@ -571,45 +871,48 @@
interrupt-parent = <&gic>;
interrupts = <0 133 4>;
power-domains = <&zynqmp_firmware PD_SATA>;
+ #stream-id-cells = <4>;
+ /* iommus = <&smmu 0x4c0>, <&smmu 0x4c1>, */
+ /* <&smmu 0x4c2>, <&smmu 0x4c3>; */
+ /* dma-coherent; */
};
sdhci0: mmc@ff160000 {
+ u-boot,dm-pre-reloc;
compatible = "xlnx,zynqmp-8.9a", "arasan,sdhci-8.9a";
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 48 4>;
reg = <0x0 0xff160000 0x0 0x1000>;
clock-names = "clk_xin", "clk_ahb";
+ xlnx,device_id = <0>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x870>;
+ nvmem-cells = <&soc_revision>;
+ nvmem-cell-names = "soc_revision";
#clock-cells = <1>;
clock-output-names = "clk_out_sd0", "clk_in_sd0";
power-domains = <&zynqmp_firmware PD_SD_0>;
};
sdhci1: mmc@ff170000 {
+ u-boot,dm-pre-reloc;
compatible = "xlnx,zynqmp-8.9a", "arasan,sdhci-8.9a";
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 49 4>;
reg = <0x0 0xff170000 0x0 0x1000>;
clock-names = "clk_xin", "clk_ahb";
+ xlnx,device_id = <1>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x871>;
+ nvmem-cells = <&soc_revision>;
+ nvmem-cell-names = "soc_revision";
#clock-cells = <1>;
clock-output-names = "clk_out_sd1", "clk_in_sd1";
power-domains = <&zynqmp_firmware PD_SD_1>;
};
- smmu: smmu@fd800000 {
- compatible = "arm,mmu-500";
- reg = <0x0 0xfd800000 0x0 0x20000>;
- status = "disabled";
- #global-interrupts = <1>;
- interrupt-parent = <&gic>;
- interrupts = <0 155 4>,
- <0 155 4>, <0 155 4>, <0 155 4>, <0 155 4>,
- <0 155 4>, <0 155 4>, <0 155 4>, <0 155 4>,
- <0 155 4>, <0 155 4>, <0 155 4>, <0 155 4>,
- <0 155 4>, <0 155 4>, <0 155 4>, <0 155 4>;
- };
-
spi0: spi@ff040000 {
compatible = "cdns,spi-r1p6";
status = "disabled";
@@ -675,6 +978,7 @@
};
uart0: serial@ff000000 {
+ u-boot,dm-pre-reloc;
compatible = "cdns,uart-r1p12", "xlnx,xuartps";
status = "disabled";
interrupt-parent = <&gic>;
@@ -685,6 +989,7 @@
};
uart1: serial@ff010000 {
+ u-boot,dm-pre-reloc;
compatible = "cdns,uart-r1p12", "xlnx,xuartps";
status = "disabled";
interrupt-parent = <&gic>;
@@ -694,24 +999,65 @@
power-domains = <&zynqmp_firmware PD_UART_1>;
};
- usb0: usb@fe200000 {
- compatible = "snps,dwc3";
+ usb0: usb0@ff9d0000 {
+ #address-cells = <2>;
+ #size-cells = <2>;
status = "disabled";
- interrupt-parent = <&gic>;
- interrupts = <0 65 4>;
- reg = <0x0 0xfe200000 0x0 0x40000>;
- clock-names = "clk_xin", "clk_ahb";
+ compatible = "xlnx,zynqmp-dwc3";
+ reg = <0x0 0xff9d0000 0x0 0x100>;
+ clock-names = "bus_clk", "ref_clk";
power-domains = <&zynqmp_firmware PD_USB_0>;
+ ranges;
+ nvmem-cells = <&soc_revision>;
+ nvmem-cell-names = "soc_revision";
+
+ dwc3_0: dwc3@fe200000 {
+ compatible = "snps,dwc3";
+ status = "disabled";
+ reg = <0x0 0xfe200000 0x0 0x40000>;
+ interrupt-parent = <&gic>;
+ interrupt-names = "dwc_usb3", "otg", "hiber";
+ interrupts = <0 65 4>, <0 69 4>, <0 75 4>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x860>;
+ snps,quirk-frame-length-adjustment = <0x20>;
+ snps,refclk_fladj;
+ snps,enable_guctl1_resume_quirk;
+ snps,enable_guctl1_ipd_quirk;
+ snps,xhci-stream-quirk;
+ /* dma-coherent; */
+ /* snps,enable-hibernation; */
+ };
};
- usb1: usb@fe300000 {
- compatible = "snps,dwc3";
+ usb1: usb1@ff9e0000 {
+ #address-cells = <2>;
+ #size-cells = <2>;
status = "disabled";
- interrupt-parent = <&gic>;
- interrupts = <0 70 4>;
- reg = <0x0 0xfe300000 0x0 0x40000>;
- clock-names = "clk_xin", "clk_ahb";
+ compatible = "xlnx,zynqmp-dwc3";
+ reg = <0x0 0xff9e0000 0x0 0x100>;
+ clock-names = "bus_clk", "ref_clk";
power-domains = <&zynqmp_firmware PD_USB_1>;
+ ranges;
+ nvmem-cells = <&soc_revision>;
+ nvmem-cell-names = "soc_revision";
+
+ dwc3_1: dwc3@fe300000 {
+ compatible = "snps,dwc3";
+ status = "disabled";
+ reg = <0x0 0xfe300000 0x0 0x40000>;
+ interrupt-parent = <&gic>;
+ interrupt-names = "dwc_usb3", "otg", "hiber";
+ interrupts = <0 70 4>, <0 74 4>, <0 76 4>;
+ #stream-id-cells = <1>;
+ iommus = <&smmu 0x861>;
+ snps,quirk-frame-length-adjustment = <0x20>;
+ snps,refclk_fladj;
+ snps,enable_guctl1_resume_quirk;
+ snps,enable_guctl1_ipd_quirk;
+ snps,xhci-stream-quirk;
+ /* dma-coherent; */
+ };
};
watchdog0: watchdog@fd4d0000 {
@@ -720,7 +1066,130 @@
interrupt-parent = <&gic>;
interrupts = <0 113 1>;
reg = <0x0 0xfd4d0000 0x0 0x1000>;
+ timeout-sec = <60>;
+ reset-on-timeout;
+ };
+
+ lpd_watchdog: watchdog@ff150000 {
+ compatible = "cdns,wdt-r1p2";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 52 1>;
+ reg = <0x0 0xff150000 0x0 0x1000>;
timeout-sec = <10>;
};
+
+ xilinx_ams: ams@ffa50000 {
+ compatible = "xlnx,zynqmp-ams";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 56 4>;
+ interrupt-names = "ams-irq";
+ reg = <0x0 0xffa50000 0x0 0x800>;
+ reg-names = "ams-base";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ #io-channel-cells = <1>;
+ ranges;
+
+ ams_ps: ams_ps@ffa50800 {
+ compatible = "xlnx,zynqmp-ams-ps";
+ status = "disabled";
+ reg = <0x0 0xffa50800 0x0 0x400>;
+ };
+
+ ams_pl: ams_pl@ffa50c00 {
+ compatible = "xlnx,zynqmp-ams-pl";
+ status = "disabled";
+ reg = <0x0 0xffa50c00 0x0 0x400>;
+ };
+ };
+
+ xlnx_dpdma: dma@fd4c0000 {
+ compatible = "xlnx,dpdma";
+ status = "disabled";
+ reg = <0x0 0xfd4c0000 0x0 0x1000>;
+ interrupts = <0 122 4>;
+ interrupt-parent = <&gic>;
+ clock-names = "axi_clk";
+ power-domains = <&zynqmp_firmware PD_DP>;
+ dma-channels = <6>;
+ #dma-cells = <1>;
+ dma-video0channel {
+ compatible = "xlnx,video0";
+ };
+ dma-video1channel {
+ compatible = "xlnx,video1";
+ };
+ dma-video2channel {
+ compatible = "xlnx,video2";
+ };
+ dma-graphicschannel {
+ compatible = "xlnx,graphics";
+ };
+ dma-audio0channel {
+ compatible = "xlnx,audio0";
+ };
+ dma-audio1channel {
+ compatible = "xlnx,audio1";
+ };
+ };
+
+ zynqmp_dpsub: zynqmp-display@fd4a0000 {
+ compatible = "xlnx,zynqmp-dpsub-1.7";
+ status = "disabled";
+ reg = <0x0 0xfd4a0000 0x0 0x1000>,
+ <0x0 0xfd4aa000 0x0 0x1000>,
+ <0x0 0xfd4ab000 0x0 0x1000>,
+ <0x0 0xfd4ac000 0x0 0x1000>;
+ reg-names = "dp", "blend", "av_buf", "aud";
+ interrupts = <0 119 4>;
+ interrupt-parent = <&gic>;
+
+ clock-names = "dp_apb_clk", "dp_aud_clk",
+ "dp_vtc_pixel_clk_in";
+
+ power-domains = <&zynqmp_firmware PD_DP>;
+
+ vid-layer {
+ dma-names = "vid0", "vid1", "vid2";
+ dmas = <&xlnx_dpdma 0>,
+ <&xlnx_dpdma 1>,
+ <&xlnx_dpdma 2>;
+ };
+
+ gfx-layer {
+ dma-names = "gfx0";
+ dmas = <&xlnx_dpdma 3>;
+ };
+
+ /* dummy node to indicate there's no child i2c device */
+ i2c-bus {
+ };
+
+ zynqmp_dp_snd_codec0: zynqmp_dp_snd_codec0 {
+ compatible = "xlnx,dp-snd-codec";
+ clock-names = "aud_clk";
+ };
+
+ zynqmp_dp_snd_pcm0: zynqmp_dp_snd_pcm0 {
+ compatible = "xlnx,dp-snd-pcm";
+ dmas = <&xlnx_dpdma 4>;
+ dma-names = "tx";
+ };
+
+ zynqmp_dp_snd_pcm1: zynqmp_dp_snd_pcm1 {
+ compatible = "xlnx,dp-snd-pcm";
+ dmas = <&xlnx_dpdma 5>;
+ dma-names = "tx";
+ };
+
+ zynqmp_dp_snd_card0: zynqmp_dp_snd_card {
+ compatible = "xlnx,dp-snd-card";
+ xlnx,dp-snd-pcm = <&zynqmp_dp_snd_pcm0>,
+ <&zynqmp_dp_snd_pcm1>;
+ xlnx,dp-snd-codec = <&zynqmp_dp_snd_codec0>;
+ };
+ };
};
};
diff --git a/arch/arm64/configs/xilinx_defconfig b/arch/arm64/configs/xilinx_defconfig
new file mode 100644
index 000000000000..e4f2cbb9a9bf
--- /dev/null
+++ b/arch/arm64/configs/xilinx_defconfig
@@ -0,0 +1,429 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_CGROUPS=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_ARCH_ZYNQMP=y
+CONFIG_NR_CPUS=4
+CONFIG_KEXEC=y
+CONFIG_CRASH_DUMP=y
+CONFIG_COMPAT=y
+CONFIG_CPU_IDLE=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_ZYNQMP_FIRMWARE_DEBUG=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_SHA512_ARM64_CE=y
+CONFIG_CRYPTO_SHA3_ARM64=y
+CONFIG_CRYPTO_SM3_ARM64_CE=y
+CONFIG_CRYPTO_SM4_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_CHACHA20_NEON=y
+CONFIG_CRYPTO_AES_ARM64_BS=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_SPARSEMEM_VMEMMAP is not set
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
+CONFIG_CMA=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_SNMP=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_MARK=y
+CONFIG_NETFILTER_XT_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_T_FILTER=y
+CONFIG_BRIDGE_EBT_T_NAT=y
+CONFIG_BRIDGE_EBT_MARK_T=y
+CONFIG_BRIDGE=y
+CONFIG_NET_PKTGEN=y
+CONFIG_CAN=y
+CONFIG_CAN_XILINXCAN=y
+CONFIG_BT=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_LEDS=y
+CONFIG_BT_HCIBTUSB=y
+CONFIG_BT_HCIBTSDIO=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_ATH3K=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIUART_3WIRE=y
+CONFIG_BT_HCIUART_INTEL=y
+CONFIG_BT_HCIUART_QCA=y
+CONFIG_BT_HCIBCM203X=y
+CONFIG_BT_HCIBPA10X=y
+CONFIG_BT_HCIBFUSB=y
+CONFIG_BT_HCIVHCI=y
+CONFIG_BT_MRVL=y
+CONFIG_BT_MRVL_SDIO=y
+CONFIG_BT_ATH3K=y
+CONFIG_BT_WILINK=y
+CONFIG_CFG80211=y
+CONFIG_NL80211_TESTMODE=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+CONFIG_CFG80211_REG_CELLULAR_HINTS=y
+CONFIG_CFG80211_REG_RELAX_NO_IR=y
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=y
+CONFIG_MAC80211_LEDS=y
+CONFIG_MAC80211_MESSAGE_TRACING=y
+CONFIG_MAC80211_DEBUG_MENU=y
+CONFIG_RFKILL=y
+CONFIG_RFKILL_INPUT=y
+CONFIG_RFKILL_GPIO=y
+CONFIG_NET_9P=y
+CONFIG_PCI=y
+CONFIG_PCIE_XILINX_NWL=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_TESTS=m
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_DATAFLASH=y
+CONFIG_MTD_RAW_NAND=y
+CONFIG_MTD_NAND_ARASAN=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_SPI_CADENCE_QUADSPI=y
+CONFIG_OF_OVERLAY=y
+CONFIG_OF_CONFIGFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=65536
+CONFIG_SRAM=y
+CONFIG_XILINX_SDFEC=y
+CONFIG_XILINX_JESD204B=y
+CONFIG_XILINX_JESD204B_PHY=y
+CONFIG_EEPROM_AT24=y
+CONFIG_EEPROM_AT25=y
+CONFIG_TI_ST=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_CEVA=y
+# CONFIG_ATA_SFF is not set
+CONFIG_NETDEVICES=y
+CONFIG_TUN=y
+CONFIG_MACB=y
+CONFIG_XILINX_EMACLITE=y
+CONFIG_XILINX_AXI_EMAC=y
+CONFIG_AMD_PHY=y
+CONFIG_AT803X_PHY=y
+CONFIG_BCM7XXX_PHY=y
+CONFIG_BCM87XX_PHY=y
+CONFIG_BROADCOM_PHY=y
+CONFIG_CICADA_PHY=y
+CONFIG_DAVICOM_PHY=y
+CONFIG_DP83848_PHY=y
+CONFIG_DP83867_PHY=y
+CONFIG_ICPLUS_PHY=y
+CONFIG_LSI_ET1011C_PHY=y
+CONFIG_LXT_PHY=y
+CONFIG_MARVELL_PHY=y
+CONFIG_MICREL_PHY=y
+CONFIG_MICROSEMI_PHY=y
+CONFIG_NATIONAL_PHY=y
+CONFIG_QSEMI_PHY=y
+CONFIG_REALTEK_PHY=y
+CONFIG_SMSC_PHY=y
+CONFIG_STE10XP=y
+CONFIG_VITESSE_PHY=y
+CONFIG_XILINX_GMII2RGMII=y
+CONFIG_USB_USBNET=y
+CONFIG_WL18XX=y
+CONFIG_WLCORE_SPI=y
+CONFIG_WLCORE_SDIO=y
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_AMBA_PL010=y
+CONFIG_SERIAL_AMBA_PL010_CONSOLE=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_MAX310X=y
+CONFIG_SERIAL_UARTLITE=y
+CONFIG_SERIAL_UARTLITE_CONSOLE=y
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+CONFIG_SERIAL_DEV_BUS=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA9541=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_CADENCE=y
+CONFIG_I2C_XILINX=y
+CONFIG_SPI=y
+CONFIG_SPI_CADENCE=y
+CONFIG_SPI_XILINX=y
+CONFIG_SPI_ZYNQMP_GQSPI=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_XILINX=y
+CONFIG_GPIO_ZYNQ=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_TPS65086=y
+CONFIG_POWER_RESET_LTC2952=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_XILINX_WATCHDOG=y
+CONFIG_CADENCE_WATCHDOG=y
+CONFIG_MFD_TPS65086=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_TPS65086=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_VIDEO_XILINX=y
+CONFIG_VIDEO_XILINX_AXI4S_SWITCH=y
+CONFIG_VIDEO_XILINX_CFA=y
+CONFIG_VIDEO_XILINX_CRESAMPLE=y
+CONFIG_VIDEO_XILINX_DEMOSAIC=y
+CONFIG_VIDEO_XILINX_GAMMA=y
+CONFIG_VIDEO_XILINX_HLS=y
+CONFIG_VIDEO_XILINX_REMAPPER=y
+CONFIG_VIDEO_XILINX_RGB2YUV=y
+CONFIG_VIDEO_XILINX_SCALER=y
+CONFIG_VIDEO_XILINX_MULTISCALER=y
+CONFIG_VIDEO_XILINX_SDIRXSS=y
+CONFIG_VIDEO_XILINX_SWITCH=y
+CONFIG_VIDEO_XILINX_TPG=y
+CONFIG_VIDEO_XILINX_VPSS_CSC=y
+CONFIG_VIDEO_XILINX_VPSS_SCALER=y
+CONFIG_VIDEO_XILINX_CSI2RXSS=y
+CONFIG_VIDEO_XILINX_SCD=y
+CONFIG_VIDEO_XILINX_M2M=y
+# CONFIG_VGA_ARB is not set
+CONFIG_DRM=y
+CONFIG_DRM_XLNX=y
+CONFIG_DRM_XLNX_BRIDGE=y
+CONFIG_DRM_XLNX_BRIDGE_DEBUG_FS=y
+CONFIG_DRM_ZYNQMP_DPSUB=y
+CONFIG_DRM_XLNX_DSI=y
+CONFIG_DRM_XLNX_MIXER=y
+CONFIG_DRM_XLNX_PL_DISP=y
+CONFIG_DRM_XLNX_SDI=y
+CONFIG_DRM_XLNX_BRIDGE_CSC=y
+CONFIG_DRM_XLNX_BRIDGE_SCALER=y
+CONFIG_FB_XILINX=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+# CONFIG_SND_DRIVERS is not set
+# CONFIG_SND_PCI is not set
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_XILINX_DP=y
+CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER=y
+CONFIG_SND_SOC_XILINX_SDI=y
+CONFIG_SND_SOC_XILINX_I2S=y
+CONFIG_SND_SOC_XILINX_SPDIF=y
+CONFIG_SND_SOC_XILINX_PL_SND_CARD=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+# CONFIG_USB_DEFAULT_PERSIST is not set
+CONFIG_USB_OTG=y
+CONFIG_USB_OTG_FSM=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_UAS=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_XILINX=y
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_EEM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_EEM=y
+CONFIG_USB_MASS_STORAGE=m
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_ONESHOT=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_GPIO=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_TRANSIENT=y
+CONFIG_LEDS_TRIGGER_CAMERA=y
+CONFIG_EDAC=y
+CONFIG_EDAC_SYNOPSYS=y
+CONFIG_EDAC_ZYNQMP_OCM=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_ZYNQMP=y
+CONFIG_XILINX_DMA=y
+CONFIG_XILINX_ZYNQMP_DMA=y
+CONFIG_DMATEST=y
+CONFIG_UIO=y
+CONFIG_UIO_XILINX_APM=y
+CONFIG_UIO_XILINX_AI_ENGINE=y
+CONFIG_STAGING=y
+CONFIG_ION=y
+CONFIG_ION_SYSTEM_HEAP=y
+CONFIG_ION_CMA_HEAP=y
+CONFIG_XILINX_FCLK=y
+CONFIG_COMMON_CLK_SI570=y
+CONFIG_COMMON_CLK_SI5324=y
+CONFIG_COMMON_CLK_XLNX_CLKWZRD=y
+CONFIG_COMMON_CLK_ZYNQMP=y
+# CONFIG_ARM_ARCH_TIMER_EVTSTREAM is not set
+# CONFIG_FSL_ERRATUM_A008585 is not set
+CONFIG_REMOTEPROC=y
+CONFIG_ZYNQMP_R5_REMOTEPROC=m
+CONFIG_XILINX_VCU=m
+CONFIG_IIO=y
+CONFIG_INA2XX_ADC=y
+CONFIG_XILINX_XADC=y
+CONFIG_XILINX_AMS=y
+CONFIG_XILINX_INTC=y
+CONFIG_RESET_CONTROLLER=y
+CONFIG_ARM_CCI_PMU=y
+# CONFIG_ARM_PMU is not set
+CONFIG_RAS=y
+CONFIG_ANDROID=y
+CONFIG_NVMEM_ZYNQMP=y
+CONFIG_FPGA=y
+CONFIG_XILINX_AFI_FPGA=y
+CONFIG_FPGA_BRIDGE=y
+CONFIG_XILINX_PR_DECOUPLER=y
+CONFIG_FPGA_REGION=y
+CONFIG_OF_FPGA_REGION=y
+CONFIG_FPGA_MGR_ZYNQMP_FPGA=y
+CONFIG_FPGA_MGR_VERSAL_FPGA=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_BTRFS_FS=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_LZO=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_CRCT10DIF=y
+CONFIG_CRYPTO_USER_API_SKCIPHER=y
+CONFIG_CRYPTO_DEV_ZYNQMP_SHA3=y
+CONFIG_CRYPTO_DEV_XILINX_RSA=y
+CONFIG_CRYPTO_DEV_ZYNQMP_AES=y
+# CONFIG_XZ_DEC_X86 is not set
+# CONFIG_XZ_DEC_POWERPC is not set
+# CONFIG_XZ_DEC_IA64 is not set
+# CONFIG_XZ_DEC_ARM is not set
+# CONFIG_XZ_DEC_ARMTHUMB is not set
+# CONFIG_XZ_DEC_SPARC is not set
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=256
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/arm64/configs/xilinx_versal_defconfig b/arch/arm64/configs/xilinx_versal_defconfig
new file mode 100644
index 000000000000..ad41341ce50c
--- /dev/null
+++ b/arch/arm64/configs/xilinx_versal_defconfig
@@ -0,0 +1,223 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_ARCH_ZYNQMP=y
+CONFIG_NR_CPUS=8
+CONFIG_COMPAT=y
+# CONFIG_EFI is not set
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_ZYNQMP_FIRMWARE_DEBUG=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_SPARSEMEM_VMEMMAP is not set
+# CONFIG_COMPACTION is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
+CONFIG_CMA=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_SNMP=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XTABLES=y
+CONFIG_NETFILTER_XT_MARK=y
+CONFIG_NETFILTER_XT_CONNMARK=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_T_FILTER=y
+CONFIG_BRIDGE_EBT_T_NAT=y
+CONFIG_BRIDGE_EBT_MARK_T=y
+CONFIG_BRIDGE=y
+CONFIG_NET_PKTGEN=y
+CONFIG_CAN=y
+CONFIG_CAN_XILINXCAN=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_MTD=y
+CONFIG_MTD_TESTS=m
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_DATAFLASH=y
+CONFIG_MTD_RAW_NAND=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_SPI_CADENCE_QUADSPI=y
+CONFIG_OF_OVERLAY=y
+CONFIG_OF_CONFIGFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=65536
+CONFIG_SRAM=y
+CONFIG_EEPROM_AT24=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+CONFIG_TUN=y
+CONFIG_MACB=y
+CONFIG_DP83848_PHY=y
+CONFIG_DP83867_PHY=y
+CONFIG_MARVELL_PHY=y
+CONFIG_MICROSEMI_PHY=y
+CONFIG_REALTEK_PHY=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_AMBA_PL010=y
+CONFIG_SERIAL_AMBA_PL010_CONSOLE=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_CADENCE=y
+CONFIG_SPI=y
+CONFIG_SPI_CADENCE=y
+CONFIG_SPI_XILINX=y
+CONFIG_SPI_ZYNQMP_GQSPI=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_ZYNQ=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_XILINX_WATCHDOG=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_VIDEO_XILINX=y
+CONFIG_VIDEO_XILINX_AXI4S_SWITCH=y
+CONFIG_VIDEO_XILINX_CFA=y
+CONFIG_VIDEO_XILINX_CRESAMPLE=y
+CONFIG_VIDEO_XILINX_DEMOSAIC=y
+CONFIG_VIDEO_XILINX_GAMMA=y
+CONFIG_VIDEO_XILINX_HLS=y
+CONFIG_VIDEO_XILINX_REMAPPER=y
+CONFIG_VIDEO_XILINX_RGB2YUV=y
+CONFIG_VIDEO_XILINX_SCALER=y
+CONFIG_VIDEO_XILINX_MULTISCALER=y
+CONFIG_VIDEO_XILINX_SDIRXSS=y
+CONFIG_VIDEO_XILINX_SWITCH=y
+CONFIG_VIDEO_XILINX_TPG=y
+CONFIG_VIDEO_XILINX_VPSS_CSC=y
+CONFIG_VIDEO_XILINX_VPSS_SCALER=y
+CONFIG_VIDEO_XILINX_CSI2RXSS=y
+CONFIG_VIDEO_XILINX_SCD=y
+CONFIG_VIDEO_XILINX_M2M=y
+CONFIG_DRM=y
+CONFIG_DRM_XLNX=y
+CONFIG_DRM_XLNX_BRIDGE=y
+CONFIG_DRM_XLNX_BRIDGE_DEBUG_FS=y
+CONFIG_DRM_XLNX_DSI=y
+CONFIG_DRM_XLNX_MIXER=y
+CONFIG_DRM_XLNX_PL_DISP=y
+CONFIG_DRM_XLNX_SDI=y
+CONFIG_DRM_XLNX_BRIDGE_CSC=y
+CONFIG_DRM_XLNX_BRIDGE_SCALER=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_OTG=y
+CONFIG_USB_OTG_FSM=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_ZYNQMP=y
+CONFIG_XILINX_ZYNQMP_DMA=y
+CONFIG_DMATEST=y
+CONFIG_UIO=y
+CONFIG_UIO_XILINX_AI_ENGINE=y
+CONFIG_STAGING=y
+CONFIG_ION=y
+CONFIG_ION_SYSTEM_HEAP=y
+CONFIG_ION_CMA_HEAP=y
+CONFIG_COMMON_CLK_ZYNQMP=y
+# CONFIG_ARM_ARCH_TIMER_EVTSTREAM is not set
+# CONFIG_FSL_ERRATUM_A008585 is not set
+CONFIG_XILINX_INTC=y
+CONFIG_ARM_CCI_PMU=y
+# CONFIG_ARM_PMU is not set
+CONFIG_ANDROID=y
+CONFIG_FPGA=y
+CONFIG_FPGA_BRIDGE=y
+CONFIG_XILINX_PR_DECOUPLER=y
+CONFIG_FPGA_REGION=y
+CONFIG_OF_FPGA_REGION=y
+CONFIG_FPGA_MGR_VERSAL_FPGA=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_BTRFS_FS=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_ECRYPT_FS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_LZO=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_XZ_DEC_X86 is not set
+# CONFIG_XZ_DEC_POWERPC is not set
+# CONFIG_XZ_DEC_IA64 is not set
+# CONFIG_XZ_DEC_ARM is not set
+# CONFIG_XZ_DEC_ARMTHUMB is not set
+# CONFIG_XZ_DEC_SPARC is not set
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=256
+CONFIG_PRINTK_TIME=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/arm64/configs/xilinx_zynqmp_defconfig b/arch/arm64/configs/xilinx_zynqmp_defconfig
new file mode 100644
index 000000000000..85175f132bc5
--- /dev/null
+++ b/arch/arm64/configs/xilinx_zynqmp_defconfig
@@ -0,0 +1,388 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_CGROUPS=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_ARCH_ZYNQMP=y
+CONFIG_NR_CPUS=8
+CONFIG_COMPAT=y
+# CONFIG_DMI is not set
+CONFIG_CPU_IDLE=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
+CONFIG_CMA=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_XT_MARK=y
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_T_FILTER=y
+CONFIG_BRIDGE_EBT_T_NAT=y
+CONFIG_BRIDGE_EBT_MARK_T=y
+CONFIG_BRIDGE=y
+CONFIG_NET_PKTGEN=y
+CONFIG_CAN=y
+CONFIG_CAN_XILINXCAN=y
+CONFIG_BT=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_LEDS=y
+CONFIG_BT_HCIBTUSB=y
+CONFIG_BT_HCIBTSDIO=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_ATH3K=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIUART_3WIRE=y
+CONFIG_BT_HCIUART_INTEL=y
+CONFIG_BT_HCIUART_QCA=y
+CONFIG_BT_HCIBCM203X=y
+CONFIG_BT_HCIBPA10X=y
+CONFIG_BT_HCIBFUSB=y
+CONFIG_BT_HCIVHCI=y
+CONFIG_BT_MRVL=y
+CONFIG_BT_MRVL_SDIO=y
+CONFIG_BT_ATH3K=y
+CONFIG_BT_WILINK=y
+CONFIG_CFG80211=y
+CONFIG_NL80211_TESTMODE=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+CONFIG_CFG80211_REG_CELLULAR_HINTS=y
+CONFIG_CFG80211_REG_RELAX_NO_IR=y
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=y
+CONFIG_MAC80211_LEDS=y
+CONFIG_MAC80211_MESSAGE_TRACING=y
+CONFIG_MAC80211_DEBUG_MENU=y
+CONFIG_RFKILL=y
+CONFIG_RFKILL_INPUT=y
+CONFIG_RFKILL_GPIO=y
+CONFIG_NET_9P=y
+CONFIG_PCI=y
+CONFIG_PCIE_XILINX_NWL=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_TESTS=m
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_DATAFLASH=y
+CONFIG_MTD_RAW_NAND=y
+CONFIG_MTD_NAND_ARASAN=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_OF_OVERLAY=y
+CONFIG_OF_CONFIGFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=65536
+CONFIG_XILINX_SDFEC=y
+CONFIG_XILINX_JESD204B=y
+CONFIG_XILINX_JESD204B_PHY=y
+CONFIG_EEPROM_AT24=y
+CONFIG_EEPROM_AT25=y
+CONFIG_TI_ST=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_CEVA=y
+# CONFIG_ATA_SFF is not set
+CONFIG_NETDEVICES=y
+CONFIG_TUN=y
+CONFIG_MACB=y
+CONFIG_XILINX_EMACLITE=y
+CONFIG_XILINX_AXI_EMAC=y
+CONFIG_AMD_PHY=y
+CONFIG_AT803X_PHY=y
+CONFIG_BCM7XXX_PHY=y
+CONFIG_BCM87XX_PHY=y
+CONFIG_BROADCOM_PHY=y
+CONFIG_CICADA_PHY=y
+CONFIG_DAVICOM_PHY=y
+CONFIG_DP83867_PHY=y
+CONFIG_ICPLUS_PHY=y
+CONFIG_LSI_ET1011C_PHY=y
+CONFIG_LXT_PHY=y
+CONFIG_MARVELL_PHY=y
+CONFIG_MICREL_PHY=y
+CONFIG_NATIONAL_PHY=y
+CONFIG_QSEMI_PHY=y
+CONFIG_REALTEK_PHY=y
+CONFIG_SMSC_PHY=y
+CONFIG_STE10XP=y
+CONFIG_VITESSE_PHY=y
+CONFIG_XILINX_GMII2RGMII=y
+CONFIG_USB_USBNET=y
+CONFIG_WL18XX=y
+CONFIG_WLCORE_SPI=y
+CONFIG_WLCORE_SDIO=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_GPIO_POLLED=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_MAX310X=y
+CONFIG_SERIAL_UARTLITE=y
+CONFIG_SERIAL_UARTLITE_CONSOLE=y
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+CONFIG_SERIAL_DEV_BUS=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_CADENCE=y
+CONFIG_I2C_XILINX=y
+CONFIG_SPI=y
+CONFIG_SPI_CADENCE=y
+CONFIG_SPI_XILINX=y
+CONFIG_SPI_ZYNQMP_GQSPI=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_XILINX=y
+CONFIG_GPIO_ZYNQ=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_TPS65086=y
+CONFIG_POWER_RESET_LTC2952=y
+CONFIG_SENSORS_IIO_HWMON=y
+CONFIG_PMBUS=y
+CONFIG_SENSORS_MAX20751=y
+CONFIG_WATCHDOG=y
+CONFIG_XILINX_WATCHDOG=y
+CONFIG_CADENCE_WATCHDOG=y
+CONFIG_MFD_TPS65086=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_TPS65086=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_VIDEO_XILINX=y
+CONFIG_VIDEO_XILINX_AXI4S_SWITCH=y
+CONFIG_VIDEO_XILINX_CFA=y
+CONFIG_VIDEO_XILINX_CRESAMPLE=y
+CONFIG_VIDEO_XILINX_DEMOSAIC=y
+CONFIG_VIDEO_XILINX_GAMMA=y
+CONFIG_VIDEO_XILINX_HLS=y
+CONFIG_VIDEO_XILINX_REMAPPER=y
+CONFIG_VIDEO_XILINX_RGB2YUV=y
+CONFIG_VIDEO_XILINX_SCALER=y
+CONFIG_VIDEO_XILINX_MULTISCALER=y
+CONFIG_VIDEO_XILINX_SDIRXSS=y
+CONFIG_VIDEO_XILINX_SWITCH=y
+CONFIG_VIDEO_XILINX_TPG=y
+CONFIG_VIDEO_XILINX_VPSS_CSC=y
+CONFIG_VIDEO_XILINX_VPSS_SCALER=y
+CONFIG_VIDEO_XILINX_CSI2RXSS=y
+CONFIG_VIDEO_XILINX_SCD=y
+CONFIG_VIDEO_XILINX_M2M=y
+# CONFIG_VGA_ARB is not set
+CONFIG_DRM=y
+CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM=y
+CONFIG_DRM_XLNX=y
+CONFIG_DRM_XLNX_BRIDGE=y
+CONFIG_DRM_XLNX_BRIDGE_DEBUG_FS=y
+CONFIG_DRM_ZYNQMP_DPSUB=y
+CONFIG_DRM_XLNX_DSI=y
+CONFIG_DRM_XLNX_MIXER=y
+CONFIG_DRM_XLNX_PL_DISP=y
+CONFIG_DRM_XLNX_SDI=y
+CONFIG_DRM_XLNX_BRIDGE_CSC=y
+CONFIG_DRM_XLNX_BRIDGE_SCALER=y
+CONFIG_FB_XILINX=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+# CONFIG_SND_DRIVERS is not set
+# CONFIG_SND_PCI is not set
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_XILINX_DP=y
+CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER=y
+CONFIG_SND_SOC_XILINX_SDI=y
+CONFIG_SND_SOC_XILINX_I2S=y
+CONFIG_SND_SOC_XILINX_SPDIF=y
+CONFIG_SND_SOC_XILINX_PL_SND_CARD=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+# CONFIG_USB_DEFAULT_PERSIST is not set
+CONFIG_USB_OTG=y
+CONFIG_USB_OTG_FSM=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_UAS=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_XILINX=y
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_EEM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_EEM=y
+CONFIG_USB_MASS_STORAGE=m
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_ONESHOT=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_GPIO=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_TRANSIENT=y
+CONFIG_LEDS_TRIGGER_CAMERA=y
+CONFIG_EDAC=y
+CONFIG_EDAC_SYNOPSYS=y
+CONFIG_EDAC_ZYNQMP_OCM=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_ZYNQMP=y
+CONFIG_XILINX_DMA=y
+CONFIG_XILINX_ZYNQMP_DMA=y
+CONFIG_DMATEST=y
+CONFIG_UIO=y
+CONFIG_UIO_PDRV_GENIRQ=m
+CONFIG_UIO_DMEM_GENIRQ=m
+CONFIG_UIO_XILINX_APM=y
+CONFIG_STAGING=y
+CONFIG_ION=y
+CONFIG_ION_SYSTEM_HEAP=y
+CONFIG_ION_CMA_HEAP=y
+CONFIG_XILINX_FCLK=y
+CONFIG_COMMON_CLK_SI570=y
+CONFIG_COMMON_CLK_SI5324=y
+CONFIG_COMMON_CLK_XLNX_CLKWZRD=y
+CONFIG_COMMON_CLK_ZYNQMP=y
+CONFIG_REMOTEPROC=y
+CONFIG_ZYNQMP_R5_REMOTEPROC=m
+CONFIG_XILINX_VCU=m
+CONFIG_IIO=y
+CONFIG_INA2XX_ADC=y
+CONFIG_XILINX_XADC=y
+CONFIG_XILINX_AMS=y
+CONFIG_XILINX_INTC=y
+CONFIG_RESET_CONTROLLER=y
+CONFIG_RAS=y
+CONFIG_ANDROID=y
+CONFIG_NVMEM_ZYNQMP=y
+CONFIG_FPGA=y
+CONFIG_XILINX_AFI_FPGA=y
+CONFIG_FPGA_BRIDGE=y
+CONFIG_XILINX_PR_DECOUPLER=y
+CONFIG_FPGA_REGION=y
+CONFIG_OF_FPGA_REGION=y
+CONFIG_FPGA_MGR_ZYNQMP_FPGA=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_BTRFS_FS=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_LZO=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_CRCT10DIF=y
+CONFIG_CRYPTO_USER_API_SKCIPHER=y
+CONFIG_CRYPTO_DEV_ZYNQMP_SHA3=y
+CONFIG_CRYPTO_DEV_XILINX_RSA=y
+CONFIG_CRYPTO_DEV_ZYNQMP_AES=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=256
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 9606c244b5b8..10ee6628d4cc 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -47,6 +47,8 @@ config MICROBLAZE
select CPU_NO_EFFICIENT_FFS
select MMU_GATHER_NO_RANGE if MMU
select SPARSE_IRQ
+ select GENERIC_IRQ_MULTI_HANDLER
+ select HANDLE_DOMAIN_IRQ
# Endianness selection
choice
@@ -64,6 +66,33 @@ config CPU_LITTLE_ENDIAN
endchoice
+config SMP
+ bool "SMP support (EXPERIMENTAL)"
+ default n
+ help
+ This option enables SMP support for MicroBlaze. Every CPU has its own
+ BRAM connected via LMB. The BRAM is used as CPU private memory, which
+ is one reason CPU hotplug is not yet supported.
+ Timers and interrupt controllers are placed on the same bus and
+ accessible by all CPUs, but every CPU is assigned one timer and one
+ interrupt controller. There is also one free running clock source
+ timer for the whole system. The boot CPU wakes up other CPUs by
+ sending a wake-up software Interrupt to a specific CPU that is
+ sleeping. Wake-up will cause a jump to DDR start address where it is
+ assumed that the kernel is placed. There is currently no support for
+ placing the kernel at a different location.
+
+config GENERIC_LOCKBREAK
+ bool
+ default y
+ depends on SMP && PREEMPT
+
+config NR_CPUS
+ int "Maximum number of CPUs (2-8)"
+ range 2 8
+ depends on SMP
+ default "2"
+
config ZONE_DMA
def_bool y
diff --git a/arch/microblaze/include/asm/atomic.h b/arch/microblaze/include/asm/atomic.h
index 41e9aff23a62..522d704fad63 100644
--- a/arch/microblaze/include/asm/atomic.h
+++ b/arch/microblaze/include/asm/atomic.h
@@ -1,28 +1,269 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2013-2020 Xilinx, Inc.
+ */
+
#ifndef _ASM_MICROBLAZE_ATOMIC_H
#define _ASM_MICROBLAZE_ATOMIC_H
+#include <linux/types.h>
#include <asm/cmpxchg.h>
-#include <asm-generic/atomic.h>
-#include <asm-generic/atomic64.h>
+
+#define ATOMIC_INIT(i) { (i) }
+
+#define atomic_read(v) READ_ONCE((v)->counter)
+
+static inline void atomic_set(atomic_t *v, int i)
+{
+ int result, tmp;
+
+ __asm__ __volatile__ (
+ /* load conditional address in %2 to %0 */
+ "1: lwx %0, %2, r0;\n"
+ /* attempt store */
+ " swx %3, %2, r0;\n"
+ /* checking msr carry flag */
+ " addic %1, r0, 0;\n"
+ /* store failed (MSR[C] set)? try again */
+ " bnei %1, 1b;\n"
+ /* Outputs: result value */
+ : "=&r" (result), "=&r" (tmp)
+ /* Inputs: counter address */
+ : "r" (&v->counter), "r" (i)
+ : "cc", "memory"
+ );
+}
+#define atomic_set atomic_set
+
+/* Atomically perform op with v->counter and i, return result */
+#define ATOMIC_OP_RETURN(op, asm) \
+static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
+{ \
+ int result, tmp; \
+ \
+ __asm__ __volatile__ ( \
+ /* load conditional address in %2 to %0 */ \
+ "1: lwx %0, %2, r0;\n" \
+ /* perform operation and save it to result */ \
+ #asm " %0, %3, %0;\n" \
+ /* attempt store */ \
+ " swx %0, %2, r0;\n" \
+ /* checking msr carry flag */ \
+ " addic %1, r0, 0;\n" \
+ /* store failed (MSR[C] set)? try again */ \
+ " bnei %1, 1b;\n" \
+ /* Outputs: result value */ \
+ : "=&r" (result), "=&r" (tmp) \
+ /* Inputs: counter address */ \
+ : "r" (&v->counter), "r" (i) \
+ : "cc", "memory" \
+ ); \
+ \
+ return result; \
+} \
+ \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ atomic_##op##_return_relaxed(i, v); \
+}
+
+/* Atomically perform op with v->counter and i, return orig v->counter */
+#define ATOMIC_FETCH_OP_RELAXED(op, asm) \
+static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+{ \
+ int old, tmp; \
+ \
+ __asm__ __volatile__ ( \
+ /* load conditional address in %2 to %0 */ \
+ "1: lwx %0, %2, r0;\n" \
+ /* perform operation and save it to tmp */ \
+ #asm " %1, %3, %0;\n" \
+ /* attempt store */ \
+ " swx %1, %2, r0;\n" \
+ /* checking msr carry flag */ \
+ " addic %1, r0, 0;\n" \
+ /* store failed (MSR[C] set)? try again */ \
+ " bnei %1, 1b;\n" \
+ /* Outputs: old value */ \
+ : "=&r" (old), "=&r" (tmp) \
+ /* Inputs: counter address */ \
+ : "r" (&v->counter), "r" (i) \
+ : "cc", "memory" \
+ ); \
+ \
+ return old; \
+}
+
+#define ATOMIC_OPS(op, asm) \
+ ATOMIC_FETCH_OP_RELAXED(op, asm) \
+ ATOMIC_OP_RETURN(op, asm)
+
+ATOMIC_OPS(and, and)
+#define atomic_and atomic_and
+#define atomic_and_return_relaxed atomic_and_return_relaxed
+#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
+
+ATOMIC_OPS(add, add)
+#define atomic_add atomic_add
+#define atomic_add_return_relaxed atomic_add_return_relaxed
+#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
+
+ATOMIC_OPS(xor, xor)
+#define atomic_xor atomic_xor
+#define atomic_xor_return_relaxed atomic_xor_return_relaxed
+#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+
+ATOMIC_OPS(or, or)
+#define atomic_or atomic_or
+#define atomic_or_return_relaxed atomic_or_return_relaxed
+#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
+
+ATOMIC_OPS(sub, rsub)
+#define atomic_sub atomic_sub
+#define atomic_sub_return_relaxed atomic_sub_return_relaxed
+#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+
+static inline int atomic_inc_return_relaxed(atomic_t *v)
+{
+ int result, tmp;
+
+ __asm__ __volatile__ (
+ /* load conditional address in %2 to %0 */
+ "1: lwx %0, %2, r0;\n"
+ /* increment counter by 1 */
+ " addi %0, %0, 1;\n"
+ /* attempt store */
+ " swx %0, %2, r0;\n"
+ /* checking msr carry flag */
+ " addic %1, r0, 0;\n"
+ /* store failed (MSR[C] set)? try again */
+ " bnei %1, 1b;\n"
+ /* Outputs: result value */
+ : "=&r" (result), "=&r" (tmp)
+ /* Inputs: counter address */
+ : "r" (&v->counter)
+ : "cc", "memory"
+ );
+
+ return result;
+}
+#define atomic_inc_return_relaxed atomic_inc_return_relaxed
+
+#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+
+static inline int atomic_dec_return(atomic_t *v)
+{
+ int result, tmp;
+
+ __asm__ __volatile__ (
+ /* load conditional address in %2 to %0 */
+ "1: lwx %0, %2, r0;\n"
+ /* increment counter by -1 */
+ " addi %0, %0, -1;\n"
+ /* attempt store */
+ " swx %0, %2, r0;\n"
+ /* checking msr carry flag */
+ " addic %1, r0, 0;\n"
+ /* store failed (MSR[C] set)? try again */
+ " bnei %1, 1b;\n"
+ /* Outputs: result value */
+ : "=&r" (result), "=&r" (tmp)
+ /* Inputs: counter address */
+ : "r" (&v->counter)
+ : "cc", "memory"
+ );
+
+ return result;
+}
+#define atomic_dec_return atomic_dec_return
+
+static inline void atomic_dec(atomic_t *v)
+{
+ atomic_dec_return(v);
+}
+#define atomic_dec atomic_dec
+
+#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)
+
+#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns the old value of @v.
+ */
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int result, tmp;
+
+ __asm__ __volatile__ (
+ /* load conditional address in %2 to %0 */
+ "1: lwx %0, %2, r0;\n"
+ /* compare loaded value with old value*/
+ " cmp %1, %0, %3;\n"
+ /* equal to u, don't increment */
+ " beqid %1, 2f;\n"
+ /* increment counter by i */
+ " add %1, %0, %4;\n"
+ /* attempt store of new value*/
+ " swx %1, %2, r0;\n"
+ /* checking msr carry flag */
+ " addic %1, r0, 0;\n"
+ /* store failed (MSR[C] set)? try again */
+ " bnei %1, 1b;\n"
+ "2:"
+ /* Outputs: result value */
+ : "=&r" (result), "=&r" (tmp)
+ /* Inputs: counter address, old, new */
+ : "r" (&v->counter), "r" (u), "r" (a)
+ : "cc", "memory"
+ );
+
+ return result;
+}
/*
* Atomically test *v and decrement if it is greater than 0.
- * The function returns the old value of *v minus 1.
+ * The function returns the old value of *v minus 1, even if
+ * the atomic variable, v, was not decremented.
*/
static inline int atomic_dec_if_positive(atomic_t *v)
{
- unsigned long flags;
- int res;
+ int result, tmp;
- local_irq_save(flags);
- res = v->counter - 1;
- if (res >= 0)
- v->counter = res;
- local_irq_restore(flags);
+ __asm__ __volatile__ (
+ /* load conditional address in %2 to %0 */
+ "1: lwx %0, %2, r0;\n"
+ /* decrement counter by 1*/
+ " addi %0, %0, -1;\n"
+ /* if < 0 abort (*v was <= 0)*/
+ " blti %0, 2f;\n"
+ /* attempt store of new value*/
+ " swx %0, %2, r0;\n"
+ /* checking msr carry flag */
+ " addic %1, r0, 0;\n"
+ /* store failed (MSR[C] set)? try again */
+ " bnei %1, 1b;\n"
+ "2: "
+ /* Outputs: result value */
+ : "=&r" (result), "=&r" (tmp)
+ /* Inputs: counter address */
+ : "r" (&v->counter)
+ : "cc", "memory"
+ );
- return res;
+ return result;
}
-#define atomic_dec_if_positive atomic_dec_if_positive
+#define atomic_dec_if_positive atomic_dec_if_positive
+
+#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
+
+#include <asm-generic/atomic64.h>
#endif /* _ASM_MICROBLAZE_ATOMIC_H */
diff --git a/arch/microblaze/include/asm/bitops.h b/arch/microblaze/include/asm/bitops.h
new file mode 100644
index 000000000000..a4f5ca09850f
--- /dev/null
+++ b/arch/microblaze/include/asm/bitops.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Microblaze atomic bit operations.
+ *
+ * Copyright (C) 2013 - 2020 Xilinx, Inc.
+ *
+ * Merged version by David Gibson <david@gibson.dropbear.id.au>.
+ * Based on ppc64 versions by: Dave Engebretsen, Todd Inglett, Don
+ * Reed, Pat McCarthy, Peter Bergner, Anton Blanchard. They
+ * originally took it from the ppc32 code.
+ *
+ * Within a word, bits are numbered LSB first. Lots of places make
+ * this assumption by directly testing bits with (val & (1<<nr)).
+ * This can cause confusion for large (> 1 word) bitmaps on a
+ * big-endian system because, unlike little endian, the number of each
+ * bit depends on the word size.
+ *
+ * The bitop functions are defined to work on unsigned longs, so for a
+ * ppc64 system the bits end up numbered:
+ * |63..............0|127............64|191...........128|255...........196|
+ * and on ppc32:
+ * |31.....0|63....31|95....64|127...96|159..128|191..160|223..192|255..224|
+ *
+ * There are a few little-endian macros used mostly for filesystem
+ * bitmaps, these work on similar bit arrays layouts, but
+ * byte-oriented:
+ * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
+ *
+ * The main difference is that bit 3-5 (64b) or 3-4 (32b) in the bit
+ * number field needs to be reversed compared to the big-endian bit
+ * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b).
+ */
+
+#ifndef _ASM_MICROBLAZE_BITOPS_H
+#define _ASM_MICROBLAZE_BITOPS_H
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <asm/types.h>
+#include <linux/compiler.h>
+#include <asm/asm-compat.h>
+#include <linux/stringify.h>
+
+/*
+ * clear_bit doesn't imply a memory barrier
+ */
+#define smp_mb__before_clear_bit() smp_mb()
+#define smp_mb__after_clear_bit() smp_mb()
+
+#define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
+#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
+
+/* Macro for generating the ***_bits() functions */
+#define DEFINE_BITOP(fn, op) \
+static inline void fn(unsigned long mask, volatile unsigned long *_p) \
+{ \
+ unsigned long tmp; \
+ unsigned long *p = (unsigned long *)_p; \
+ \
+ __asm__ __volatile__ ( \
+ /* load conditional address in %2 to %0 */ \
+ "1: lwx %0, %3, r0;\n" \
+ /* perform bit operation with mask */ \
+ stringify_in_c(op)" %0, %0, %2;\n" \
+ /* attempt store */ \
+ " swx %0, %3, r0;\n" \
+ /* checking msr carry flag */ \
+ " addic %0, r0, 0;\n" \
+ /* store failed (MSR[C] set)? try again */ \
+ " bnei %0, 1b;\n" \
+ : "=&r" (tmp), "+m" (*p) /* Outputs: tmp, p */ \
+ : "r" (mask), "r" (p) /* Inputs: mask, p */ \
+ : "cc", "memory" \
+ ); \
+}
+
+DEFINE_BITOP(set_bits, or)
+DEFINE_BITOP(clear_bits, andn)
+DEFINE_BITOP(clear_bits_unlock, andn)
+DEFINE_BITOP(change_bits, xor)
+
+static inline void set_bit(int nr, volatile unsigned long *addr)
+{
+ set_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr));
+}
+
+static inline void clear_bit(int nr, volatile unsigned long *addr)
+{
+ clear_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr));
+}
+
+static inline void clear_bit_unlock(int nr, volatile unsigned long *addr)
+{
+ clear_bits_unlock(BITOP_MASK(nr), addr + BITOP_WORD(nr));
+}
+
+static inline void change_bit(int nr, volatile unsigned long *addr)
+{
+ change_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr));
+}
+
+/*
+ * Like DEFINE_BITOP(), with changes to the arguments to 'op' and the output
+ * operands.
+ */
+#define DEFINE_TESTOP(fn, op) \
+static inline unsigned long fn(unsigned long mask, \
+ volatile unsigned long *_p) \
+{ \
+ unsigned long old, tmp; \
+ unsigned long *p = (unsigned long *)_p; \
+ \
+ __asm__ __volatile__ ( \
+ /* load conditional address in %4 to %0 */ \
+ "1: lwx %0, %4, r0;\n" \
+ /* perform bit operation with mask */ \
+ stringify_in_c(op)" %1, %0, %3;\n" \
+ /* attempt store */ \
+ " swx %1, %4, r0;\n" \
+ /* checking msr carry flag */ \
+ " addic %1, r0, 0;\n" \
+ /* store failed (MSR[C] set)? try again */ \
+ " bnei %1, 1b;\n" \
+ /* Outputs: old, tmp, p */ \
+ : "=&r" (old), "=&r" (tmp), "+m" (*p) \
+ /* Inputs: mask, p */ \
+ : "r" (mask), "r" (p) \
+ : "cc", "memory" \
+ ); \
+ return (old & mask); \
+}
+
+DEFINE_TESTOP(test_and_set_bits, or)
+DEFINE_TESTOP(test_and_set_bits_lock, or)
+DEFINE_TESTOP(test_and_clear_bits, andn)
+DEFINE_TESTOP(test_and_change_bits, xor)
+
+static inline int test_and_set_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ return test_and_set_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0;
+}
+
+static inline int test_and_set_bit_lock(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ return test_and_set_bits_lock(BITOP_MASK(nr),
+ addr + BITOP_WORD(nr)) != 0;
+}
+
+static inline int test_and_clear_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ return test_and_clear_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0;
+}
+
+static inline int test_and_change_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ return test_and_change_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0;
+}
+
+#include <asm-generic/bitops/non-atomic.h>
+
+static inline void __clear_bit_unlock(int nr, volatile unsigned long *addr)
+{
+ __clear_bit(nr, addr);
+}
+
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/fls64.h>
+
+/* Little-endian versions */
+#include <asm-generic/bitops/le.h>
+
+/* Bitmap functions for the ext2 filesystem */
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
+
+#include <asm-generic/bitops/sched.h>
+
+#endif /* _ASM_MICROBLAZE_BITOPS_H */
diff --git a/arch/microblaze/include/asm/cmpxchg.h b/arch/microblaze/include/asm/cmpxchg.h
index 3523b51aab36..0c24ac37df7f 100644
--- a/arch/microblaze/include/asm/cmpxchg.h
+++ b/arch/microblaze/include/asm/cmpxchg.h
@@ -4,6 +4,93 @@
#ifndef CONFIG_SMP
# include <asm-generic/cmpxchg.h>
+#else
+
+extern void __xchg_called_with_bad_pointer(void);
+
+static inline unsigned long __xchg_u32(volatile void *p, unsigned long val)
+{
+ unsigned long prev, temp;
+
+ __asm__ __volatile__ (
+ /* load conditional address in %3 to %0 */
+ "1: lwx %0, %3, r0;\n"
+ /* attempt store of new value */
+ " swx %4, %3, r0;\n"
+ /* checking msr carry flag */
+ " addic %1, r0, 0;\n"
+ /* store failed (MSR[C] set)? try again */
+ " bnei %1, 1b;\n"
+ /* Outputs: result value */
+ : "=&r" (prev), "=&r" (temp), "+m" (*(volatile unsigned int *)p)
+ /* Inputs: counter address */
+ : "r" (p), "r" (val)
+ : "cc", "memory"
+ );
+
+ return prev;
+}
+
+static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
+ int size)
+{
+ if (size == 4)
+ return __xchg_u32(ptr, x);
+
+ __xchg_called_with_bad_pointer();
+ return x;
+}
+
+#define xchg(ptr, x) ({ \
+ ((__typeof__(*(ptr))) \
+ __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
+})
+
+static inline unsigned long __cmpxchg_u32(volatile unsigned int *p,
+ unsigned long old, unsigned long new)
+{
+ int result, tmp;
+
+ __asm__ __volatile__ (
+ /* load conditional address in %3 to %0 */
+ "1: lwx %0, %3, r0;\n"
+ /* compare loaded value with old value */
+ " cmp %2, %0, %4;\n"
+ /* not equal to old value, write old value */
+ " bnei %2, 2f;\n"
+ /* attempt store of new value*/
+ " swx %5, %3, r0;\n"
+ /* checking msr carry flag */
+ " addic %2, r0, 0;\n"
+ /* store failed (MSR[C] set)? try again */
+ " bnei %2, 1b;\n"
+ "2: "
+ /* Outputs : result value */
+ : "=&r" (result), "+m" (*p), "=&r" (tmp)
+ /* Inputs : counter address, old, new */
+ : "r" (p), "r" (old), "r" (new), "r" (&tmp)
+ : "cc", "memory"
+ );
+
+ return result;
+}
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ unsigned long new, unsigned int size)
+{
+ if (size == 4)
+ return __cmpxchg_u32(ptr, old, new);
+
+ __xchg_called_with_bad_pointer();
+ return old;
+}
+
+#define cmpxchg(ptr, o, n) ({ \
+ ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr)))); \
+})
+
+
#endif
#endif /* _ASM_MICROBLAZE_CMPXCHG_H */
diff --git a/arch/microblaze/include/asm/cpuinfo.h b/arch/microblaze/include/asm/cpuinfo.h
index 786ffa669bf1..b8b04cd0095d 100644
--- a/arch/microblaze/include/asm/cpuinfo.h
+++ b/arch/microblaze/include/asm/cpuinfo.h
@@ -84,7 +84,7 @@ struct cpuinfo {
u32 pvr_user2;
};
-extern struct cpuinfo cpuinfo;
+DECLARE_PER_CPU(struct cpuinfo, cpu_info);
/* fwd declarations of the various CPUinfo populators */
void setup_cpuinfo(void);
diff --git a/arch/microblaze/include/asm/entry.h b/arch/microblaze/include/asm/entry.h
index 6c42bed41166..5acbf48f2d4f 100644
--- a/arch/microblaze/include/asm/entry.h
+++ b/arch/microblaze/include/asm/entry.h
@@ -10,7 +10,6 @@
#ifndef _ASM_MICROBLAZE_ENTRY_H
#define _ASM_MICROBLAZE_ENTRY_H
-#include <asm/percpu.h>
#include <asm/ptrace.h>
#include <linux/linkage.h>
@@ -21,12 +20,23 @@
#define PER_CPU(var) var
+#ifdef CONFIG_SMP
+/* Addresses in BRAM */
+#define CURRENT_SAVE_ADDR 0x50
+#define ENTRY_SP_ADDR 0x54
+#define PT_POOL_SPACE_ADDR 0x100
+#endif /* CONFIG_SMP */
+
# ifndef __ASSEMBLY__
+#include <asm/percpu.h>
+
+#ifndef CONFIG_SMP
DECLARE_PER_CPU(unsigned int, KSP); /* Saved kernel stack pointer */
DECLARE_PER_CPU(unsigned int, KM); /* Kernel/user mode */
DECLARE_PER_CPU(unsigned int, ENTRY_SP); /* Saved SP on kernel entry */
DECLARE_PER_CPU(unsigned int, R11_SAVE); /* Temp variable for entry */
DECLARE_PER_CPU(unsigned int, CURRENT_SAVE); /* Saved current pointer */
+#endif /* CONFIG_SMP */
extern asmlinkage void do_notify_resume(struct pt_regs *regs, int in_syscall);
# endif /* __ASSEMBLY__ */
diff --git a/arch/microblaze/include/asm/hardirq.h b/arch/microblaze/include/asm/hardirq.h
new file mode 100644
index 000000000000..f7df634fd6e4
--- /dev/null
+++ b/arch/microblaze/include/asm/hardirq.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Xilinx, Inc.
+ * Copyright (C) 2012 ARM Ltd.
+ */
+#ifndef _ASM_MICROBLAZE_HARDIRQ_H
+#define _ASM_MICROBLAZE_HARDIRQ_H
+
+# ifndef CONFIG_SMP
+#include <asm-generic/hardirq.h>
+# else
+#include <linux/cache.h>
+#include <linux/percpu.h>
+#include <linux/threads.h>
+#include <asm/irq.h>
+#include <linux/irq.h>
+
+typedef struct {
+ unsigned int __softirq_pending;
+ unsigned int ipi_irqs[MICROBLAZE_NUM_IPIS];
+} ____cacheline_aligned irq_cpustat_t;
+
+#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+
+#define __inc_irq_stat(cpu, member) __IRQ_STAT(cpu, member)++
+#define __get_irq_stat(cpu, member) __IRQ_STAT(cpu, member)
+
+u64 smp_irq_stat_cpu(unsigned int cpu);
+#define arch_irq_stat_cpu smp_irq_stat_cpu
+
+extern unsigned long irq_err_count;
+
+static inline void ack_bad_irq(unsigned int irq)
+{
+ irq_err_count++;
+}
+# endif /* CONFIG_SMP */
+
+#endif /* _ASM_MICROBLAZE_HARDIRQ_H */
diff --git a/arch/microblaze/include/asm/irq.h b/arch/microblaze/include/asm/irq.h
index 0a28e80bbab0..cb6ab55d1d01 100644
--- a/arch/microblaze/include/asm/irq.h
+++ b/arch/microblaze/include/asm/irq.h
@@ -11,7 +11,4 @@
struct pt_regs;
extern void do_IRQ(struct pt_regs *regs);
-/* should be defined in each interrupt controller driver */
-extern unsigned int xintc_get_irq(void);
-
#endif /* _ASM_MICROBLAZE_IRQ_H */
diff --git a/arch/microblaze/include/asm/mmu.h b/arch/microblaze/include/asm/mmu.h
index 97f1243101cc..5312b307b79a 100644
--- a/arch/microblaze/include/asm/mmu.h
+++ b/arch/microblaze/include/asm/mmu.h
@@ -15,7 +15,10 @@
# ifndef __ASSEMBLY__
/* Default "unsigned long" context */
-typedef unsigned long mm_context_t;
+typedef struct {
+ unsigned int id;
+ unsigned int active;
+} mm_context_t;
/* Hardware Page Table Entry */
typedef struct _PTE {
diff --git a/arch/microblaze/include/asm/mmu_context_mm.h b/arch/microblaze/include/asm/mmu_context_mm.h
index a1c7dd48454c..5ea769225b12 100644
--- a/arch/microblaze/include/asm/mmu_context_mm.h
+++ b/arch/microblaze/include/asm/mmu_context_mm.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
+ * Copyright (C) 2013-2020 Xilinx, Inc
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008-2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
@@ -13,10 +14,13 @@
#include <linux/sched.h>
#include <asm/bitops.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm-generic/mm_hooks.h>
-# ifdef __KERNEL__
/*
* This function defines the mapping from contexts to VSIDs (virtual
* segment IDs). We use a skew on both the context and the high 4 bits
@@ -43,7 +47,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
/*
* Set the current MMU context.
- * This is done byloading up the segment registers for the user part of the
+ * This is done by loading up the segment registers for the user part of the
* address space.
*
* Since the PGD is immediately available, it is much faster to simply
@@ -51,75 +55,42 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
* can be used for debugging on all processors (if you happen to have
* an Abatron).
*/
-extern void set_context(mm_context_t context, pgd_t *pgd);
-
-/*
- * Bitmap of contexts in use.
- * The size of this bitmap is LAST_CONTEXT + 1 bits.
- */
-extern unsigned long context_map[];
-
-/*
- * This caches the next context number that we expect to be free.
- * Its use is an optimization only, we can't rely on this context
- * number to be free, but it usually will be.
- */
-extern mm_context_t next_mmu_context;
+extern void set_context(unsigned long id, pgd_t *pgd);
/*
* Since we don't have sufficient contexts to give one to every task
* that could be in the system, we need to be able to steal contexts.
- * These variables support that.
*/
-extern atomic_t nr_free_contexts;
-extern struct mm_struct *context_mm[LAST_CONTEXT+1];
extern void steal_context(void);
/*
- * Get a new mmu context for the address space described by `mm'.
+ * Set up the context for a new address space.
*/
-static inline void get_mmu_context(struct mm_struct *mm)
-{
- mm_context_t ctx;
-
- if (mm->context != NO_CONTEXT)
- return;
- while (atomic_dec_if_positive(&nr_free_contexts) < 0)
- steal_context();
- ctx = next_mmu_context;
- while (test_and_set_bit(ctx, context_map)) {
- ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
- if (ctx > LAST_CONTEXT)
- ctx = 0;
- }
- next_mmu_context = (ctx + 1) & LAST_CONTEXT;
- mm->context = ctx;
- context_mm[ctx] = mm;
-}
+extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
/*
- * Set up the context for a new address space.
+ * We're finished using the context for an address space.
*/
-# define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0)
+extern void destroy_context(struct mm_struct *mm);
/*
- * We're finished using the context for an address space.
+ * Switch context
*/
-static inline void destroy_context(struct mm_struct *mm)
-{
- if (mm->context != NO_CONTEXT) {
- clear_bit(mm->context, context_map);
- mm->context = NO_CONTEXT;
- atomic_inc(&nr_free_contexts);
- }
-}
+extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
+ /* Mark this context has been used on the new CPU */
+ cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
tsk->thread.pgdir = next->pgd;
- get_mmu_context(next);
- set_context(next->context, next->pgd);
+
+ /* Nothing else to do if we aren't actually switching */
+ if (prev == next)
+ return;
+
+ /* Out of line for now */
+ switch_mmu_context(prev, next);
}
/*
@@ -129,12 +100,13 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
static inline void activate_mm(struct mm_struct *active_mm,
struct mm_struct *mm)
{
- current->thread.pgdir = mm->pgd;
- get_mmu_context(mm);
- set_context(mm->context, mm->pgd);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ switch_mm(active_mm, mm, current);
+ local_irq_restore(flags);
}
extern void mmu_context_init(void);
-# endif /* __KERNEL__ */
#endif /* _ASM_MICROBLAZE_MMU_CONTEXT_H */
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index 6b056f6545d8..ae76d5cd2025 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -368,20 +368,19 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
static inline unsigned long pte_update(pte_t *p, unsigned long clr,
unsigned long set)
{
- unsigned long flags, old, tmp;
-
- raw_local_irq_save(flags);
-
- __asm__ __volatile__( "lw %0, %2, r0 \n"
- "andn %1, %0, %3 \n"
- "or %1, %1, %4 \n"
- "sw %1, %2, r0 \n"
+ unsigned long old, tmp;
+
+ __asm__ __volatile__(
+ "1: lwx %0, %2, r0;\n"
+ " andn %1, %0, %3;\n"
+ " or %1, %1, %4;\n"
+ " swx %1, %2, r0;\n"
+ " addic %1, r0, 0;\n"
+ " bnei %1, 1b;\n"
: "=&r" (old), "=&r" (tmp)
: "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set)
: "cc");
- raw_local_irq_restore(flags);
-
return old;
}
diff --git a/arch/microblaze/include/asm/sections.h b/arch/microblaze/include/asm/sections.h
index a9311ad84a67..9da44d048522 100644
--- a/arch/microblaze/include/asm/sections.h
+++ b/arch/microblaze/include/asm/sections.h
@@ -14,6 +14,9 @@
extern char _ssbss[], _esbss[];
extern unsigned long __ivt_start[], __ivt_end[];
+extern char __initramfs_end[];
+extern char _end_tlb_mapping[];
+
extern u32 _fdt_start[], _fdt_end[];
# endif /* !__ASSEMBLY__ */
diff --git a/arch/microblaze/include/asm/smp.h b/arch/microblaze/include/asm/smp.h
new file mode 100644
index 000000000000..de28d172f833
--- /dev/null
+++ b/arch/microblaze/include/asm/smp.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * smp.h: MicroBlaze-specific SMP code
+ *
+ * Original was a copy of PowerPC smp.h, which was a copy of
+ * sparc smp.h. Now heavily modified for PPC.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996-2001 Cort Dougan <cort@fsmlabs.com>
+ * Copyright (C) 2013-2020 Xilinx, Inc.
+ */
+
+#ifndef _ASM_MICROBLAZE_SMP_H
+#define _ASM_MICROBLAZE_SMP_H
+
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/kernel.h>
+
+#include <asm/percpu.h>
+
+void handle_IPI(int ipinr, struct pt_regs *regs);
+
+void set_smp_cross_call(void (*)(unsigned int, unsigned int));
+
+void smp_send_debugger_break(void);
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+
+enum microblaze_msg {
+ MICROBLAZE_MSG_RESCHEDULE = 0,
+ MICROBLAZE_MSG_CALL_FUNCTION,
+ MICROBLAZE_MSG_CALL_FUNCTION_SINGLE,
+ MICROBLAZE_MSG_DEBUGGER_BREAK,
+ MICROBLAZE_NUM_IPIS
+};
+
+void start_secondary(void);
+extern struct thread_info *secondary_ti;
+void secondary_machine_init(void);
+
+void arch_send_call_function_single_ipi(int cpu);
+void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+#endif /* _ASM_MICROBLAZE_SMP_H */
diff --git a/arch/microblaze/include/asm/spinlock.h b/arch/microblaze/include/asm/spinlock.h
new file mode 100644
index 000000000000..0199ea9f7f0f
--- /dev/null
+++ b/arch/microblaze/include/asm/spinlock.h
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2013-2020 Xilinx, Inc.
+ */
+
+#ifndef _ASM_MICROBLAZE_SPINLOCK_H
+#define _ASM_MICROBLAZE_SPINLOCK_H
+
+/*
+ * Unlocked value: 0
+ * Locked value: 1
+ */
+#define arch_spin_is_locked(x) (READ_ONCE((x)->lock) != 0)
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+ /* load conditional address in %1 to %0 */
+ "1: lwx %0, %1, r0;\n"
+ /* not zero? try again */
+ " bnei %0, 1b;\n"
+ /* increment lock by 1 */
+ " addi %0, r0, 1;\n"
+ /* attempt store */
+ " swx %0, %1, r0;\n"
+ /* checking msr carry flag */
+ " addic %0, r0, 0;\n"
+ /* store failed (MSR[C] set)? try again */
+ " bnei %0, 1b;\n"
+ /* Outputs: temp variable for load result */
+ : "=&r" (tmp)
+ /* Inputs: lock address */
+ : "r" (&lock->lock)
+ : "cc", "memory"
+ );
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ unsigned long prev, tmp;
+
+ __asm__ __volatile__ (
+ /* load conditional address in %2 to %0 */
+ "1: lwx %0, %2, r0;\n"
+ /* not zero? clear reservation */
+ " bneid %0, 2f;\n"
+		/* increment lock by one if lwx was successful */
+ " addi %1, r0, 1;\n"
+ /* attempt store */
+ " swx %1, %2, r0;\n"
+ /* checking msr carry flag */
+ " addic %1, r0, 0;\n"
+ /* store failed (MSR[C] set)? try again */
+ " bnei %1, 1b;\n"
+ "2:"
+ /* Outputs: temp variable for load result */
+ : "=&r" (prev), "=&r" (tmp)
+ /* Inputs: lock address */
+ : "r" (&lock->lock)
+ : "cc", "memory"
+ );
+
+ return (prev == 0);
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+ /* load conditional address in %1 to %0 */
+ "1: lwx %0, %1, r0;\n"
+ /* clear */
+ " swx r0, %1, r0;\n"
+ /* checking msr carry flag */
+ " addic %0, r0, 0;\n"
+ /* store failed (MSR[C] set)? try again */
+ " bnei %0, 1b;\n"
+ /* Outputs: temp variable for load result */
+ : "=&r" (tmp)
+ /* Inputs: lock address */
+ : "r" (&lock->lock)
+ : "cc", "memory"
+ );
+}
+
+/* RWLOCKS */
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+ /* load conditional address in %1 to %0 */
+ "1: lwx %0, %1, r0;\n"
+ /* not zero? try again */
+ " bneid %0, 1b;\n"
+ /* set tmp to -1 */
+ " addi %0, r0, -1;\n"
+ /* attempt store */
+ " swx %0, %1, r0;\n"
+ /* checking msr carry flag */
+ " addic %0, r0, 0;\n"
+ /* store failed (MSR[C] set)? try again */
+ " bnei %0, 1b;\n"
+ /* Outputs: temp variable for load result */
+ : "=&r" (tmp)
+ /* Inputs: lock address */
+ : "r" (&rw->lock)
+ : "cc", "memory"
+ );
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+ unsigned long prev, tmp;
+
+ __asm__ __volatile__ (
+		/* load conditional address in %2 to %0 */
+ "1: lwx %0, %2, r0;\n"
+ /* not zero? abort */
+ " bneid %0, 2f;\n"
+ /* set tmp to -1 */
+ " addi %1, r0, -1;\n"
+ /* attempt store */
+ " swx %1, %2, r0;\n"
+ /* checking msr carry flag */
+ " addic %1, r0, 0;\n"
+ /* store failed (MSR[C] set)? try again */
+ " bnei %1, 1b;\n"
+ "2:"
+ /* Outputs: temp variable for load result */
+ : "=&r" (prev), "=&r" (tmp)
+ /* Inputs: lock address */
+ : "r" (&rw->lock)
+ : "cc", "memory"
+ );
+ /* prev value should be zero and MSR should be clear */
+ return (prev == 0);
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+ /* load conditional address in %1 to %0 */
+ "1: lwx %0, %1, r0;\n"
+ /* clear */
+ " swx r0, %1, r0;\n"
+ /* checking msr carry flag */
+ " addic %0, r0, 0;\n"
+ /* store failed (MSR[C] set)? try again */
+ " bnei %0, 1b;\n"
+ /* Outputs: temp variable for load result */
+ : "=&r" (tmp)
+ /* Inputs: lock address */
+ : "r" (&rw->lock)
+ : "cc", "memory"
+ );
+}
+
+/* Read locks */
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+ /* load conditional address in %1 to %0 */
+ "1: lwx %0, %1, r0;\n"
+ /* < 0 (WRITE LOCK active) try again */
+ " bltid %0, 1b;\n"
+		/* increment lock by 1 if lwx was successful */
+ " addi %0, %0, 1;\n"
+ /* attempt store */
+ " swx %0, %1, r0;\n"
+ /* checking msr carry flag */
+ " addic %0, r0, 0;\n"
+ /* store failed (MSR[C] set)? try again */
+ " bnei %0, 1b;\n"
+ /* Outputs: temp variable for load result */
+ : "=&r" (tmp)
+ /* Inputs: lock address */
+ : "r" (&rw->lock)
+ : "cc", "memory"
+ );
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+ /* load conditional address in %1 to tmp */
+ "1: lwx %0, %1, r0;\n"
+ /* tmp = tmp - 1 */
+ " addi %0, %0, -1;\n"
+ /* attempt store */
+ " swx %0, %1, r0;\n"
+ /* checking msr carry flag */
+ " addic %0, r0, 0;\n"
+ /* store failed (MSR[C] set)? try again */
+ " bnei %0, 1b;\n"
+ /* Outputs: temp variable for load result */
+ : "=&r" (tmp)
+ /* Inputs: lock address */
+ : "r" (&rw->lock)
+ : "cc", "memory"
+ );
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+	long prev, tmp; /* prev must be signed: lock < 0 means write-locked */
+
+	__asm__ __volatile__ (
+		/* load conditional address in %2 to %0 */
+		"1:	lwx	%0, %2, r0;\n"
+		/* < 0 bail, release lock */
+		"	bltid	%0, 2f;\n"
+		/* increment lock by 1 */
+		"	addi	%1, %0, 1;\n"
+		/* attempt store */
+		"	swx	%1, %2, r0;\n"
+		/* checking msr carry flag */
+		"	addic	%1, r0, 0;\n"
+		/* store failed (MSR[C] set)? try again */
+		"	bnei	%1, 1b;\n"
+		"2:"
+		/* Outputs: temp variable for load result */
+		: "=&r" (prev), "=&r" (tmp)
+		/* Inputs: lock address */
+		: "r" (&rw->lock)
+		: "cc", "memory"
+	);
+	return (prev >= 0);
+}
+
+#endif /* _ASM_MICROBLAZE_SPINLOCK_H */
diff --git a/arch/microblaze/include/asm/spinlock_types.h b/arch/microblaze/include/asm/spinlock_types.h
new file mode 100644
index 000000000000..ffd3588f6546
--- /dev/null
+++ b/arch/microblaze/include/asm/spinlock_types.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2013-2020 Xilinx, Inc.
+ */
+
+#ifndef __ASM_MICROBLAZE_SPINLOCK_TYPES_H
+#define __ASM_MICROBLAZE_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+ volatile unsigned int lock; /* 0 = unlocked (see __ARCH_SPIN_LOCK_UNLOCKED) */
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+
+typedef struct {
+ volatile signed int lock; /* signed: > 0 readers, < 0 writer, 0 free */
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
+
+#endif /* __ASM_MICROBLAZE_SPINLOCK_TYPES_H */
diff --git a/arch/microblaze/include/asm/tlbflush.h b/arch/microblaze/include/asm/tlbflush.h
index 6f8f5c77a050..c4967aeb0b9b 100644
--- a/arch/microblaze/include/asm/tlbflush.h
+++ b/arch/microblaze/include/asm/tlbflush.h
@@ -17,6 +17,8 @@
#include <asm/page.h>
#include <asm/pgalloc.h>
+#define MMU_NO_CONTEXT ((unsigned int)-1)
+
extern void _tlbie(unsigned long address);
extern void _tlbia(void);
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
index dd71637437f4..e8b0a47cd614 100644
--- a/arch/microblaze/kernel/Makefile
+++ b/arch/microblaze/kernel/Makefile
@@ -26,5 +26,6 @@ obj-$(CONFIG_MMU) += misc.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o mcount.o
obj-$(CONFIG_KGDB) += kgdb.o
+obj-$(CONFIG_SMP) += smp.o
obj-y += entry$(MMU).o
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
index dcba53803fa5..818152e37375 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -12,6 +12,7 @@
#include <asm/cacheflush.h>
#include <linux/cache.h>
+#include <linux/smp.h>
#include <asm/cpuinfo.h>
#include <asm/pvr.h>
@@ -158,6 +159,8 @@ do { \
static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
{
+ unsigned int cpu = smp_processor_id();
+ struct cpuinfo *cpuinfo = per_cpu_ptr(&cpu_info, cpu);
unsigned long flags;
#ifndef ASM_LOOP
int i;
@@ -166,15 +169,15 @@ static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
(unsigned int)start, (unsigned int) end);
CACHE_LOOP_LIMITS(start, end,
- cpuinfo.icache_line_length, cpuinfo.icache_size);
+ cpuinfo->icache_line_length, cpuinfo->icache_size);
local_irq_save(flags);
__disable_icache_msr();
#ifdef ASM_LOOP
- CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+ CACHE_RANGE_LOOP_1(start, end, cpuinfo->icache_line_length, wic);
#else
- for (i = start; i < end; i += cpuinfo.icache_line_length)
+ for (i = start; i < end; i += cpuinfo->icache_line_length)
__asm__ __volatile__ ("wic %0, r0;" \
: : "r" (i));
#endif
@@ -185,6 +188,8 @@ static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
static void __flush_icache_range_nomsr_irq(unsigned long start,
unsigned long end)
{
+ unsigned int cpu = smp_processor_id();
+ struct cpuinfo *cpuinfo = per_cpu_ptr(&cpu_info, cpu);
unsigned long flags;
#ifndef ASM_LOOP
int i;
@@ -193,15 +198,15 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
(unsigned int)start, (unsigned int) end);
CACHE_LOOP_LIMITS(start, end,
- cpuinfo.icache_line_length, cpuinfo.icache_size);
+ cpuinfo->icache_line_length, cpuinfo->icache_size);
local_irq_save(flags);
__disable_icache_nomsr();
#ifdef ASM_LOOP
- CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+ CACHE_RANGE_LOOP_1(start, end, cpuinfo->icache_line_length, wic);
#else
- for (i = start; i < end; i += cpuinfo.icache_line_length)
+ for (i = start; i < end; i += cpuinfo->icache_line_length)
__asm__ __volatile__ ("wic %0, r0;" \
: : "r" (i));
#endif
@@ -213,6 +218,8 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
static void __flush_icache_range_noirq(unsigned long start,
unsigned long end)
{
+ unsigned int cpu = smp_processor_id();
+ struct cpuinfo *cpuinfo = per_cpu_ptr(&cpu_info, cpu);
#ifndef ASM_LOOP
int i;
#endif
@@ -220,11 +227,11 @@ static void __flush_icache_range_noirq(unsigned long start,
(unsigned int)start, (unsigned int) end);
CACHE_LOOP_LIMITS(start, end,
- cpuinfo.icache_line_length, cpuinfo.icache_size);
+ cpuinfo->icache_line_length, cpuinfo->icache_size);
#ifdef ASM_LOOP
- CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+ CACHE_RANGE_LOOP_1(start, end, cpuinfo->icache_line_length, wic);
#else
- for (i = start; i < end; i += cpuinfo.icache_line_length)
+ for (i = start; i < end; i += cpuinfo->icache_line_length)
__asm__ __volatile__ ("wic %0, r0;" \
: : "r" (i));
#endif
@@ -232,6 +239,8 @@ static void __flush_icache_range_noirq(unsigned long start,
static void __flush_icache_all_msr_irq(void)
{
+ unsigned int cpu = smp_processor_id();
+ struct cpuinfo *cpuinfo = per_cpu_ptr(&cpu_info, cpu);
unsigned long flags;
#ifndef ASM_LOOP
int i;
@@ -241,11 +250,10 @@ static void __flush_icache_all_msr_irq(void)
local_irq_save(flags);
__disable_icache_msr();
#ifdef ASM_LOOP
- CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
+ CACHE_ALL_LOOP(cpuinfo->icache_size, cpuinfo->icache_line_length, wic);
#else
- for (i = 0; i < cpuinfo.icache_size;
- i += cpuinfo.icache_line_length)
- __asm__ __volatile__ ("wic %0, r0;" \
+ for (i = 0; i < cpuinfo->icache_size; i += cpuinfo->icache_line_length)
+ __asm__ __volatile__ ("wic %0, r0;" \
: : "r" (i));
#endif
__enable_icache_msr();
@@ -254,6 +262,8 @@ static void __flush_icache_all_msr_irq(void)
static void __flush_icache_all_nomsr_irq(void)
{
+ unsigned int cpu = smp_processor_id();
+ struct cpuinfo *cpuinfo = per_cpu_ptr(&cpu_info, cpu);
unsigned long flags;
#ifndef ASM_LOOP
int i;
@@ -263,11 +273,10 @@ static void __flush_icache_all_nomsr_irq(void)
local_irq_save(flags);
__disable_icache_nomsr();
#ifdef ASM_LOOP
- CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
+ CACHE_ALL_LOOP(cpuinfo->icache_size, cpuinfo->icache_line_length, wic);
#else
- for (i = 0; i < cpuinfo.icache_size;
- i += cpuinfo.icache_line_length)
- __asm__ __volatile__ ("wic %0, r0;" \
+ for (i = 0; i < cpuinfo->icache_size; i += cpuinfo->icache_line_length)
+ __asm__ __volatile__ ("wic %0, r0;" \
: : "r" (i));
#endif
__enable_icache_nomsr();
@@ -276,22 +285,25 @@ static void __flush_icache_all_nomsr_irq(void)
static void __flush_icache_all_noirq(void)
{
+ unsigned int cpu = smp_processor_id();
+ struct cpuinfo *cpuinfo = per_cpu_ptr(&cpu_info, cpu);
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
- CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
+ CACHE_ALL_LOOP(cpuinfo->icache_size, cpuinfo->icache_line_length, wic);
#else
- for (i = 0; i < cpuinfo.icache_size;
- i += cpuinfo.icache_line_length)
- __asm__ __volatile__ ("wic %0, r0;" \
+ for (i = 0; i < cpuinfo->icache_size; i += cpuinfo->icache_line_length)
+ __asm__ __volatile__ ("wic %0, r0;" \
: : "r" (i));
#endif
}
static void __invalidate_dcache_all_msr_irq(void)
{
+ unsigned int cpu = smp_processor_id();
+ struct cpuinfo *cpuinfo = per_cpu_ptr(&cpu_info, cpu);
unsigned long flags;
#ifndef ASM_LOOP
int i;
@@ -301,11 +313,10 @@ static void __invalidate_dcache_all_msr_irq(void)
local_irq_save(flags);
__disable_dcache_msr();
#ifdef ASM_LOOP
- CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
+ CACHE_ALL_LOOP(cpuinfo->dcache_size, cpuinfo->dcache_line_length, wdc);
#else
- for (i = 0; i < cpuinfo.dcache_size;
- i += cpuinfo.dcache_line_length)
- __asm__ __volatile__ ("wdc %0, r0;" \
+ for (i = 0; i < cpuinfo->dcache_size; i += cpuinfo->dcache_line_length)
+ __asm__ __volatile__ ("wdc %0, r0;" \
: : "r" (i));
#endif
__enable_dcache_msr();
@@ -314,6 +325,8 @@ static void __invalidate_dcache_all_msr_irq(void)
static void __invalidate_dcache_all_nomsr_irq(void)
{
+ unsigned int cpu = smp_processor_id();
+ struct cpuinfo *cpuinfo = per_cpu_ptr(&cpu_info, cpu);
unsigned long flags;
#ifndef ASM_LOOP
int i;
@@ -323,11 +336,10 @@ static void __invalidate_dcache_all_nomsr_irq(void)
local_irq_save(flags);
__disable_dcache_nomsr();
#ifdef ASM_LOOP
- CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
+ CACHE_ALL_LOOP(cpuinfo->dcache_size, cpuinfo->dcache_line_length, wdc);
#else
- for (i = 0; i < cpuinfo.dcache_size;
- i += cpuinfo.dcache_line_length)
- __asm__ __volatile__ ("wdc %0, r0;" \
+ for (i = 0; i < cpuinfo->dcache_size; i += cpuinfo->dcache_line_length)
+ __asm__ __volatile__ ("wdc %0, r0;" \
: : "r" (i));
#endif
__enable_dcache_nomsr();
@@ -336,16 +348,17 @@ static void __invalidate_dcache_all_nomsr_irq(void)
static void __invalidate_dcache_all_noirq_wt(void)
{
+ unsigned int cpu = smp_processor_id();
+ struct cpuinfo *cpuinfo = per_cpu_ptr(&cpu_info, cpu);
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
- CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
+ CACHE_ALL_LOOP(cpuinfo->dcache_size, cpuinfo->dcache_line_length, wdc);
#else
- for (i = 0; i < cpuinfo.dcache_size;
- i += cpuinfo.dcache_line_length)
- __asm__ __volatile__ ("wdc %0, r0;" \
+ for (i = 0; i < cpuinfo->dcache_size; i += cpuinfo->dcache_line_length)
+ __asm__ __volatile__ ("wdc %0, r0;" \
: : "r" (i));
#endif
}
@@ -359,17 +372,18 @@ static void __invalidate_dcache_all_noirq_wt(void)
*/
static void __invalidate_dcache_all_wb(void)
{
+ unsigned int cpu = smp_processor_id();
+ struct cpuinfo *cpuinfo = per_cpu_ptr(&cpu_info, cpu);
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
- CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
+ CACHE_ALL_LOOP(cpuinfo->dcache_size, cpuinfo->dcache_line_length,
wdc);
#else
- for (i = 0; i < cpuinfo.dcache_size;
- i += cpuinfo.dcache_line_length)
- __asm__ __volatile__ ("wdc %0, r0;" \
+ for (i = 0; i < cpuinfo->dcache_size; i += cpuinfo->dcache_line_length)
+ __asm__ __volatile__ ("wdc %0, r0;" \
: : "r" (i));
#endif
}
@@ -377,6 +391,8 @@ static void __invalidate_dcache_all_wb(void)
static void __invalidate_dcache_range_wb(unsigned long start,
unsigned long end)
{
+ unsigned int cpu = smp_processor_id();
+ struct cpuinfo *cpuinfo = per_cpu_ptr(&cpu_info, cpu);
#ifndef ASM_LOOP
int i;
#endif
@@ -384,11 +400,11 @@ static void __invalidate_dcache_range_wb(unsigned long start,
(unsigned int)start, (unsigned int) end);
CACHE_LOOP_LIMITS(start, end,
- cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+ cpuinfo->dcache_line_length, cpuinfo->dcache_size);
#ifdef ASM_LOOP
- CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
+ CACHE_RANGE_LOOP_2(start, end, cpuinfo->dcache_line_length, wdc.clear);
#else
- for (i = start; i < end; i += cpuinfo.dcache_line_length)
+ for (i = start; i < end; i += cpuinfo->dcache_line_length)
__asm__ __volatile__ ("wdc.clear %0, r0;" \
: : "r" (i));
#endif
@@ -397,18 +413,20 @@ static void __invalidate_dcache_range_wb(unsigned long start,
static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
unsigned long end)
{
+ unsigned int cpu = smp_processor_id();
+ struct cpuinfo *cpuinfo = per_cpu_ptr(&cpu_info, cpu);
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end);
CACHE_LOOP_LIMITS(start, end,
- cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+ cpuinfo->dcache_line_length, cpuinfo->dcache_size);
#ifdef ASM_LOOP
- CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+ CACHE_RANGE_LOOP_1(start, end, cpuinfo->dcache_line_length, wdc);
#else
- for (i = start; i < end; i += cpuinfo.dcache_line_length)
+ for (i = start; i < end; i += cpuinfo->dcache_line_length)
__asm__ __volatile__ ("wdc %0, r0;" \
: : "r" (i));
#endif
@@ -417,6 +435,8 @@ static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
unsigned long end)
{
+ unsigned int cpu = smp_processor_id();
+ struct cpuinfo *cpuinfo = per_cpu_ptr(&cpu_info, cpu);
unsigned long flags;
#ifndef ASM_LOOP
int i;
@@ -424,15 +444,15 @@ static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end);
CACHE_LOOP_LIMITS(start, end,
- cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+ cpuinfo->dcache_line_length, cpuinfo->dcache_size);
local_irq_save(flags);
__disable_dcache_msr();
#ifdef ASM_LOOP
- CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+ CACHE_RANGE_LOOP_1(start, end, cpuinfo->dcache_line_length, wdc);
#else
- for (i = start; i < end; i += cpuinfo.dcache_line_length)
+ for (i = start; i < end; i += cpuinfo->dcache_line_length)
__asm__ __volatile__ ("wdc %0, r0;" \
: : "r" (i));
#endif
@@ -444,6 +464,8 @@ static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
unsigned long end)
{
+ unsigned int cpu = smp_processor_id();
+ struct cpuinfo *cpuinfo = per_cpu_ptr(&cpu_info, cpu);
unsigned long flags;
#ifndef ASM_LOOP
int i;
@@ -452,15 +474,15 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
(unsigned int)start, (unsigned int) end);
CACHE_LOOP_LIMITS(start, end,
- cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+ cpuinfo->dcache_line_length, cpuinfo->dcache_size);
local_irq_save(flags);
__disable_dcache_nomsr();
#ifdef ASM_LOOP
- CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+ CACHE_RANGE_LOOP_1(start, end, cpuinfo->dcache_line_length, wdc);
#else
- for (i = start; i < end; i += cpuinfo.dcache_line_length)
+ for (i = start; i < end; i += cpuinfo->dcache_line_length)
__asm__ __volatile__ ("wdc %0, r0;" \
: : "r" (i));
#endif
@@ -471,23 +493,26 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
static void __flush_dcache_all_wb(void)
{
+ unsigned int cpu = smp_processor_id();
+ struct cpuinfo *cpuinfo = per_cpu_ptr(&cpu_info, cpu);
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
- CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
+ CACHE_ALL_LOOP(cpuinfo->dcache_size, cpuinfo->dcache_line_length,
wdc.flush);
#else
- for (i = 0; i < cpuinfo.dcache_size;
- i += cpuinfo.dcache_line_length)
- __asm__ __volatile__ ("wdc.flush %0, r0;" \
+ for (i = 0; i < cpuinfo->dcache_size; i += cpuinfo->dcache_line_length)
+ __asm__ __volatile__ ("wdc.flush %0, r0;" \
: : "r" (i));
#endif
}
static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
{
+ unsigned int cpu = smp_processor_id();
+ struct cpuinfo *cpuinfo = per_cpu_ptr(&cpu_info, cpu);
#ifndef ASM_LOOP
int i;
#endif
@@ -495,11 +520,11 @@ static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
(unsigned int)start, (unsigned int) end);
CACHE_LOOP_LIMITS(start, end,
- cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+ cpuinfo->dcache_line_length, cpuinfo->dcache_size);
#ifdef ASM_LOOP
- CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
+ CACHE_RANGE_LOOP_2(start, end, cpuinfo->dcache_line_length, wdc.flush);
#else
- for (i = start; i < end; i += cpuinfo.dcache_line_length)
+ for (i = start; i < end; i += cpuinfo->dcache_line_length)
__asm__ __volatile__ ("wdc.flush %0, r0;" \
: : "r" (i));
#endif
@@ -608,16 +633,19 @@ static const struct scache wt_nomsr_noirq = {
void microblaze_cache_init(void)
{
- if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
- if (cpuinfo.dcache_wb) {
+ unsigned int cpu = smp_processor_id();
+ struct cpuinfo *cpuinfo = per_cpu_ptr(&cpu_info, cpu);
+
+ if (cpuinfo->use_instr & PVR2_USE_MSR_INSTR) {
+ if (cpuinfo->dcache_wb) {
pr_info("wb_msr\n");
mbc = (struct scache *)&wb_msr;
- if (cpuinfo.ver_code <= CPUVER_7_20_D) {
+ if (cpuinfo->ver_code <= CPUVER_7_20_D) {
/* MS: problem with signal handling - hw bug */
pr_info("WB won't work properly\n");
}
} else {
- if (cpuinfo.ver_code >= CPUVER_7_20_A) {
+ if (cpuinfo->ver_code >= CPUVER_7_20_A) {
pr_info("wt_msr_noirq\n");
mbc = (struct scache *)&wt_msr_noirq;
} else {
@@ -626,15 +654,15 @@ void microblaze_cache_init(void)
}
}
} else {
- if (cpuinfo.dcache_wb) {
+ if (cpuinfo->dcache_wb) {
pr_info("wb_nomsr\n");
mbc = (struct scache *)&wb_nomsr;
- if (cpuinfo.ver_code <= CPUVER_7_20_D) {
+ if (cpuinfo->ver_code <= CPUVER_7_20_D) {
/* MS: problem with signal handling - hw bug */
pr_info("WB won't work properly\n");
}
} else {
- if (cpuinfo.ver_code >= CPUVER_7_20_A) {
+ if (cpuinfo->ver_code >= CPUVER_7_20_A) {
pr_info("wt_nomsr_noirq\n");
mbc = (struct scache *)&wt_nomsr_noirq;
} else {
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index cd9b4450763b..e2b87f136ba8 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (C) 2013-2020 Xilinx, Inc. All rights reserved.
* Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2007-2009 PetaLogix
* Copyright (C) 2007 John Williams <john.williams@petalogix.com>
@@ -10,6 +11,7 @@
#include <linux/clk.h>
#include <linux/init.h>
+#include <linux/smp.h>
#include <asm/cpuinfo.h>
#include <asm/pvr.h>
@@ -56,7 +58,7 @@ const struct cpu_ver_key cpu_ver_lookup[] = {
};
/*
- * FIXME Not sure if the actual key is defined by Xilinx in the PVR
+ * The actual key is defined by Xilinx in the PVR
*/
const struct family_string_key family_string_lookup[] = {
{"virtex2", 0x4},
@@ -85,37 +87,40 @@ const struct family_string_key family_string_lookup[] = {
{NULL, 0},
};
-struct cpuinfo cpuinfo;
-static struct device_node *cpu;
+DEFINE_PER_CPU(struct cpuinfo, cpu_info);
void __init setup_cpuinfo(void)
{
- cpu = of_get_cpu_node(0, NULL);
+ struct device_node *cpu;
+ unsigned int cpu_id = smp_processor_id();
+ struct cpuinfo *cpuinfo = per_cpu_ptr(&cpu_info, cpu_id);
+
+ cpu = of_get_cpu_node(cpu_id, NULL);
if (!cpu)
pr_err("You don't have cpu or are missing cpu reg property!!!\n");
- pr_info("%s: initialising\n", __func__);
+ pr_info("%s: initialising cpu %d\n", __func__, cpu_id);
switch (cpu_has_pvr()) {
case 0:
pr_warn("%s: No PVR support. Using static CPU info from FDT\n",
__func__);
- set_cpuinfo_static(&cpuinfo, cpu);
+ set_cpuinfo_static(cpuinfo, cpu);
break;
/* FIXME I found weird behavior with MB 7.00.a/b 7.10.a
* please do not use FULL PVR with MMU */
case 1:
pr_info("%s: Using full CPU PVR support\n",
__func__);
- set_cpuinfo_static(&cpuinfo, cpu);
- set_cpuinfo_pvr_full(&cpuinfo, cpu);
+ set_cpuinfo_static(cpuinfo, cpu);
+ set_cpuinfo_pvr_full(cpuinfo, cpu);
break;
default:
pr_warn("%s: Unsupported PVR setting\n", __func__);
- set_cpuinfo_static(&cpuinfo, cpu);
+ set_cpuinfo_static(cpuinfo, cpu);
}
- if (cpuinfo.mmu_privins)
+ if (cpuinfo->mmu_privins)
pr_warn("%s: Stream instructions enabled"
" - USERSPACE CAN LOCK THIS KERNEL!\n", __func__);
@@ -125,17 +130,24 @@ void __init setup_cpuinfo(void)
void __init setup_cpuinfo_clk(void)
{
struct clk *clk;
+ struct device_node *cpu;
+ unsigned int cpu_id = smp_processor_id();
+ struct cpuinfo *cpuinfo = per_cpu_ptr(&cpu_info, cpu_id);
+
+ cpu = of_get_cpu_node(cpu_id, NULL);
+ if (!cpu)
+ pr_err("You don't have cpu or are missing cpu reg property!!!\n");
clk = of_clk_get(cpu, 0);
if (IS_ERR(clk)) {
pr_err("ERROR: CPU CCF input clock not found\n");
/* take timebase-frequency from DTS */
- cpuinfo.cpu_clock_freq = fcpu(cpu, "timebase-frequency");
+ cpuinfo->cpu_clock_freq = fcpu(cpu, "timebase-frequency");
} else {
- cpuinfo.cpu_clock_freq = clk_get_rate(clk);
+ cpuinfo->cpu_clock_freq = clk_get_rate(clk);
}
- if (!cpuinfo.cpu_clock_freq) {
+ if (!cpuinfo->cpu_clock_freq) {
pr_err("ERROR: CPU clock frequency not setup\n");
BUG();
}
diff --git a/arch/microblaze/kernel/cpu/mb.c b/arch/microblaze/kernel/cpu/mb.c
index 9581d194d9e4..7a6fc13f925a 100644
--- a/arch/microblaze/kernel/cpu/mb.c
+++ b/arch/microblaze/kernel/cpu/mb.c
@@ -1,6 +1,7 @@
/*
* CPU-version specific code
*
+ * Copyright (C) 2013-2020 Xilinx, Inc. All rights reserved
* Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2006-2009 PetaLogix
*
@@ -27,117 +28,135 @@
static int show_cpuinfo(struct seq_file *m, void *v)
{
- char *fpga_family = "Unknown";
- char *cpu_ver = "Unknown";
- int i;
-
- /* Denormalised to get the fpga family string */
- for (i = 0; family_string_lookup[i].s != NULL; i++) {
- if (cpuinfo.fpga_family_code == family_string_lookup[i].k) {
- fpga_family = (char *)family_string_lookup[i].s;
- break;
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu) {
+ struct cpuinfo *cpuinfo = per_cpu_ptr(&cpu_info, cpu);
+ char *fpga_family = "Unknown";
+ char *cpu_ver = "Unknown";
+ int i;
+
+ /* Denormalised to get the fpga family string */
+ for (i = 0; family_string_lookup[i].s != NULL; i++) {
+ if (cpuinfo->fpga_family_code ==
+ family_string_lookup[i].k) {
+ fpga_family = (char *)family_string_lookup[i].s;
+ break;
+ }
}
- }
- /* Denormalised to get the hw version string */
- for (i = 0; cpu_ver_lookup[i].s != NULL; i++) {
- if (cpuinfo.ver_code == cpu_ver_lookup[i].k) {
- cpu_ver = (char *)cpu_ver_lookup[i].s;
- break;
+ /* Denormalised to get the hw version string */
+ for (i = 0; cpu_ver_lookup[i].s != NULL; i++) {
+ if (cpuinfo->ver_code == cpu_ver_lookup[i].k) {
+ cpu_ver = (char *)cpu_ver_lookup[i].s;
+ break;
+ }
}
- }
- seq_printf(m,
- "CPU-Family: MicroBlaze\n"
- "FPGA-Arch: %s\n"
- "CPU-Ver: %s, %s endian\n"
- "CPU-MHz: %d.%02d\n"
- "BogoMips: %lu.%02lu\n",
- fpga_family,
- cpu_ver,
- cpuinfo.endian ? "little" : "big",
- cpuinfo.cpu_clock_freq / 1000000,
- cpuinfo.cpu_clock_freq % 1000000,
- loops_per_jiffy / (500000 / HZ),
- (loops_per_jiffy / (5000 / HZ)) % 100);
-
- seq_printf(m,
- "HW:\n Shift:\t\t%s\n"
- " MSR:\t\t%s\n"
- " PCMP:\t\t%s\n"
- " DIV:\t\t%s\n",
- (cpuinfo.use_instr & PVR0_USE_BARREL_MASK) ? "yes" : "no",
- (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) ? "yes" : "no",
- (cpuinfo.use_instr & PVR2_USE_PCMP_INSTR) ? "yes" : "no",
- (cpuinfo.use_instr & PVR0_USE_DIV_MASK) ? "yes" : "no");
-
- seq_printf(m, " MMU:\t\t%x\n", cpuinfo.mmu);
-
- seq_printf(m,
- " MUL:\t\t%s\n"
- " FPU:\t\t%s\n",
- (cpuinfo.use_mult & PVR2_USE_MUL64_MASK) ? "v2" :
- (cpuinfo.use_mult & PVR0_USE_HW_MUL_MASK) ? "v1" : "no",
- (cpuinfo.use_fpu & PVR2_USE_FPU2_MASK) ? "v2" :
- (cpuinfo.use_fpu & PVR0_USE_FPU_MASK) ? "v1" : "no");
-
- seq_printf(m,
- " Exc:\t\t%s%s%s%s%s%s%s%s\n",
- (cpuinfo.use_exc & PVR2_OPCODE_0x0_ILL_MASK) ? "op0x0 " : "",
- (cpuinfo.use_exc & PVR2_UNALIGNED_EXC_MASK) ? "unal " : "",
- (cpuinfo.use_exc & PVR2_ILL_OPCODE_EXC_MASK) ? "ill " : "",
- (cpuinfo.use_exc & PVR2_IOPB_BUS_EXC_MASK) ? "iopb " : "",
- (cpuinfo.use_exc & PVR2_DOPB_BUS_EXC_MASK) ? "dopb " : "",
- (cpuinfo.use_exc & PVR2_DIV_ZERO_EXC_MASK) ? "zero " : "",
- (cpuinfo.use_exc & PVR2_FPU_EXC_MASK) ? "fpu " : "",
- (cpuinfo.use_exc & PVR2_USE_FSL_EXC) ? "fsl " : "");
-
- seq_printf(m,
- "Stream-insns:\t%sprivileged\n",
- cpuinfo.mmu_privins ? "un" : "");
-
- if (cpuinfo.use_icache)
seq_printf(m,
- "Icache:\t\t%ukB\tline length:\t%dB\n",
- cpuinfo.icache_size >> 10,
- cpuinfo.icache_line_length);
- else
- seq_puts(m, "Icache:\t\tno\n");
+ "Processor: %u\n"
+ "CPU-Family: MicroBlaze\n"
+ "FPGA-Arch: %s\n"
+ "CPU-Ver: %s, %s endian\n"
+ "CPU-MHz: %d.%02d\n"
+ "BogoMips: %lu.%02lu\n",
+ cpu,
+ fpga_family,
+ cpu_ver,
+ cpuinfo->endian ? "little" : "big",
+ cpuinfo->cpu_clock_freq / 1000000,
+ cpuinfo->cpu_clock_freq % 1000000,
+ loops_per_jiffy / (500000 / HZ),
+ (loops_per_jiffy / (5000 / HZ)) % 100);
+
+ seq_printf(m,
+ "HW:\n Shift:\t\t%s\n"
+ " MSR:\t\t%s\n"
+ " PCMP:\t\t%s\n"
+ " DIV:\t\t%s\n",
+ (cpuinfo->use_instr & PVR0_USE_BARREL_MASK) ?
+ "yes" : "no",
+ (cpuinfo->use_instr & PVR2_USE_MSR_INSTR) ?
+ "yes" : "no",
+ (cpuinfo->use_instr & PVR2_USE_PCMP_INSTR) ?
+ "yes" : "no",
+ (cpuinfo->use_instr & PVR0_USE_DIV_MASK) ?
+ "yes" : "no");
+
+ seq_printf(m, " MMU:\t\t%x\n", cpuinfo->mmu);
+
+ seq_printf(m,
+ " MUL:\t\t%s\n"
+ " FPU:\t\t%s\n",
+ (cpuinfo->use_mult & PVR2_USE_MUL64_MASK) ? "v2" :
+ (cpuinfo->use_mult & PVR0_USE_HW_MUL_MASK) ?
+ "v1" : "no",
+ (cpuinfo->use_fpu & PVR2_USE_FPU2_MASK) ? "v2" :
+ (cpuinfo->use_fpu & PVR0_USE_FPU_MASK) ? "v1" : "no");
+
+ seq_printf(m,
+ " Exc:\t\t%s%s%s%s%s%s%s%s\n",
+ (cpuinfo->use_exc & PVR2_OPCODE_0x0_ILL_MASK) ?
+ "op0x0 " : "",
+ (cpuinfo->use_exc & PVR2_UNALIGNED_EXC_MASK) ?
+ "unal " : "",
+ (cpuinfo->use_exc & PVR2_ILL_OPCODE_EXC_MASK) ?
+ "ill " : "",
+ (cpuinfo->use_exc & PVR2_IOPB_BUS_EXC_MASK) ?
+ "iopb " : "",
+ (cpuinfo->use_exc & PVR2_DOPB_BUS_EXC_MASK) ?
+ "dopb " : "",
+ (cpuinfo->use_exc & PVR2_DIV_ZERO_EXC_MASK) ?
+ "zero " : "",
+ (cpuinfo->use_exc & PVR2_FPU_EXC_MASK) ? "fpu " : "",
+ (cpuinfo->use_exc & PVR2_USE_FSL_EXC) ? "fsl " : "");
- if (cpuinfo.use_dcache) {
seq_printf(m,
- "Dcache:\t\t%ukB\tline length:\t%dB\n",
- cpuinfo.dcache_size >> 10,
- cpuinfo.dcache_line_length);
- seq_puts(m, "Dcache-Policy:\t");
- if (cpuinfo.dcache_wb)
- seq_puts(m, "write-back\n");
+ "Stream-insns:\t%sprivileged\n",
+ cpuinfo->mmu_privins ? "un" : "");
+
+ if (cpuinfo->use_icache)
+ seq_printf(m,
+ "Icache:\t\t%ukB\tline length:\t%dB\n",
+ cpuinfo->icache_size >> 10,
+ cpuinfo->icache_line_length);
else
- seq_puts(m, "write-through\n");
- } else {
- seq_puts(m, "Dcache:\t\tno\n");
- }
+ seq_puts(m, "Icache:\t\tno\n");
+
+ if (cpuinfo->use_dcache) {
+ seq_printf(m,
+ "Dcache:\t\t%ukB\tline length:\t%dB\n",
+ cpuinfo->dcache_size >> 10,
+ cpuinfo->dcache_line_length);
+ seq_puts(m, "Dcache-Policy:\t");
+ if (cpuinfo->dcache_wb)
+ seq_puts(m, "write-back\n");
+ else
+ seq_puts(m, "write-through\n");
+ } else {
+ seq_puts(m, "Dcache:\t\tno\n");
+ }
- seq_printf(m,
- "HW-Debug:\t%s\n",
- cpuinfo.hw_debug ? "yes" : "no");
+ seq_printf(m,
+ "HW-Debug:\t%s\n",
+ cpuinfo->hw_debug ? "yes" : "no");
- seq_printf(m,
- "PVR-USR1:\t%02x\n"
- "PVR-USR2:\t%08x\n",
- cpuinfo.pvr_user1,
- cpuinfo.pvr_user2);
+ seq_printf(m,
+ "PVR-USR1:\t%02x\n"
+ "PVR-USR2:\t%08x\n",
+ cpuinfo->pvr_user1,
+ cpuinfo->pvr_user2);
- seq_printf(m, "Page size:\t%lu\n", PAGE_SIZE);
+ seq_printf(m, "Page size:\t%lu\n", PAGE_SIZE);
+ seq_puts(m, "\n");
+ }
return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
- int i = *pos;
-
- return i < NR_CPUS ? (void *) (i + 1) : NULL;
+ return *pos < 1 ? (void *) 1 : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index b179f8f6d287..710a8659aa89 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -42,6 +42,11 @@ syscall_debug_table:
.space (__NR_syscalls * 4)
#endif /* DEBUG */
+#ifdef CONFIG_SMP
+#define CURRENT_SAVE CURRENT_SAVE_ADDR
+#define ENTRY_SP ENTRY_SP_ADDR
+#endif /* CONFIG_SMP */
+
#define C_ENTRY(name) .globl name; .align 4; name
/*
@@ -91,6 +96,10 @@ syscall_debug_table:
.macro clear_vms_ums
msrclr r0, MSR_VMS | MSR_UMS
.endm
+
+ .macro save_clear_vm
+ msrclr r11, MSR_VM
+ .endm
#else
.macro clear_bip
mfs r11, rmsr
@@ -153,6 +162,12 @@ syscall_debug_table:
andni r11, r11, (MSR_VMS|MSR_UMS)
mts rmsr,r11
.endm
+
+ .macro save_clear_vm
+ mfs r11, rmsr
+ andni r11, r11, MSR_VM
+ mts rmsr,r11
+ .endm
#endif
/* Define how to call high-level functions. With MMU, virtual mode must be
@@ -252,15 +267,32 @@ syscall_debug_table:
mts rmsr , r11; \
RESTORE_REGS_GP
+#ifndef CONFIG_SMP
+#define LOAD_PER_CPU(reg, addr) lwi reg, r0, TOPHYS(PER_CPU(addr));
+#define STORE_PER_CPU(reg, addr) swi reg, r0, TOPHYS(PER_CPU(addr));
+#define STORE_PER_CPU_VM(reg, addr) swi reg, r0, PER_CPU(addr);
+#else
+#define LOAD_PER_CPU(reg, addr) lwi reg, r0, PER_CPU(addr);
+#define STORE_PER_CPU(reg, addr) swi reg, r0, PER_CPU(addr);
+#define STORE_PER_CPU_VM(reg, addr) \
+ save_clear_vm; \
+ bri TOPHYS(1f); \
+1: \
+ swi reg, r0, PER_CPU(addr); \
+ mts rmsr, r11; \
+ bri __phys_to_virt(2f); \
+2:
+#endif /* CONFIG_SMP */
+
#define SAVE_STATE \
- swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */ \
+ STORE_PER_CPU(r1, ENTRY_SP) /* save stack */ \
/* See if already in kernel mode.*/ \
mfs r1, rmsr; \
andi r1, r1, MSR_UMS; \
bnei r1, 1f; \
/* Kernel-mode state save. */ \
/* Reload kernel stack-ptr. */ \
- lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
+ LOAD_PER_CPU(r1, ENTRY_SP) \
/* FIXME: I can add these two lines to one */ \
/* tophys(r1,r1); */ \
/* addik r1, r1, -PT_SIZE; */ \
@@ -269,7 +301,7 @@ syscall_debug_table:
brid 2f; \
swi r1, r1, PT_MODE; \
1: /* User-mode state save. */ \
- lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
+ LOAD_PER_CPU(r1, CURRENT_SAVE) /* get saved current */ \
tophys(r1,r1); \
lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \
/* MS these three instructions can be added to one */ \
@@ -278,12 +310,12 @@ syscall_debug_table:
/* addik r1, r1, -PT_SIZE; */ \
addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \
SAVE_REGS \
- lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
+ LOAD_PER_CPU(r11, ENTRY_SP) \
swi r11, r1, PT_R1; /* Store user SP. */ \
swi r0, r1, PT_MODE; /* Was in user-mode. */ \
/* MS: I am clearing UMS even in case when I come from kernel space */ \
clear_ums; \
-2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
+2: LOAD_PER_CPU(CURRENT_TASK, CURRENT_SAVE)
.text
@@ -300,10 +332,10 @@ syscall_debug_table:
* are masked. This is nice, means we don't have to CLI before state save
*/
C_ENTRY(_user_exception):
- swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
+ STORE_PER_CPU(r1, ENTRY_SP) /* save stack */
addi r14, r14, 4 /* return address is 4 byte after call */
- lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
+ LOAD_PER_CPU(r1, CURRENT_SAVE); /* get saved current */
tophys(r1,r1);
lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */
/* calculate kernel stack pointer from task struct 8k */
@@ -316,10 +348,10 @@ C_ENTRY(_user_exception):
swi r0, r1, PT_R4
swi r0, r1, PT_MODE; /* Was in user-mode. */
- lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
+ LOAD_PER_CPU(r11, ENTRY_SP);
swi r11, r1, PT_R1; /* Store user SP. */
clear_ums;
-2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
+2: LOAD_PER_CPU(CURRENT_TASK, CURRENT_SAVE);
/* Save away the syscall number. */
swi r12, r1, PT_R0;
tovirt(r1,r1)
@@ -434,8 +466,8 @@ C_ENTRY(ret_from_trap):
/* Finally, return to user state. */
4: set_bip; /* Ints masked for state restore */
- swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
VM_OFF;
+ STORE_PER_CPU(CURRENT_TASK, CURRENT_SAVE); /* save current */
tophys(r1,r1);
RESTORE_REGS_RTBD;
addik r1, r1, PT_SIZE /* Clean up stack space. */
@@ -462,8 +494,8 @@ TRAP_return: /* Make global symbol for debugging */
saved context). */
C_ENTRY(ret_from_fork):
bralid r15, schedule_tail; /* ...which is schedule_tail's arg */
- add r5, r3, r0; /* switch_thread returns the prev task */
- /* ( in the delay slot ) */
+ add r5, r3, r0; /* Arg 0: _switch_to places prev task struct */
+ /* pointer in r3 in the delay slot ) */
brid ret_from_trap; /* Do normal trap return */
add r3, r0, r0; /* Child's fork call should return 0. */
@@ -519,11 +551,11 @@ C_ENTRY(unaligned_data_trap):
* are used and they use r0 instead of r11.
* I am using ENTRY_SP which should be primary used only for stack
* pointer saving. */
- swi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
+ STORE_PER_CPU(r11, ENTRY_SP);
set_bip; /* equalize initial state for all possible entries */
clear_eip;
set_ee;
- lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
+ LOAD_PER_CPU(r11, ENTRY_SP);
SAVE_STATE /* Save registers.*/
/* PC, before IRQ/trap - this is one instruction above */
swi r17, r1, PT_PC;
@@ -618,8 +650,8 @@ C_ENTRY(ret_from_exc):
/* Finally, return to user state. */
4: set_bip; /* Ints masked for state restore */
- swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
VM_OFF;
+ STORE_PER_CPU(CURRENT_TASK, CURRENT_SAVE); /* save current */
tophys(r1,r1);
RESTORE_REGS_RTBD;
@@ -653,7 +685,7 @@ EXC_return: /* Make global symbol for debugging */
C_ENTRY(_interrupt):
/* MS: we are in physical address */
/* Save registers, switch to proper stack, convert SP to virtual.*/
- swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
+ STORE_PER_CPU(r1, ENTRY_SP)
/* MS: See if already in kernel mode. */
mfs r1, rmsr
nop
@@ -661,7 +693,7 @@ C_ENTRY(_interrupt):
bnei r1, 1f
/* Kernel-mode state save. */
- lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
+ LOAD_PER_CPU(r1, ENTRY_SP)
tophys(r1,r1); /* MS: I have in r1 physical address where stack is */
/* save registers */
/* MS: Make room on the stack -> activation record */
@@ -672,7 +704,7 @@ C_ENTRY(_interrupt):
1:
/* User-mode state save. */
/* MS: get the saved current */
- lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
+ LOAD_PER_CPU(r1, CURRENT_SAVE);
tophys(r1,r1);
lwi r1, r1, TS_THREAD_INFO;
addik r1, r1, THREAD_SIZE;
@@ -682,11 +714,11 @@ C_ENTRY(_interrupt):
SAVE_REGS
/* calculate mode */
swi r0, r1, PT_MODE;
- lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
+ LOAD_PER_CPU(r11, ENTRY_SP);
swi r11, r1, PT_R1;
clear_ums;
2:
- lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
+ LOAD_PER_CPU(CURRENT_TASK, CURRENT_SAVE);
tovirt(r1,r1)
addik r15, r0, irq_call;
irq_call:rtbd r0, do_IRQ;
@@ -719,8 +751,8 @@ ret_from_irq:
no_intr_resched:
/* Disable interrupts, we are now committed to the state restore */
disable_irq
- swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
VM_OFF;
+ STORE_PER_CPU(CURRENT_TASK, CURRENT_SAVE);
tophys(r1,r1);
RESTORE_REGS
addik r1, r1, PT_SIZE /* MS: Clean up stack space. */
@@ -759,14 +791,14 @@ IRQ_return: /* MS: Make global symbol for debugging */
*/
C_ENTRY(_debug_exception):
/* BIP bit is set on entry, no interrupts can occur */
- swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
+ STORE_PER_CPU(r1, ENTRY_SP)
mfs r1, rmsr
nop
andi r1, r1, MSR_UMS
bnei r1, 1f
/* MS: Kernel-mode state save - kgdb */
- lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
+ LOAD_PER_CPU(r1, ENTRY_SP); /* Reload kernel stack-ptr*/
/* BIP bit is set on entry, no interrupts can occur */
addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE;
@@ -800,7 +832,7 @@ C_ENTRY(_debug_exception):
bri 0
/* MS: User-mode state save - gdb */
-1: lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
+1: LOAD_PER_CPU(r1, CURRENT_SAVE); /* get saved current */
tophys(r1,r1);
lwi r1, r1, TS_THREAD_INFO; /* get the thread info */
addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */
@@ -810,9 +842,9 @@ C_ENTRY(_debug_exception):
SAVE_REGS;
swi r16, r1, PT_PC; /* Save LP */
swi r0, r1, PT_MODE; /* Was in user-mode. */
- lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
+ LOAD_PER_CPU(r11, ENTRY_SP);
swi r11, r1, PT_R1; /* Store user SP. */
- lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
+ LOAD_PER_CPU(CURRENT_TASK, CURRENT_SAVE);
tovirt(r1,r1)
set_vms;
addik r5, r1, 0;
@@ -848,8 +880,8 @@ dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */
bri 1b
/* Finally, return to user state. */
-4: swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
- VM_OFF;
+4: VM_OFF;
+ STORE_PER_CPU(CURRENT_TASK, CURRENT_SAVE); /* save current */
tophys(r1,r1);
/* MS: Restore all regs */
RESTORE_REGS_RTBD
@@ -918,7 +950,7 @@ ENTRY(_switch_to)
/* update r31, the current-give me pointer to task which will be next */
lwi CURRENT_TASK, r6, TI_TASK
/* stored it to current_save too */
- swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)
+ STORE_PER_CPU_VM(CURRENT_TASK, CURRENT_SAVE)
/* get new process' cpu context and restore */
/* give me start where start context of next task */
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index 14b276406153..eb889ec53a1a 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -30,6 +30,7 @@
#include <linux/init.h>
#include <linux/linkage.h>
+#include <asm/entry.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <linux/of_fdt.h> /* for OF_DT_HEADER */
@@ -38,6 +39,7 @@
#include <asm/setup.h> /* COMMAND_LINE_SIZE */
#include <asm/mmu.h>
#include <asm/processor.h>
+#include <asm/asm-offsets.h>
.section .data
.global empty_zero_page
@@ -47,7 +49,11 @@ empty_zero_page:
.global swapper_pg_dir
swapper_pg_dir:
.space PAGE_SIZE
-
+#ifdef CONFIG_SMP
+temp_boot_stack:
+ .space 1024
+#define CURRENT_SAVE CURRENT_SAVE_ADDR
+#endif /* CONFIG_SMP */
#endif /* CONFIG_MMU */
.section .rodata
@@ -79,6 +85,13 @@ real_start:
msrclr r8, 0 /* clear nothing - just read msr for test */
cmpu r8, r8, r1 /* r1 must contain msr reg content */
+#ifdef CONFIG_SMP
+ /* skip FDT copy if secondary */
+ mfs r11, rpvr0
+ andi r11, r11, 0xFF
+ bnei r11, _setup_initial_mmu
+#endif /* CONFIG_SMP */
+
/* r7 may point to an FDT, or there may be one linked in.
if it's in r7, we've got to save it away ASAP.
We ensure r7 points to a valid FDT, just in case the bootloader
@@ -147,6 +160,7 @@ _copy_bram:
#endif
/* We have to turn on the MMU right away. */
+_setup_initial_mmu:
/*
* Set up the initial MMU state so we can do the first level of
* kernel initialization. This maps the first 16 MBytes of memory 1:1
@@ -178,50 +192,51 @@ _invalidate:
tophys(r4,r3) /* Load the kernel physical address */
/* start to do TLB calculation */
- addik r12, r0, _end
+ addik r12, r0, _end_tlb_mapping
rsub r12, r3, r12
- addik r12, r12, CONFIG_LOWMEM_SIZE >> PTE_SHIFT /* that's the pad */
or r9, r0, r0 /* TLB0 = 0 */
or r10, r0, r0 /* TLB1 = 0 */
- addik r11, r12, -0x1000000
- bgei r11, GT16 /* size is greater than 16MB */
- addik r11, r12, -0x0800000
- bgei r11, GT8 /* size is greater than 8MB */
- addik r11, r12, -0x0400000
- bgei r11, GT4 /* size is greater than 4MB */
- /* size is less than 4MB */
- addik r11, r12, -0x0200000
- bgei r11, GT2 /* size is greater than 2MB */
- addik r9, r0, 0x0100000 /* TLB0 must be 1MB */
- addik r11, r12, -0x0100000
- bgei r11, GT1 /* size is greater than 1MB */
- /* TLB1 is 0 which is setup above */
- bri tlb_end
-GT4: /* r11 contains the rest - will be either 1 or 4 */
- ori r9, r0, 0x400000 /* TLB0 is 4MB */
- bri TLB1
-GT16: /* TLB0 is 16MB */
+ /*
+ * Linux is 4MB aligned that's why we can just check certain sizes.
+ * Add 12MB, 16MB and 8MB on the top of list because that's normal
+ * sizes which are often used.
+ */
+ addik r11, r12, -0xc00000 /* 12 MB */
+ beqi r11, GT12
+ addik r11, r12, -0x1000000 /* 16 MB */
+ beqi r11, GT16
+ addik r11, r12, -0x800000 /* 8 MB */
+ beqi r11, GT8
+ addik r11, r12, -0x2000000 /* 32 MB */
+ beqi r11, GT32
+ addik r11, r12, -0x1800000 /* 24 MB */
+ beqi r11, GT24
+ addik r11, r12, -0x1400000 /* 20 MB */
+ beqi r11, GT20
+ addik r11, r12, -0x400000 /* 4 MB */
+ beqi r11, GT4
+ /* if no exact size matched above, fall back to 32MB mapping */
+GT32:
addik r9, r0, 0x1000000 /* means TLB0 is 16MB */
-TLB1:
- /* must be used r2 because of subtract if failed */
- addik r2, r11, -0x0400000
- bgei r2, GT20 /* size is greater than 16MB */
- /* size is >16MB and <20MB */
- addik r11, r11, -0x0100000
- bgei r11, GT17 /* size is greater than 17MB */
- /* kernel is >16MB and < 17MB */
-GT1:
- addik r10, r0, 0x0100000 /* means TLB1 is 1MB */
- bri tlb_end
-GT2: /* TLB0 is 0 and TLB1 will be 4MB */
-GT17: /* TLB1 is 4MB - kernel size <20MB */
- addik r10, r0, 0x0400000 /* means TLB1 is 4MB */
- bri tlb_end
-GT8: /* TLB0 is still zero that's why I can use only TLB1 */
-GT20: /* TLB1 is 16MB - kernel size >20MB */
+GT16:
addik r10, r0, 0x1000000 /* means TLB1 is 16MB */
+ bri tlb_end
+GT24:
+ addik r9, r0, 0x1000000 /* means TLB0 is 16MB */
+GT8:
+ addik r10, r0, 0x800000 /* means TLB1 is 8MB */
+ bri tlb_end
+GT20:
+ addik r9, r0, 0x1000000 /* means TLB0 is 16MB */
+GT4:
+ addik r10, r0, 0x400000 /* means TLB1 is 4MB */
+ bri tlb_end
+GT12:
+ addik r9, r0, 0x800000 /* means TLB0 is 8MB */
+ addik r10, r0, 0x400000 /* means TLB1 is 4MB */
+ /* NOTE: No need to jump to tlb_end here */
tlb_end:
/*
@@ -322,6 +337,18 @@ jump_over2:
*/
turn_on_mmu:
ori r15,r0,start_here
+#ifdef CONFIG_SMP
+ /*
+ * Read PVR and mask off all but CPU id bits to use to select
+ * boot sequence
+ */
+ mfs r4, rpvr0
+ andi r4, r4, 0xFF
+
+ beqi r4, finish
+ ori r15, r0, start_secondary_cpu
+finish:
+#endif /* CONFIG_SMP */
ori r4,r0,MSR_KERNEL_VMS
mts rmsr,r4
nop
@@ -340,6 +367,10 @@ start_here:
/* Initialize r31 with current task address */
addik r31, r0, init_task
+#ifdef CONFIG_MMU
+ /* save current for CPU 0 */
+ swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
+#endif
addik r11, r0, machine_early_init
brald r15, r11
@@ -384,3 +415,61 @@ kernel_load_context:
rted r17, 0 /* enable MMU and jump to start_kernel */
nop
#endif /* CONFIG_MMU */
+
+#ifdef CONFIG_SMP
+/* Entry point for secondary processors */
+start_secondary_cpu:
+
+ /* Initialize small data anchors */
+ addik r13, r0, _KERNEL_SDA_BASE_
+ addik r2, r0, _KERNEL_SDA2_BASE_
+
+ /* Initialize stack pointer */
+ addik r1, r0, temp_boot_stack + 1024 - 4
+
+ /*
+ * Initialize the exception table.
+ */
+ addik r11, r0, secondary_machine_init
+ brald r15, r11
+ nop
+
+ lwi r1, r0, secondary_ti
+
+ /* Initialize r31 with current task address */
+ lwi CURRENT_TASK, r1, TI_TASK
+ /* save current for secondary CPU */
+ swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
+
+ /* Initialize stack pointer */
+ addi r1, r1, THREAD_SIZE - 4
+ swi r0, r1, 0
+
+ /* Initialize MMU */
+ ori r11, r0, 0x10000000
+ mts rzpr, r11
+
+ ori r15, r0, TOPHYS(kernel_load_context_secondary)
+ ori r4, r0, MSR_KERNEL
+ mts rmsr, r4
+ nop
+ bri 4
+ rted r15, 0
+ nop
+
+ /* Load up the kernel context */
+kernel_load_context_secondary:
+ # Keep entry 0 and 1 valid. Entry 3 mapped to LMB can go away.
+ ori r5, r0, MICROBLAZE_LMB_TLB_ID
+ mts rtlbx, r5
+ nop
+ mts rtlbhi, r0
+ nop
+ addi r15, r0, machine_halt
+ ori r17, r0, start_secondary
+ ori r4, r0, MSR_KERNEL_VMS
+ mts rmsr, r4
+ nop
+ rted r17, 0 /* enable MMU and jump to start_kernel */
+ nop
+#endif /* CONFIG_SMP */
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index 95558f32d60a..6523339d42e5 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -252,10 +252,15 @@
/* wrappers to restore state before coming to entry.S */
#ifdef CONFIG_MMU
+#ifdef CONFIG_SMP
+#define CURRENT_SAVE __phys_to_virt(CURRENT_SAVE_ADDR)
+#define pt_pool_space __phys_to_virt(PT_POOL_SPACE_ADDR)
+#else
.section .data
.align 4
pt_pool_space:
.space PT_SIZE
+#endif /* CONFIG_SMP */
#ifdef DEBUG
/* Create space for exception counting. */
diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c
index 903dad822fad..0b37dde60a1e 100644
--- a/arch/microblaze/kernel/irq.c
+++ b/arch/microblaze/kernel/irq.c
@@ -20,29 +20,10 @@
#include <linux/irqchip.h>
#include <linux/of_irq.h>
-static u32 concurrent_irq;
-
void __irq_entry do_IRQ(struct pt_regs *regs)
{
- unsigned int irq;
- struct pt_regs *old_regs = set_irq_regs(regs);
trace_hardirqs_off();
-
- irq_enter();
- irq = xintc_get_irq();
-next_irq:
- BUG_ON(!irq);
- generic_handle_irq(irq);
-
- irq = xintc_get_irq();
- if (irq != -1U) {
- pr_debug("next irq: %d\n", irq);
- ++concurrent_irq;
- goto next_irq;
- }
-
- irq_exit();
- set_irq_regs(old_regs);
+ handle_arch_irq(regs);
trace_hardirqs_on();
}
diff --git a/arch/microblaze/kernel/kgdb.c b/arch/microblaze/kernel/kgdb.c
index 130cd0f064ce..11d63209938e 100644
--- a/arch/microblaze/kernel/kgdb.c
+++ b/arch/microblaze/kernel/kgdb.c
@@ -8,6 +8,7 @@
#include <linux/kgdb.h>
#include <linux/kdebug.h>
+#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
@@ -105,6 +106,13 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
gdb_regs[GDB_PVR + i] = pvr.pvr[i];
}
+#ifdef CONFIG_SMP
+void kgdb_roundup_cpus(void)
+{
+ smp_send_debugger_break();
+}
+#endif
+
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
regs->pc = ip;
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index dd121e33b8e3..90b8c070701f 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -28,6 +28,7 @@
#include <linux/pci.h>
#include <linux/cache.h>
#include <linux/of.h>
+#include <linux/smp.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/entry.h>
@@ -35,11 +36,23 @@
#include <asm/pgtable.h>
+#ifdef CONFIG_SMP
+static void __init smp_setup_cpu_maps(void)
+{
+ int i;
+
+ for (i = 0; i < NR_CPUS; i++) {
+ set_cpu_present(i, true);
+ set_cpu_possible(i, true);
+ }
+}
+#else
DEFINE_PER_CPU(unsigned int, KSP); /* Saved kernel stack pointer */
DEFINE_PER_CPU(unsigned int, KM); /* Kernel/user mode */
DEFINE_PER_CPU(unsigned int, ENTRY_SP); /* Saved SP on kernel entry */
DEFINE_PER_CPU(unsigned int, R11_SAVE); /* Temp variable for entry */
DEFINE_PER_CPU(unsigned int, CURRENT_SAVE); /* Saved current pointer */
+#endif /* CONFIG_SMP */
/*
* Placed cmd_line to .data section because can be initialized from
@@ -63,6 +76,9 @@ void __init setup_arch(char **cmdline_p)
microblaze_cache_init();
xilinx_pci_init();
+#ifdef CONFIG_SMP
+ smp_setup_cpu_maps();
+#endif
}
#ifdef CONFIG_MTD_UCLINUX
@@ -132,7 +148,7 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
if (fdt)
pr_info("FDT at 0x%08x\n", fdt);
else
- pr_info("Compiled-in FDT at %p\n", _fdt_start);
+ pr_info("Compiled-in FDT at 0x%08x\n", (unsigned)&_fdt_start);
#ifdef CONFIG_MTD_UCLINUX
pr_info("Found romfs @ 0x%08x (0x%08x)\n",
@@ -169,8 +185,10 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
*dst = *src;
/* Initialize global data */
+#ifndef CONFIG_SMP
per_cpu(KM, 0) = 0x1; /* We start in kernel mode */
per_cpu(CURRENT_SAVE, 0) = (unsigned long)current;
+#endif
}
void __init time_init(void)
diff --git a/arch/microblaze/kernel/smp.c b/arch/microblaze/kernel/smp.c
new file mode 100644
index 000000000000..424e8b3327cf
--- /dev/null
+++ b/arch/microblaze/kernel/smp.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SMP support for MicroBlaze, borrowing a great
+ * deal of code from the PowerPC implementation
+ *
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ * Copyright (C) 2013-2020 Xilinx, Inc.
+ */
+
+#include <linux/atomic.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/profile.h>
+#include <linux/sched/task.h>
+#include <linux/seq_file.h>
+#include <linux/smp.h>
+
+#include <asm/barrier.h>
+#include <asm/cpuinfo.h>
+#include <asm/tlbflush.h>
+
+struct thread_info *secondary_ti;
+
+static struct thread_info *current_set[NR_CPUS];
+
+unsigned long irq_err_count;
+
+static unsigned int boot_cpuid;
+
+static DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
+
+static volatile unsigned int cpu_callin_map[NR_CPUS];
+
+static void (*crash_ipi_function_ptr)(struct pt_regs *);
+
+static const char * const smp_ipi_name[] = {
+ [MICROBLAZE_MSG_RESCHEDULE] = "ipi reschedule",
+ [MICROBLAZE_MSG_CALL_FUNCTION] = "ipi call function",
+ [MICROBLAZE_MSG_CALL_FUNCTION_SINGLE] = "ipi call function single",
+ [MICROBLAZE_MSG_DEBUGGER_BREAK] = "ipi debugger",
+};
+
+/* Functions for recording IPI handler */
+static void (*__smp_cross_call)(unsigned int, unsigned int);
+
+void __init set_smp_cross_call(void (*fn)(unsigned int, unsigned int))
+{
+ if (!__smp_cross_call)
+ __smp_cross_call = fn;
+}
+
+static inline struct cpumask *cpu_core_mask(int cpu)
+{
+ return per_cpu(cpu_core_map, cpu);
+}
+
+u64 smp_irq_stat_cpu(unsigned int cpu)
+{
+ u64 sum = 0;
+ int i;
+
+ for (i = 0; i < MICROBLAZE_NUM_IPIS; i++)
+ sum += __get_irq_stat(cpu, ipi_irqs[i]);
+
+ return sum;
+}
+
+static void show_ipi_list(struct seq_file *p, int prec)
+{
+ unsigned int cpu, i;
+
+ for (i = 0; i < MICROBLAZE_NUM_IPIS; i++) {
+ seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
+ prec >= 4 ? " " : "");
+ for_each_online_cpu(cpu)
+ seq_printf(p, "%10u ",
+ __get_irq_stat(cpu, ipi_irqs[i]));
+ seq_printf(p, " %s\n", smp_ipi_name[i]);
+ }
+}
+
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+ show_ipi_list(p, prec);
+ seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
+ return 0;
+}
+
+void handle_IPI(int ipinr, struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+ unsigned int cpu = smp_processor_id();
+
+ pr_debug("%s: cpu: %d got IPI: %d\n", __func__, cpu, ipinr);
+
+ __inc_irq_stat(cpu, ipi_irqs[ipinr]);
+
+ switch (ipinr) {
+ case MICROBLAZE_MSG_RESCHEDULE:
+ scheduler_ipi();
+ break;
+ case MICROBLAZE_MSG_CALL_FUNCTION:
+ generic_smp_call_function_interrupt();
+ break;
+ case MICROBLAZE_MSG_CALL_FUNCTION_SINGLE:
+ generic_smp_call_function_single_interrupt();
+ break;
+ case MICROBLAZE_MSG_DEBUGGER_BREAK:
+ if (crash_ipi_function_ptr)
+ crash_ipi_function_ptr(get_irq_regs());
+ break;
+ default:
+ BUG();
+ }
+
+ set_irq_regs(old_regs);
+}
+
+void smp_send_reschedule(int cpu)
+{
+ if (cpu_online(cpu))
+ __smp_cross_call(cpu, MICROBLAZE_MSG_RESCHEDULE);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+ if (cpu_online(cpu))
+ __smp_cross_call(cpu, MICROBLAZE_MSG_CALL_FUNCTION_SINGLE);
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ unsigned int cpu;
+
+ for_each_cpu(cpu, mask)
+ __smp_cross_call(cpu, MICROBLAZE_MSG_CALL_FUNCTION);
+}
+
+#ifdef CONFIG_KGDB
+void smp_send_debugger_break(void)
+{
+ int cpu;
+ int me = raw_smp_processor_id();
+
+ for_each_online_cpu(cpu)
+ if (cpu != me)
+ __smp_cross_call(cpu, MICROBLAZE_MSG_DEBUGGER_BREAK);
+}
+
+void crash___smp_cross_call(void (*crash_ipi_callback)(struct pt_regs *))
+{
+ crash_ipi_function_ptr = crash_ipi_callback;
+ if (crash_ipi_callback) {
+ mb();
+ smp_send_debugger_break();
+ }
+}
+#endif
+
+static void stop_this_cpu(void *dummy)
+{
+ /* Remove this CPU */
+ set_cpu_online(smp_processor_id(), false);
+
+ local_irq_disable();
+ while (1)
+ ;
+}
+
+void smp_send_stop(void)
+{
+ smp_call_function(stop_this_cpu, NULL, 0);
+}
+
+static void __init smp_create_idle(unsigned int cpu)
+{
+ struct task_struct *p;
+
+ /* create a process for the processor */
+ p = fork_idle(cpu);
+ if (IS_ERR(p)) {
+ panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
+ pr_alert("failed to create cpu %d idle\n", cpu);
+ }
+
+ task_thread_info(p)->cpu = cpu;
+ current_set[cpu] = task_thread_info(p);
+
+}
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ unsigned int cpu;
+
+ /*
+ * setup_cpu may need to be called on the boot cpu. We haven't
+ * spun any cpus up but lets be paranoid.
+ */
+ BUG_ON(boot_cpuid != smp_processor_id());
+
+ /* Fixup boot cpu */
+ cpu_callin_map[boot_cpuid] = 1;
+
+ for_each_possible_cpu(cpu) {
+ zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
+ GFP_KERNEL, cpu_to_node(cpu));
+ }
+
+ cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
+
+ max_cpus = NR_CPUS;
+
+ for_each_possible_cpu(cpu) {
+ if (cpu != boot_cpuid)
+ smp_create_idle(cpu);
+ }
+}
+
+void __init smp_prepare_boot_cpu(void)
+{
+ BUG_ON(smp_processor_id() != boot_cpuid);
+ current_set[boot_cpuid] = task_thread_info(current);
+}
+
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
+{
+ int c;
+
+ secondary_ti = current_set[cpu];
+
+ /*
+ * Make sure callin-map entry is 0 (can be a leftover of a CPU
+ * hotplug)
+ */
+ cpu_callin_map[cpu] = 0;
+
+ /*
+ * The information for processor bringup must
+ * be written out to main store before we release
+ * the processor.
+ */
+ smp_mb();
+
+ /* wake up cpu */
+ pr_alert("From cpu %d: Waking CPU %d\n", smp_processor_id(), cpu);
+
+ __smp_cross_call(cpu, 0);
+
+ if (system_state < SYSTEM_RUNNING)
+ for (c = 10000; c && !cpu_callin_map[cpu]; c--)
+ udelay(100);
+
+ if (!cpu_callin_map[cpu]) {
+ pr_err("Processor %u is stuck.\n", cpu);
+ return -ENOENT;
+ }
+
+ while (!cpu_online(cpu))
+ cpu_relax();
+
+ pr_alert("Processor %u found.\n", cpu);
+
+ return 0;
+}
+
+asmlinkage void __init secondary_machine_init(void)
+{
+ unsigned long *src, *dst;
+ unsigned int offset = 0;
+
+ /*
+ * Do not copy reset vectors. offset = 0x2 means skip the first
+ * two instructions. dst is pointer to MB vectors which are placed
+ * in block ram. If you want to copy reset vector setup offset to 0x0
+ */
+#if !CONFIG_MANUAL_RESET_VECTOR
+ offset = 0x2;
+#endif
+ dst = (unsigned long *) (offset * sizeof(u32));
+ for (src = __ivt_start + offset; src < __ivt_end; src++, dst++)
+ *dst = *src;
+}
+
+/* Activate a secondary processor. */
+void __init start_secondary(void) // FIXME this is not __init
+{
+ unsigned int cpu = smp_processor_id();
+ int i;
+
+ atomic_inc(&init_mm.mm_count);
+ current->active_mm = &init_mm;
+ cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
+ local_flush_tlb_mm(&init_mm);
+
+ pr_alert("cpu: %d alive\n", cpu);
+
+ setup_cpuinfo();
+ microblaze_cache_init();
+
+ preempt_disable();
+
+ /* calibrate_delay(); */
+
+ cpu_callin_map[cpu] = 1;
+
+ notify_cpu_starting(cpu);
+
+ set_cpu_online(cpu, true);
+
+ for_each_online_cpu(i) {
+ cpumask_set_cpu(cpu, cpu_core_mask(i));
+ cpumask_set_cpu(i, cpu_core_mask(cpu));
+ }
+ local_irq_enable();
+
+ cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+
+ BUG();
+}
+
+#ifdef CONFIG_PROFILING
+int setup_profiling_timer(unsigned int multiplier)
+{
+ return 0;
+}
+#endif
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{ }
diff --git a/arch/microblaze/kernel/syscalls/syscallhdr.sh b/arch/microblaze/kernel/syscalls/syscallhdr.sh
index a914854f8d9f..17849efbc099 100644
--- a/arch/microblaze/kernel/syscalls/syscallhdr.sh
+++ b/arch/microblaze/kernel/syscalls/syscallhdr.sh
@@ -33,4 +33,5 @@ grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
printf "#endif\n"
printf "\n"
printf "#endif /* %s */\n" "${fileguard}"
+ printf "\n"
) > "$out"
diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c
index f8832cf49384..baeca72486b0 100644
--- a/arch/microblaze/kernel/timer.c
+++ b/arch/microblaze/kernel/timer.c
@@ -16,15 +16,22 @@
#include <linux/sched_clock.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
+#include <linux/cpuhotplug.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/timecounter.h>
#include <asm/cpuinfo.h>
-static void __iomem *timer_baseaddr;
+static void __iomem *clocksource_baseaddr;
-static unsigned int freq_div_hz;
-static unsigned int timer_clock_freq;
+struct xilinx_timer {
+ void __iomem *timer_baseaddr;
+ u32 irq;
+ unsigned int freq_div_hz;
+ unsigned int timer_clock_freq;
+};
+
+static DEFINE_PER_CPU(struct xilinx_timer, timer_priv);
#define TCSR0 (0x00)
#define TLR0 (0x04)
@@ -70,12 +77,21 @@ static unsigned int timer_read32_be(void __iomem *addr)
static inline void xilinx_timer0_stop(void)
{
+ int cpu = smp_processor_id();
+ struct xilinx_timer *timer = per_cpu_ptr(&timer_priv, cpu);
+ void __iomem *timer_baseaddr = timer->timer_baseaddr;
+
write_fn(read_fn(timer_baseaddr + TCSR0) & ~TCSR_ENT,
timer_baseaddr + TCSR0);
}
-static inline void xilinx_timer0_start_periodic(unsigned long load_val)
+static inline void xilinx_timer0_start_periodic(void)
{
+ int cpu = smp_processor_id();
+ struct xilinx_timer *timer = per_cpu_ptr(&timer_priv, cpu);
+ void __iomem *timer_baseaddr = timer->timer_baseaddr;
+ unsigned long load_val = timer->freq_div_hz;
+
if (!load_val)
load_val = 1;
/* loading value to timer reg */
@@ -103,6 +119,10 @@ static inline void xilinx_timer0_start_periodic(unsigned long load_val)
static inline void xilinx_timer0_start_oneshot(unsigned long load_val)
{
+ int cpu = smp_processor_id();
+ struct xilinx_timer *timer = per_cpu_ptr(&timer_priv, cpu);
+ void __iomem *timer_baseaddr = timer->timer_baseaddr;
+
if (!load_val)
load_val = 1;
/* loading value to timer reg */
@@ -133,11 +153,11 @@ static int xilinx_timer_shutdown(struct clock_event_device *evt)
static int xilinx_timer_set_periodic(struct clock_event_device *evt)
{
pr_info("%s\n", __func__);
- xilinx_timer0_start_periodic(freq_div_hz);
+ xilinx_timer0_start_periodic();
return 0;
}
-static struct clock_event_device clockevent_xilinx_timer = {
+static DEFINE_PER_CPU(struct clock_event_device, clockevent_xilinx_timer) = {
.name = "xilinx_clockevent",
.features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_PERIODIC,
@@ -150,37 +170,75 @@ static struct clock_event_device clockevent_xilinx_timer = {
static inline void timer_ack(void)
{
+ int cpu = smp_processor_id();
+ struct xilinx_timer *timer = per_cpu_ptr(&timer_priv, cpu);
+ void __iomem *timer_baseaddr = timer->timer_baseaddr;
+
write_fn(read_fn(timer_baseaddr + TCSR0), timer_baseaddr + TCSR0);
}
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
- struct clock_event_device *evt = &clockevent_xilinx_timer;
+ struct clock_event_device *evt = dev_id;
+
timer_ack();
evt->event_handler(evt);
return IRQ_HANDLED;
}
-static __init int xilinx_clockevent_init(void)
+static __init int xilinx_clockevent_init(int cpu, struct xilinx_timer *timer)
{
- clockevent_xilinx_timer.mult =
- div_sc(timer_clock_freq, NSEC_PER_SEC,
- clockevent_xilinx_timer.shift);
- clockevent_xilinx_timer.max_delta_ns =
- clockevent_delta2ns((u32)~0, &clockevent_xilinx_timer);
- clockevent_xilinx_timer.max_delta_ticks = (u32)~0;
- clockevent_xilinx_timer.min_delta_ns =
- clockevent_delta2ns(1, &clockevent_xilinx_timer);
- clockevent_xilinx_timer.min_delta_ticks = 1;
- clockevent_xilinx_timer.cpumask = cpumask_of(0);
- clockevents_register_device(&clockevent_xilinx_timer);
+ struct clock_event_device *ce = per_cpu_ptr(&clockevent_xilinx_timer,
+ cpu);
+
+ ce->mult = div_sc(timer->timer_clock_freq, NSEC_PER_SEC, ce->shift);
+ ce->max_delta_ns = clockevent_delta2ns((u32)~0, ce);
+ ce->max_delta_ticks = (u32)~0;
+ ce->min_delta_ns = clockevent_delta2ns(1, ce);
+ ce->min_delta_ticks = 1;
+ ce->cpumask = cpumask_of(cpu);
+ clockevents_register_device(ce);
+
+ return 0;
+}
+
+static int microblaze_timer_starting(unsigned int cpu)
+{
+ int ret;
+ struct xilinx_timer *timer = per_cpu_ptr(&timer_priv, cpu);
+ struct clock_event_device *ce = per_cpu_ptr(&clockevent_xilinx_timer,
+ cpu);
+
+ pr_debug("%s: cpu %d\n", __func__, cpu);
+
+ if (!timer->timer_baseaddr) {
+ /* It should never fail */
+ pr_err("%s: clockevent timer for cpu %d failed\n",
+ __func__, cpu);
+ return -EINVAL;
+ }
+
+ ret = request_irq(timer->irq, timer_interrupt, IRQF_TIMER |
+ IRQF_PERCPU | IRQF_NOBALANCING,
+ "timer", ce);
+ if (ret) {
+ pr_err("%s: request_irq failed\n", __func__);
+ return ret;
+ }
+
+ return xilinx_clockevent_init(cpu, timer);
+}
+
+static int microblaze_timer_dying(unsigned int cpu)
+{
+ pr_debug("%s: cpu %d\n", __func__, cpu);
return 0;
}
static u64 xilinx_clock_read(void)
{
- return read_fn(timer_baseaddr + TCR1);
+ return read_fn(clocksource_baseaddr + TCR0);
}
static u64 xilinx_read(struct clocksource *cs)
@@ -204,16 +262,6 @@ static struct cyclecounter xilinx_cc = {
.shift = 8,
};
-static int __init init_xilinx_timecounter(void)
-{
- xilinx_cc.mult = div_sc(timer_clock_freq, NSEC_PER_SEC,
- xilinx_cc.shift);
-
- timecounter_init(&xilinx_tc, &xilinx_cc, sched_clock());
-
- return 0;
-}
-
static struct clocksource clocksource_microblaze = {
.name = "xilinx_clocksource",
.rating = 300,
@@ -222,7 +270,7 @@ static struct clocksource clocksource_microblaze = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static int __init xilinx_clocksource_init(void)
+static int __init xilinx_clocksource_init(unsigned int timer_clock_freq)
{
int ret;
@@ -234,27 +282,54 @@ static int __init xilinx_clocksource_init(void)
}
/* stop timer1 */
- write_fn(read_fn(timer_baseaddr + TCSR1) & ~TCSR_ENT,
- timer_baseaddr + TCSR1);
+ write_fn(read_fn(clocksource_baseaddr + TCSR0) & ~TCSR_ENT,
+ clocksource_baseaddr + TCSR0);
/* start timer1 - up counting without interrupt */
- write_fn(TCSR_TINT|TCSR_ENT|TCSR_ARHT, timer_baseaddr + TCSR1);
+ write_fn(TCSR_TINT|TCSR_ENT|TCSR_ARHT, clocksource_baseaddr + TCSR0);
/* register timecounter - for ftrace support */
- return init_xilinx_timecounter();
+ xilinx_cc.mult = div_sc(timer_clock_freq, NSEC_PER_SEC,
+ xilinx_cc.shift);
+
+ timecounter_init(&xilinx_tc, &xilinx_cc, sched_clock());
+
+ sched_clock_register(xilinx_clock_read, 32, timer_clock_freq);
+
+ return 0;
}
static int __init xilinx_timer_init(struct device_node *timer)
{
struct clk *clk;
static int initialized;
- u32 irq;
u32 timer_num = 1;
- int ret;
-
- if (initialized)
- return -EINVAL;
+ int ret = 0, cpu_id = 0;
+ void __iomem *timer_baseaddr;
+ unsigned int timer_clock_freq;
+ bool clocksource = false;
+ bool clockevent = false;
+
+ ret = of_property_read_u32(timer, "cpu-id", &cpu_id);
+ if (!ret && NR_CPUS > 1) {
+ /* cpu_id will say if this is clocksource or clockevent */
+ if (cpu_id >= NR_CPUS)
+ clocksource = true;
+ else
+ clockevent = true;
+ } else {
+ /* No cpu_id property continue to work in old style */
+ clocksource = true;
+ clockevent = true;
+ }
- initialized = 1;
+ if (clocksource) {
+ /* TODO Add support for clocksource from one timer only */
+ of_property_read_u32(timer, "xlnx,one-timer-only", &timer_num);
+ if (timer_num) {
+ pr_err("%pOF: Please enable two timers in HW\n", timer);
+ return -EINVAL;
+ }
+ }
timer_baseaddr = of_iomap(timer, 0);
if (!timer_baseaddr) {
@@ -271,20 +346,6 @@ static int __init xilinx_timer_init(struct device_node *timer)
read_fn = timer_read32_be;
}
- irq = irq_of_parse_and_map(timer, 0);
- if (irq <= 0) {
- pr_err("Failed to parse and map irq");
- return -EINVAL;
- }
-
- of_property_read_u32(timer, "xlnx,one-timer-only", &timer_num);
- if (timer_num) {
- pr_err("Please enable two timers in HW\n");
- return -EINVAL;
- }
-
- pr_info("%pOF: irq=%d\n", timer, irq);
-
clk = of_clk_get(timer, 0);
if (IS_ERR(clk)) {
pr_err("ERROR: timer CCF input clock not found\n");
@@ -297,29 +358,65 @@ static int __init xilinx_timer_init(struct device_node *timer)
if (!timer_clock_freq) {
pr_err("ERROR: Using CPU clock frequency\n");
- timer_clock_freq = cpuinfo.cpu_clock_freq;
+ return -EINVAL;
}
- freq_div_hz = timer_clock_freq / HZ;
-
- ret = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer",
- &clockevent_xilinx_timer);
- if (ret) {
- pr_err("Failed to setup IRQ");
- return ret;
+ if (clocksource) {
+ if (clocksource_baseaddr) {
+ pr_err("%s: cpu %d has already clocksource timer\n",
+ __func__, cpu_id);
+ return -EINVAL;
+ }
+
+ /* At this point we know that clocksource timer is second one */
+ clocksource_baseaddr = timer_baseaddr + TCSR1;
+ pr_info("%s: Timer base: 0x%x, Clocksource base: 0x%x\n",
+ __func__, (u32)timer_baseaddr,
+ (u32)clocksource_baseaddr);
+
+ ret = xilinx_clocksource_init(timer_clock_freq);
+ if (ret)
+ return ret;
}
- ret = xilinx_clocksource_init();
- if (ret)
- return ret;
-
- ret = xilinx_clockevent_init();
- if (ret)
- return ret;
-
- sched_clock_register(xilinx_clock_read, 32, timer_clock_freq);
+ if (clockevent) {
+ struct xilinx_timer *timer_st;
+
+ /* Record what we know already */
+ timer_st = per_cpu_ptr(&timer_priv, cpu_id);
+ if (timer_st->timer_baseaddr) {
+ pr_err("%s: cpu %d has already clockevent timer\n",
+ __func__, cpu_id);
+ return -EINVAL;
+ }
+
+ timer_st->timer_baseaddr = timer_baseaddr;
+
+ timer_st->irq = irq_of_parse_and_map(timer, 0);
+ if (timer_st->irq <= 0) {
+ pr_err("Failed to parse and map irq");
+ return -EINVAL;
+ }
+
+ pr_info("%pOF: irq=%d, cpu_id %d\n",
+ timer, timer_st->irq, cpu_id);
+
+ timer_st->timer_clock_freq = timer_clock_freq;
+
+ timer_st->freq_div_hz = timer_clock_freq / HZ;
+
+ /* Can't call it several times */
+ if (!initialized && !cpu_id) {
+ ret = cpuhp_setup_state(CPUHP_AP_MICROBLAZE_TIMER_STARTING,
+ "clockevents/microblaze/arch_timer:starting",
+ microblaze_timer_starting,
+ microblaze_timer_dying);
+ if (!ret)
+ initialized++;
+ }
+ }
- return 0;
+ return ret;
}
TIMER_OF_DECLARE(xilinx_timer, "xlnx,xps-timer-1.00.a",
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index df07b3d06cd6..9894210bf040 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -120,12 +120,6 @@ SECTIONS {
CON_INITCALL
}
- __init_end_before_initramfs = .;
-
- .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
- INIT_RAM_FS
- }
-
__init_end = .;
.bss ALIGN (PAGE_SIZE) : AT(ADDR(.bss) - LOAD_OFFSET) {
@@ -138,6 +132,17 @@ SECTIONS {
}
. = ALIGN(PAGE_SIZE);
_end = .;
+ /* Add space in TLB mapping for early free pages mapping */
+ . = . + 0x100000; /* CONFIG_LOWMEM_SIZE >> PTE_SHIFT + space */
+
+ /* And alignment to make sure that we map only kernel by TLBs */
+ . = ALIGN(0x400000);
+ _end_tlb_mapping = . ;
+
+ .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
+ INIT_RAM_FS
+ }
+ __initramfs_end = . ;
DISCARDS
}
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
index e09b66e43cb6..3937f541ef02 100644
--- a/arch/microblaze/mm/consistent.c
+++ b/arch/microblaze/mm/consistent.c
@@ -35,7 +35,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
* I have to use dcache values because I can't relate on ram size:
*/
#ifdef CONFIG_XILINX_UNCACHED_SHADOW
-#define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
+#define UNCACHED_SHADOW_MASK (cpuinfo->dcache_high - cpuinfo->dcache_base + 1)
#else
#define UNCACHED_SHADOW_MASK 0
#endif /* CONFIG_XILINX_UNCACHED_SHADOW */
@@ -43,9 +43,11 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
void *arch_dma_set_uncached(void *ptr, size_t size)
{
unsigned long addr = (unsigned long)ptr;
+ unsigned int cpu = smp_processor_id();
+ struct cpuinfo *cpuinfo = per_cpu_ptr(&cpu_info, cpu);
addr |= UNCACHED_SHADOW_MASK;
- if (addr > cpuinfo.dcache_base && addr < cpuinfo.dcache_high)
+ if (addr > cpuinfo->dcache_base && addr < cpuinfo->dcache_high)
pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");
return (void *)addr;
}
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 1ffbfa96b9b8..c9cac2fe7a17 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -267,6 +267,7 @@ static void __init mmu_init_hw(void)
asmlinkage void __init mmu_init(void)
{
unsigned int kstart, ksize;
+ phys_addr_t __maybe_unused size;
if (!memblock.reserved.cnt) {
pr_emerg("Error memory count\n");
@@ -308,10 +309,14 @@ asmlinkage void __init mmu_init(void)
#if defined(CONFIG_BLK_DEV_INITRD)
/* Remove the init RAM disk from the available memory. */
if (initrd_start) {
- unsigned long size;
size = initrd_end - initrd_start;
memblock_reserve(__virt_to_phys(initrd_start), size);
}
+
+ size = __initramfs_end - __initramfs_start;
+ if (size)
+ memblock_reserve((phys_addr_t)__virt_to_phys(__initramfs_start),
+ size);
#endif /* CONFIG_BLK_DEV_INITRD */
/* Initialize the MMU hardware */
@@ -349,7 +354,8 @@ void __init *early_get_page(void)
* because of mem mapping from head.S
*/
return memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
- MEMBLOCK_LOW_LIMIT, memory_start + kernel_tlb,
+ memory_start,
+ (phys_addr_t)__virt_to_phys(_end_tlb_mapping),
NUMA_NO_NODE);
}
diff --git a/arch/microblaze/mm/mmu_context.c b/arch/microblaze/mm/mmu_context.c
index cbc234816786..7c2fc6eeaac5 100644
--- a/arch/microblaze/mm/mmu_context.c
+++ b/arch/microblaze/mm/mmu_context.c
@@ -2,7 +2,11 @@
/*
* This file contains the routines for handling the MMU.
*
- * Copyright (C) 2007 Xilinx, Inc. All rights reserved.
+ * Copyright (C) 2007,2013-2020 Xilinx, Inc. All rights reserved.
+ *
+ * Derived from arch/powerpc/mm/mmu_context_nohash.c
+ * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
+ * IBM Corp.
*
* Derived from arch/ppc/mm/4xx_mmu.c:
* -- paulus
@@ -19,47 +23,248 @@
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*/
+#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/slab.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
-mm_context_t next_mmu_context;
-unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
-atomic_t nr_free_contexts;
-struct mm_struct *context_mm[LAST_CONTEXT+1];
+static unsigned int next_context, nr_free_contexts;
+static unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
+#ifdef CONFIG_SMP
+static unsigned long stale_map[LAST_CONTEXT / BITS_PER_LONG + 1][NR_CPUS];
+#endif
+static struct mm_struct *context_mm[LAST_CONTEXT + 1];
+static DEFINE_RAW_SPINLOCK(context_lock);
/*
- * Initialize the context management stuff.
+ * Steal a context from a task that has one at the moment.
+ *
+ * This is used when we are running out of available PID numbers
+ * on the processors.
+ *
+ * This isn't an LRU system, it just frees up each context in
+ * turn (sort-of pseudo-random replacement :). This would be the
+ * place to implement an LRU scheme if anyone were motivated to do it.
+ *
+ * For context stealing, we use a slightly different approach for
+ * SMP and UP. Basically, the UP one is simpler and doesn't use
+ * the stale map as we can just flush the local CPU.
*/
-void __init mmu_context_init(void)
+#ifdef CONFIG_SMP
+static unsigned int steal_context_smp(unsigned int id)
{
- /*
- * The use of context zero is reserved for the kernel.
- * This code assumes FIRST_CONTEXT < 32.
+ struct mm_struct *mm;
+ unsigned int cpu, max;
+
+ max = LAST_CONTEXT - FIRST_CONTEXT;
+
+ /* Attempt to free next_context first and then loop until we manage */
+ while (max--) {
+ /* Pick up the victim mm */
+ mm = context_mm[id];
+
+ /* We have a candidate victim, check if it's active, on SMP
+ * we cannot steal active contexts
+ */
+ if (mm->context.active) {
+ id++;
+ if (id > LAST_CONTEXT)
+ id = FIRST_CONTEXT;
+ continue;
+ }
+
+ /* Mark this mm as having no context anymore */
+ mm->context.id = MMU_NO_CONTEXT;
+
+ /* Mark it stale on all CPUs that used this mm. */
+ for_each_cpu(cpu, mm_cpumask(mm)) {
+ __set_bit(id, stale_map[cpu]);
+ }
+ return id;
+ }
+
+ /* This will happen if you have more CPUs than available contexts,
+ * all we can do here is wait a bit and try again
*/
- context_map[0] = (1 << FIRST_CONTEXT) - 1;
- next_mmu_context = FIRST_CONTEXT;
- atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
+ raw_spin_unlock(&context_lock);
+ cpu_relax();
+ raw_spin_lock(&context_lock);
+
+ /* This will cause the caller to try again */
+ return MMU_NO_CONTEXT;
}
+#endif /* CONFIG_SMP */
/*
- * Steal a context from a task that has one at the moment.
- *
- * This isn't an LRU system, it just frees up each context in
- * turn (sort-of pseudo-random replacement :). This would be the
- * place to implement an LRU scheme if anyone were motivated to do it.
+ * Note that this will also be called on SMP if all other CPUs are
+ * offlined, which means that it may be called for cpu != 0. For
+ * this to work, we somewhat assume that CPUs that are onlined
+ * come up with a fully clean TLB (or are cleaned when offlined)
*/
-void steal_context(void)
+static unsigned int steal_context_up(unsigned int id)
{
struct mm_struct *mm;
+ unsigned int cpu = smp_processor_id();
+
+ /* Pick up the victim mm */
+ mm = context_mm[id];
+
+ pr_debug("[%d] steal context %d from mm @%p\n", cpu, id, mm);
+
+ /* Flush the TLB for that context */
+ local_flush_tlb_mm(mm);
+
+ /* Mark this mm as having no context anymore */
+ mm->context.id = MMU_NO_CONTEXT;
+
+ /* TODO: This clear should ultimately be part of local_flush_tlb_mm */
+#ifdef CONFIG_SMP
+ __clear_bit(id, stale_map[cpu]);
+#endif
+
+ return id;
+}
+
+void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
+{
+ unsigned int id;
+#ifdef CONFIG_SMP
+ unsigned int cpu = smp_processor_id();
+#endif
+ unsigned long *map;
+
+ /* No lockless fast path .. yet */
+ raw_spin_lock(&context_lock);
+
+#ifdef CONFIG_SMP
+ /* Mark us active and the previous one not anymore */
+ next->context.active++;
+ if (prev) {
+ WARN_ON(prev->context.active < 1);
+ prev->context.active--;
+ }
- /* free up context `next_mmu_context' */
- /* if we shouldn't free context 0, don't... */
- if (next_mmu_context < FIRST_CONTEXT)
- next_mmu_context = FIRST_CONTEXT;
- mm = context_mm[next_mmu_context];
- flush_tlb_mm(mm);
- destroy_context(mm);
+ again:
+#endif /* CONFIG_SMP */
+
+ /* If we already have a valid assigned context, skip all that */
+ id = next->context.id;
+ if (likely(id != MMU_NO_CONTEXT))
+ goto ctxt_ok;
+
+ /* We really don't have a context, let's try to acquire one */
+ id = next_context;
+ if (id > LAST_CONTEXT)
+ id = FIRST_CONTEXT;
+ map = context_map;
+
+ /* No more free contexts, let's try to steal one */
+ if (nr_free_contexts == 0) {
+#ifdef CONFIG_SMP
+ if (num_online_cpus() > 1) {
+ id = steal_context_smp(id);
+ if (id == MMU_NO_CONTEXT)
+ goto again;
+ goto stolen;
+ }
+#endif /* CONFIG_SMP */
+ id = steal_context_up(id);
+ goto stolen;
+ }
+ nr_free_contexts--;
+
+ /* We know there's at least one free context, try to find it */
+ while (__test_and_set_bit(id, map)) {
+ id = find_next_zero_bit(map, LAST_CONTEXT + 1, id);
+ if (id > LAST_CONTEXT)
+ id = FIRST_CONTEXT;
+ }
+ stolen:
+ next_context = id + 1;
+ context_mm[id] = next;
+ next->context.id = id;
+
+ ctxt_ok:
+
+ /* If that context got marked stale on this CPU, then flush the
+ * local TLB for it and unmark it before we use it
+ */
+#ifdef CONFIG_SMP
+ if (test_bit(id, stale_map[cpu])) {
+ local_flush_tlb_mm(next);
+
+ /*
+ * TODO: This clear should ultimately be part of
+ * local_flush_tlb_mm
+ */
+ __clear_bit(id, stale_map[cpu]);
+ }
+#endif
+
+ /* Flick the MMU and release lock */
+ set_context(id, next->pgd);
+ raw_spin_unlock(&context_lock);
+}
+
+/*
+ * Set up the context for a new address space.
+ */
+int init_new_context(struct task_struct *t, struct mm_struct *mm)
+{
+ mm->context.id = MMU_NO_CONTEXT;
+ mm->context.active = 0;
+
+ return 0;
+}
+
+/*
+ * We're finished using the context for an address space.
+ */
+void destroy_context(struct mm_struct *mm)
+{
+ unsigned long flags;
+ unsigned int id;
+
+ if (mm->context.id == MMU_NO_CONTEXT)
+ return;
+
+ WARN_ON(mm->context.active != 0);
+
+ raw_spin_lock_irqsave(&context_lock, flags);
+ id = mm->context.id;
+ if (id != MMU_NO_CONTEXT) {
+ __clear_bit(id, context_map);
+ mm->context.id = MMU_NO_CONTEXT;
+ mm->context.active = 0;
+ context_mm[id] = NULL;
+ nr_free_contexts++;
+ }
+ raw_spin_unlock_irqrestore(&context_lock, flags);
+}
+
+/*
+ * Initialize the context management stuff.
+ */
+void __init mmu_context_init(void)
+{
+ /*
+ * Mark init_mm as being active on all possible CPUs since
+ * we'll get called with prev == init_mm the first time
+ * we schedule on a given CPU
+ */
+ init_mm.context.active = NR_CPUS;
+
+ /*
+ * The use of context zero is reserved for the kernel.
+ * This code assumes FIRST_CONTEXT < 32.
+ */
+ context_map[0] = (1 << FIRST_CONTEXT) - 1;
+ next_context = FIRST_CONTEXT;
+ nr_free_contexts = LAST_CONTEXT - FIRST_CONTEXT + 1;
}
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 60a58c0015f2..5a9d0f97819a 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -141,7 +141,7 @@ struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node)
return NULL;
}
-void pcibios_set_master(struct pci_dev *dev)
+void __weak pcibios_set_master(struct pci_dev *dev)
{
/* No special bus mastering setup handling */
}
@@ -555,37 +555,7 @@ int pci_proc_domain(struct pci_bus *bus)
*/
static void pcibios_fixup_resources(struct pci_dev *dev)
{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- int i;
- if (!hose) {
- pr_err("No host bridge for PCI dev %s !\n",
- pci_name(dev));
- return;
- }
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- struct resource *res = dev->resource + i;
- if (!res->flags)
- continue;
- if (res->start == 0) {
- pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]",
- pci_name(dev), i,
- (unsigned long long)res->start,
- (unsigned long long)res->end,
- (unsigned int)res->flags);
- pr_debug("is unassigned\n");
- res->end -= res->start;
- res->start = 0;
- res->flags |= IORESOURCE_UNSET;
- continue;
- }
-
- pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]\n",
- pci_name(dev), i,
- (unsigned long long)res->start,
- (unsigned long long)res->end,
- (unsigned int)res->flags);
- }
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index d9a4c6c691e0..cb5622df4a27 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -626,6 +626,7 @@ static int ll_setup(struct hci_uart *hu)
gpiod_set_value_cansleep(lldev->enable_gpio, 0);
msleep(5);
gpiod_set_value_cansleep(lldev->enable_gpio, 1);
+ msleep(500);
err = serdev_device_wait_for_cts(serdev, true, 200);
if (err) {
bt_dev_err(hu->hdev, "Failed to get CTS");
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index bcb257baed06..4468318367e0 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -143,6 +143,39 @@ config COMMON_CLK_BM1880
help
This driver supports the clocks on Bitmain BM1880 SoC.
+config COMMON_CLK_SI5324
+ tristate "Clock driver for SiLabs 5324 and compatible devices"
+ depends on I2C
+ depends on OF
+ select REGMAP_I2C
+ help
+ This driver supports Silicon Labs 5324/5319/5328 programmable
+ clock generators. Dynamic programming of the oscillator is done
+ via I2C.
+
+config COMMON_CLK_IDT8T49N24X
+ tristate "Clock driver for IDT 8T49N24x"
+ depends on I2C
+ depends on OF
+ select REGMAP_I2C
+ help
+ This driver supports the IDT 8T49N24x universal frequency translator
+ product family. The only chip in the family that is currently
+ supported is the 8T49N241. The driver supports setting the rate for
+ all four outputs on the chip and automatically calculating/setting
+ the appropriate VCO value.
+
+ The driver can read a full register map from the DT,
+ and will use that register map to initialize the attached part
+ (via I2C) when the system boots. Any configuration not supported
+ by the common clock framework must be done via the full register
+ map, including optimized settings.
+
+ All outputs are currently assumed to be LVDS, unless overridden
+ in the full register map in the DT.
+
config COMMON_CLK_CDCE706
tristate "Clock driver for TI CDCE706 clock synthesizer"
depends on I2C
@@ -339,6 +372,21 @@ config COMMON_CLK_FIXED_MMIO
help
Support for Memory Mapped IO Fixed clocks
+config COMMON_CLK_XLNX_CLKWZRD
+ tristate "Xilinx Clocking Wizard"
+ depends on COMMON_CLK && OF
+ help
+ Support for the Xilinx Clocking Wizard IP core clock generator.
+
+config COMMON_CLK_XLNX_CLKWZRD_V
+ tristate "Xilinx Versal Clocking Wizard"
+ depends on COMMON_CLK && OF
+ help
+ Support for the Versal Xilinx Clocking Wizard IP core clock generator.
+ Adds support for Versal clocking wizard 1.0 and compatible.
+ This driver supports the Xilinx clocking wizard programmable clock
+ synthesizer. The number of outputs is configurable in the design.
+
source "drivers/clk/actions/Kconfig"
source "drivers/clk/analogbits/Kconfig"
source "drivers/clk/bcm/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 60e811d3f226..5243c1fad2bd 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -61,6 +61,8 @@ obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o
obj-$(CONFIG_COMMON_CLK_STM32F) += clk-stm32f4.o
obj-$(CONFIG_COMMON_CLK_STM32H7) += clk-stm32h7.o
obj-$(CONFIG_COMMON_CLK_STM32MP157) += clk-stm32mp1.o
+clk-si5324drv-y := si5324drv.o clk-si5324.o
+obj-$(CONFIG_COMMON_CLK_SI5324) += clk-si5324drv.o
obj-$(CONFIG_ARCH_TANGO) += clk-tango4.o
obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
obj-$(CONFIG_ARCH_U300) += clk-u300.o
@@ -68,6 +70,8 @@ obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
obj-$(CONFIG_COMMON_CLK_VC5) += clk-versaclock5.o
obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o
obj-$(CONFIG_COMMON_CLK_XGENE) += clk-xgene.o
+obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clk-xlnx-clock-wizard.o
+obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD_V) += clk-xlnx-clock-wizard-v.o
# please keep this section sorted lexicographically by directory path name
obj-y += actions/
@@ -80,6 +84,7 @@ obj-$(CONFIG_ARCH_BERLIN) += berlin/
obj-$(CONFIG_ARCH_DAVINCI) += davinci/
obj-$(CONFIG_H8300) += h8300/
obj-$(CONFIG_ARCH_HISI) += hisilicon/
+obj-$(CONFIG_COMMON_CLK_IDT8T49N24X) += idt/
obj-y += imgtec/
obj-y += imx/
obj-y += ingenic/
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index 8b343e59dc61..3d14334591a4 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -163,6 +163,7 @@ static struct clk_hw *_of_fixed_factor_clk_setup(struct device_node *node)
{
struct clk_hw *hw;
const char *clk_name = node->name;
+ const char *parent_name = NULL;
unsigned long flags = 0;
u32 div, mult;
int ret;
@@ -180,6 +181,9 @@ static struct clk_hw *_of_fixed_factor_clk_setup(struct device_node *node)
}
of_property_read_string(node, "clock-output-names", &clk_name);
+ parent_name = of_clk_get_parent_name(node, 0);
+ if (!parent_name)
+ return ERR_PTR(-EPROBE_DEFER);
if (of_match_node(set_rate_parent_matches, node))
flags |= CLK_SET_RATE_PARENT;
@@ -211,8 +215,8 @@ void __init of_fixed_factor_clk_setup(struct device_node *node)
{
_of_fixed_factor_clk_setup(node);
}
-CLK_OF_DECLARE(fixed_factor_clk, "fixed-factor-clock",
- of_fixed_factor_clk_setup);
+CLK_OF_DECLARE_DRIVER(fixed_factor_clk, "fixed-factor-clock",
+ of_fixed_factor_clk_setup);
static int of_fixed_factor_clk_remove(struct platform_device *pdev)
{
diff --git a/drivers/clk/clk-si5324.c b/drivers/clk/clk-si5324.c
new file mode 100644
index 000000000000..7cfe75d7e6a4
--- /dev/null
+++ b/drivers/clk/clk-si5324.c
@@ -0,0 +1,1227 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * clk-si5324.c - Si5324 clock driver
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ *
+ * Author: Venkateshwar Rao G <vgannava@xilinx.com>
+ * Leon Woestenberg <leon@sidebranch.com>
+ */
+
+#include <asm/div64.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/rational.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "clk-si5324.h"
+#include "si5324.h"
+#include "si5324drv.h"
+
+struct si5324_driver_data;
+
+/**
+ * struct si5324_parameters - si5324 core parameters
+ *
+ * @n1_hs_min: Minimum high-speed n1 output divider
+ * @n1_hs_max: Maximum high-speed n1 output divider
+ * @n1_hs: n1 high-speed output divider
+ * @nc1_ls_min: Minimum low-speed clkout1 output divider
+ * @nc1_ls_max: Maximum low-speed clkout1 output divider
+ * @nc1_ls: Clkout1 low-speed output divider
+ * @nc2_ls_min: Minimum low-speed clkout2 output divider
+ * @nc2_ls_max: Maximum low-speed clkout2 output divider
+ * @nc2_ls: Clkout2 low-speed output divider
+ * @n2_hs: High-speed feedback divider
+ * @n2_ls_min: Minimum low-speed feedback divider
+ * @n2_ls_max: Maximum low-speed feedback divider
+ * @n2_ls: Low-speed feedback divider
+ * @n31_min: Minimum input divider for clk1
+ * @n31_max: Maximum input divider for clk1
+ * @n31: Input divider for clk1
+ * @n32_min: Minimum input divider for clk2
+ * @n32_max: Maximum input divider for clk2
+ * @n32: Input divider for clk2
+ * @fin: Input frequency
+ * @fout: Output frequency
+ * @fosc: Osc frequency
+ * @best_delta_fout: Delta out frequency
+ * @best_fout: Best output frequency
+ * @best_n1_hs: Best high speed output divider
+ * @best_nc1_ls: Best low speed clkout1 divider
+ * @best_n2_hs: Best high speed feedback divider
+ * @best_n2_ls: Best low speed feedback divider
+ * @best_n3: Best input clock divider
+ * @valid: Validity of the computed parameters
+ */
+struct si5324_parameters {
+ u32 n1_hs_min;
+ u32 n1_hs_max;
+ u32 n1_hs;
+ u32 nc1_ls_min;
+ u32 nc1_ls_max;
+ u32 nc1_ls;
+ u32 nc2_ls_min;
+ u32 nc2_ls_max;
+ u32 nc2_ls;
+ u32 n2_hs;
+ u32 n2_ls_min;
+ u32 n2_ls_max;
+ u32 n2_ls;
+ u32 n31_min;
+ u32 n31_max;
+ u32 n31;
+ u32 n32_min;
+ u32 n32_max;
+ u32 n32;
+ u64 fin;
+ u64 fout;
+ u64 fosc;
+ u64 best_delta_fout;
+ u64 best_fout;
+ u32 best_n1_hs;
+ u32 best_nc1_ls;
+ u32 best_n2_hs;
+ u32 best_n2_ls;
+ u32 best_n3;
+ int valid;
+};
+
+/**
+ * struct si5324_hw_data - Clock parameters
+ *
+ * @hw: Handle between common and hardware-specific interfaces
+ * @drvdata: Driver private data
+ * @num: Differential pair clock number
+ */
+struct si5324_hw_data {
+ struct clk_hw hw;
+ struct si5324_driver_data *drvdata;
+ unsigned char num;
+};
+
+/**
+ * struct si5324_driver_data - Driver parameters
+ * @client: I2C client pointer
+ * @regmap: Device's regmap
+ * @onecell: Clock onecell data
+ * @params: Device parameters
+ * @pxtal: Clock
+ * @pxtal_name: Clock name
+ * @xtal: Reference clock
+ * @pclkin1: Clock in 1
+ * @pclkin1_name: Clock in 1 name
+ * @clkin1: Differential input clock 1
+ * @pclkin2: Clock in 2
+ * @pclkin2_name: Clock in 2 name
+ * @clkin2: Differential input clock 2
+ * @pll: Pll clock
+ * @clkout: Output clock
+ * @rate_clkout0: Clock out 0 rate
+ * @rate_clkout1: Clock out 1 rate
+ */
+struct si5324_driver_data {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct clk_onecell_data onecell;
+ struct si5324_parameters params;
+ struct clk *pxtal;
+ const char *pxtal_name;
+ struct clk_hw xtal;
+ struct clk *pclkin1;
+ const char *pclkin1_name;
+ struct clk_hw clkin1;
+ struct clk *pclkin2;
+ const char *pclkin2_name;
+ struct clk_hw clkin2;
+ struct si5324_hw_data pll;
+ struct si5324_hw_data *clkout;
+ unsigned long rate_clkout0;
+ unsigned long rate_clkout1;
+};
+
+static const char * const si5324_input_names[] = {
+ "xtal", "clkin1", "clkin2"
+};
+
+static const char * const si5324_pll_name = "pll";
+
+static const char * const si5324_clkout_names[] = {
+ "clk0", "clk1"
+};
+
+enum si53xx_variant {
+ si5319,
+ si5324,
+ si5328
+};
+
+static const char * const si53xx_variant_name[] = {
+ "si5319", "si5324", "si5328"
+};
+
+/**
+ * si5324_reg_read - Read a single si5324 register.
+ *
+ * @drvdata: Device to read from.
+ * @reg: Register to read.
+ *
+ * This function reads data from a single register
+ *
+ * Return: Data of the register on success, error number on failure
+ */
+static inline int
+si5324_reg_read(struct si5324_driver_data *drvdata, u8 reg)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_read(drvdata->regmap, reg, &val);
+ if (ret < 0) {
+ dev_err(&drvdata->client->dev,
+ "unable to read from reg%02x\n", reg);
+ return ret;
+ }
+
+ return (u8)val;
+}
+
+/**
+ * si5324_bulk_read - Read multiple si5324 registers
+ *
+ * @drvdata: Device to read from
+ * @reg: First register to be read from
+ * @count: Number of registers
+ * @buf: Pointer to store read value
+ *
+ * This function reads from multiple registers which are in
+ * sequential order
+ *
+ * Return: Zero on success, a negative error number on failure
+ */
+static inline int si5324_bulk_read(struct si5324_driver_data *drvdata,
+ u8 reg, u8 count, u8 *buf)
+{
+ return regmap_bulk_read(drvdata->regmap, reg, buf, count);
+}
+
+/**
+ * si5324_reg_write - Write a single si5324 register.
+ *
+ * @drvdata: Device to write to.
+ * @reg: Register to write to.
+ * @val: Value to write.
+ *
+ * This function writes into a single register
+ *
+ * Return: Zero on success, a negative error number on failure.
+ *
+ */
+static inline int si5324_reg_write(struct si5324_driver_data *drvdata,
+ u8 reg, u8 val)
+{
+ int ret = regmap_write(drvdata->regmap, reg, val);
+
+ dev_dbg(&drvdata->client->dev, "%s 0x%02x @%02d\n", __func__,
+ (int)val, (int)reg);
+ return ret;
+}
+
+/**
+ * si5324_bulk_write - Write into multiple si5324 registers
+ *
+ * @drvdata: Device to write to
+ * @reg: First register
+ * @count: Number of registers
+ * @buf: Block of data to be written
+ *
+ * This function writes into multiple registers.
+ *
+ * Return: Zero on success, a negative error number on failure.
+ */
+static inline int si5324_bulk_write(struct si5324_driver_data *drvdata,
+ u8 reg, u8 count, const u8 *buf)
+{
+ return regmap_raw_write(drvdata->regmap, reg, buf, count);
+}
+
+/**
+ * si5324_set_bits - Set the value of a bitfield in a si5324 register
+ *
+ * @drvdata: Device to write to.
+ * @reg: Register to write to.
+ * @mask: Mask of bits to set.
+ * @val: Value to set (unshifted)
+ *
+ * This function set particular bits in register
+ *
+ * Return: Zero on success, a negative error number on failure.
+ */
+static inline int si5324_set_bits(struct si5324_driver_data *drvdata,
+ u8 reg, u8 mask, u8 val)
+{
+ return regmap_update_bits(drvdata->regmap, reg, mask, val);
+}
+
+/**
+ * si5324_bulk_scatter_write - Write into multiple si5324 registers
+ *
+ * @drvdata: Device to write to
+ * @count: Number of registers
+ * @buf: Register and data to write
+ *
+ * This function writes into multiple registers which need not
+ * be in sequential order.
+ *
+ * Return: Zero on success, a negative error number on failure
+ */
+static inline int
+si5324_bulk_scatter_write(struct si5324_driver_data *drvdata,
+ u8 count, const u8 *buf)
+{
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < count; i++) {
+ ret = si5324_reg_write(drvdata, buf[i * 2], buf[i * 2 + 1]);
+ if (ret)
+ return ret;
+ }
+ return ret;
+}
+
+/**
+ * si5324_initialize - Initializes si5324 device
+ *
+ * @drvdata: Device instance
+ *
+ * This function initializes si5324 with the following settings
+ * Keep reset asserted for 20ms
+ * 1. freerun mode
+ * 2. Disable output clocks during calibration
+ * 3. Clock selection mode : default value, manual
+ * 4. output signal format : LVDS for clkout1, disable clkout2
+ * 5. CS_CA pin is ignored
+ * 6. Set lock time to 13.3ms
+ * 7. Enables the fastlock.
+ *
+ * Return: Zero on success, negative number on failure.
+ */
+static int si5324_initialize(struct si5324_driver_data *drvdata)
+{
+ int ret = 0;
+
+ si5324_set_bits(drvdata, SI5324_RESET_CALIB,
+ SI5324_RST_ALL, SI5324_RST_ALL);
+ msleep(SI5324_RESET_DELAY_MS);
+ si5324_set_bits(drvdata, SI5324_RESET_CALIB, SI5324_RST_ALL, 0);
+ msleep(SI5324_RESET_DELAY_MS);
+
+ ret = si5324_reg_read(drvdata, SI5324_CONTROL);
+ if (ret < 0)
+ return ret;
+
+ si5324_reg_write(drvdata, SI5324_CONTROL,
+ (ret | SI5324_CONTROL_FREE_RUN));
+
+ ret = si5324_reg_read(drvdata, SI5324_CKSEL);
+ if (ret < 0)
+ return ret;
+
+ si5324_reg_write(drvdata, SI5324_CKSEL, (ret | SI5324_CKSEL_SQL_ICAL));
+ si5324_reg_write(drvdata, SI3324_AUTOSEL, SI5324_AUTOSEL_DEF);
+ si5324_reg_write(drvdata, SI5324_OUTPUT_SIGFMT,
+ SI5324_OUTPUT_SF1_DEFAULT);
+
+ ret = si5324_reg_read(drvdata, SI5324_DSBL_CLKOUT);
+ if (ret < 0)
+ return ret;
+
+ si5324_reg_write(drvdata, SI5324_DSBL_CLKOUT,
+ (ret | SI5324_DSBL_CLKOUT2));
+ ret = si5324_reg_read(drvdata, SI5324_POWERDOWN);
+ if (ret < 0)
+ return ret;
+
+ si5324_reg_write(drvdata, SI5324_POWERDOWN, (ret | SI5324_PD_CK2));
+ si5324_reg_write(drvdata, SI5324_FOS_LOCKT, SI5324_FOS_DEFAULT);
+
+ ret = si5324_reg_read(drvdata, SI5324_CK_ACTV_SEL);
+ if (ret < 0)
+ return ret;
+
+ si5324_reg_write(drvdata, SI5324_CK_ACTV_SEL, SI5324_CK_DEFAULT);
+ ret = si5324_reg_read(drvdata, SI5324_FASTLOCK);
+ if (ret < 0)
+ return ret;
+
+ si5324_reg_write(drvdata, SI5324_FASTLOCK, (ret | SI5324_FASTLOCK_EN));
+ return 0;
+}
+
+/**
+ * si5324_read_parameters - Reads clock divider parameters
+ *
+ * @drvdata: Device to read from
+ *
+ * This function reads the clock divider parameters into driver structure.
+ *
+ * Following table gives the buffer index, register number and
+ * register name with bit fields
+ * 0 25 N1_HS[2:0]
+ * 6 31 NC1_LS[19:16]
+ * 7 32 NC1_LS[15:8]
+ * 8 33 NC1_LS[7:0]
+ * 9 34 NC2_LS[19:16]
+ * 10 35 NC2_LS[15:8]
+ * 11 36 NC2_LS[7:0]
+ * 15 40 N2_HS[2:0] N2_LS[19:16]
+ * 16 41 N2_LS[15:8]
+ * 17 42 N2_LS[7:0]
+ * 18 43 N31[18:16]
+ * 19 44 N31[15:8]
+ * 20 45 N31[7:0]
+ * 21 46 N32[18:16]
+ * 22 47 N32[15:8]
+ * 23 48 N32[7:0]
+ */
+static void si5324_read_parameters(struct si5324_driver_data *drvdata)
+{
+ u8 buf[SI5324_PARAM_LEN];
+
+ si5324_bulk_read(drvdata, SI5324_N1_HS, SI5324_N1_PARAM_LEN, &buf[0]);
+ si5324_bulk_read(drvdata, SI5324_NC1_LS_H, SI5324_NC_PARAM_LEN,
+ &buf[6]);
+ si5324_bulk_read(drvdata, SI5324_N2_HS_LS_H, SI5324_N2_PARAM_LEN,
+ &buf[15]);
+
+ drvdata->params.n1_hs = (buf[0] >> SI5324_N1_HS_VAL_SHIFT);
+ drvdata->params.n1_hs += 4;
+
+ drvdata->params.nc1_ls = ((buf[6] & SI5324_DIV_LS_MASK) <<
+ SI5324_HSHIFT) | (buf[7] << SI5324_LSHIFT) |
+ buf[8];
+ drvdata->params.nc1_ls += 1;
+ drvdata->params.nc2_ls = ((buf[9] & SI5324_DIV_LS_MASK) <<
+ SI5324_HSHIFT) | (buf[10] << SI5324_LSHIFT) |
+ buf[11];
+ drvdata->params.nc2_ls += 1;
+ drvdata->params.n2_ls = ((buf[15] & SI5324_DIV_LS_MASK) <<
+ SI5324_HSHIFT) | (buf[16] << SI5324_LSHIFT) |
+ buf[17];
+ drvdata->params.n2_ls += 1;
+ drvdata->params.n2_hs = buf[15] >> SI5324_N2_HS_LS_H_VAL_SHIFT;
+ drvdata->params.n2_hs += 4;
+ drvdata->params.n31 = ((buf[18] & SI5324_DIV_LS_MASK) <<
+ SI5324_HSHIFT) | (buf[19] << SI5324_LSHIFT) |
+ buf[20];
+ drvdata->params.n31 += 1;
+ drvdata->params.n32 = ((buf[21] & SI5324_DIV_LS_MASK) <<
+ SI5324_HSHIFT) | (buf[22] << SI5324_LSHIFT) |
+ buf[23];
+ drvdata->params.n32 += 1;
+ drvdata->params.valid = 1;
+}
+
+static bool si5324_regmap_is_volatile(struct device *dev, unsigned int reg)
+{
+ return true;
+}
+
+/**
+ * si5324_regmap_is_readable - Checks the register is readable or not
+ *
+ * @dev: Registered device
+ * @reg: Register offset
+ *
+ * Checks the register is readable or not.
+ *
+ * Return: True if the register is readable, False if it is not readable.
+ */
+static bool si5324_regmap_is_readable(struct device *dev, unsigned int reg)
+{
+ if ((reg > SI5324_POWERDOWN && reg < SI5324_FOS_LOCKT) ||
+ (reg > SI5324_N1_HS && reg < SI5324_NC1_LS_H) ||
+ (reg > SI5324_NC2_LS_L && reg < SI5324_N2_HS_LS_H) ||
+ (reg > SI5324_N32_CLKIN_L && reg < SI5324_FOS_CLKIN_RATE) ||
+ (reg > SI5324_FOS_CLKIN_RATE && reg < SI5324_PLL_ACTV_CLK) ||
+ reg > SI5324_SKEW2)
+ return false;
+
+ return true;
+}
+
+/**
+ * si5324_regmap_is_writeable - Checks the register is writable or not
+ *
+ * @dev: Registered device
+ * @reg: Register offset
+ *
+ * Checks the register is writable or not.
+ *
+ * Return: True if the register is writeable, False if it's not writeable.
+ */
+static bool si5324_regmap_is_writeable(struct device *dev, unsigned int reg)
+{
+ if ((reg > SI5324_POWERDOWN && reg < SI5324_FOS_LOCKT) ||
+ (reg > SI5324_N1_HS && reg < SI5324_NC1_LS_H) ||
+ (reg > SI5324_NC2_LS_L && reg < SI5324_N2_HS_LS_H) ||
+ (reg > SI5324_N32_CLKIN_L && reg < SI5324_FOS_CLKIN_RATE) ||
+ (reg > SI5324_FOS_CLKIN_RATE && reg < SI5324_PLL_ACTV_CLK) ||
+ reg > SI5324_SKEW2 ||
+ (reg >= SI5324_PLL_ACTV_CLK && reg <= SI5324_CLKIN_LOL_STATUS) ||
+ (reg >= SI5324_PARTNO_H && reg <= SI5324_PARTNO_L))
+ return false;
+
+ return true;
+}
+
+static const struct regmap_config si5324_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .max_register = 144,
+ .writeable_reg = si5324_regmap_is_writeable,
+ .readable_reg = si5324_regmap_is_readable,
+ .volatile_reg = si5324_regmap_is_volatile,
+};
+
+static int si5324_xtal_prepare(struct clk_hw *hw)
+{
+ return 0;
+}
+
+static void si5324_xtal_unprepare(struct clk_hw *hw)
+{
+}
+
+static const struct clk_ops si5324_xtal_ops = {
+ .prepare = si5324_xtal_prepare,
+ .unprepare = si5324_xtal_unprepare,
+};
+
+/**
+ * si5324_clkin_prepare - Prepare the clkin
+ *
+ * @hw: Handle between common and hardware-specific interfaces
+ *
+ * This function enables the particular clk
+ *
+ * Return: Zero on success, a negative error number on failure.
+ */
+static int si5324_clkin_prepare(struct clk_hw *hw)
+{
+ int ret = 0;
+ struct si5324_driver_data *drvdata;
+ struct si5324_hw_data *hwdata =
+ container_of(hw, struct si5324_hw_data, hw);
+
+ if (hwdata->num == SI5324_CLKIN1) {
+ drvdata = container_of(hw, struct si5324_driver_data, clkin1);
+ ret = si5324_set_bits(drvdata, SI5324_CONTROL,
+ SI5324_CONTROL_FREE_RUN, 0);
+ ret = si5324_set_bits(drvdata, SI5324_POWERDOWN, SI5324_PD_CK1 |
+ SI5324_PD_CK2, SI5324_PD_CK2);
+ } else if (hwdata->num == SI5324_CLKIN2) {
+ drvdata = container_of(hw, struct si5324_driver_data, clkin2);
+ ret = si5324_set_bits(drvdata, SI5324_CONTROL,
+ SI5324_CONTROL_FREE_RUN, 0);
+ ret = si5324_set_bits(drvdata, SI5324_POWERDOWN, SI5324_PD_CK1 |
+ SI5324_PD_CK2, SI5324_PD_CK1);
+ }
+
+ return ret;
+}
+
+/**
+ * si5324_clkin_unprepare - Unprepare the clkin
+ *
+ * @hw: Clock hardware
+ *
+ * This function enables the particular clk.
+ */
+static void si5324_clkin_unprepare(struct clk_hw *hw)
+{
+ struct si5324_driver_data *drvdata;
+ struct si5324_hw_data *hwdata =
+ container_of(hw, struct si5324_hw_data, hw);
+
+ if (hwdata->num == SI5324_CLKIN1) {
+ drvdata = container_of(hw, struct si5324_driver_data, clkin1);
+ si5324_set_bits(drvdata, SI5324_POWERDOWN,
+ SI5324_PD_CK1 | SI5324_PD_CK2, SI5324_PD_CK1);
+ } else if (hwdata->num == SI5324_CLKIN2) {
+ drvdata = container_of(hw, struct si5324_driver_data, clkin2);
+ si5324_set_bits(drvdata, SI5324_POWERDOWN,
+ SI5324_PD_CK1 | SI5324_PD_CK2, SI5324_PD_CK1);
+ }
+}
+
/*
 * Stub: the input rate is not tracked here; always reports 0.
 * NOTE(review): callers presumably rely on the parent clock's rate —
 * confirm this is intentional.
 */
static unsigned long si5324_clkin_recalc_rate(struct clk_hw *hw,
					      unsigned long parent_rate)
{
	return 0;
}
+
/* clk_ops for the two reference clock inputs (clkin1/clkin2). */
static const struct clk_ops si5324_clkin_ops = {
	.prepare = si5324_clkin_prepare,
	.unprepare = si5324_clkin_unprepare,
	.recalc_rate = si5324_clkin_recalc_rate,
};
+
+static int si5324_pll_reparent(struct si5324_driver_data *drvdata,
+ int num, enum si5324_pll_src parent)
+{
+ if (parent == SI5324_PLL_SRC_XTAL) {
+ si5324_set_bits(drvdata, SI5324_CONTROL,
+ SI5324_CONTROL_FREE_RUN,
+ SI5324_CONTROL_FREE_RUN);
+ si5324_set_bits(drvdata, SI5324_POWERDOWN,
+ SI5324_PD_CK1 | SI5324_PD_CK2, SI5324_PD_CK1);
+ si5324_set_bits(drvdata, SI5324_CKSEL,
+ SI5324_CK_SEL << SI5324_CKSEL_SHIFT,
+ 1 << SI5324_CKSEL_SHIFT);
+ } else if (parent == SI5324_PLL_SRC_CLKIN1) {
+ si5324_set_bits(drvdata, SI5324_CONTROL,
+ SI5324_CONTROL_FREE_RUN, 0);
+ si5324_set_bits(drvdata, SI5324_POWERDOWN,
+ SI5324_PD_CK1 | SI5324_PD_CK2, SI5324_PD_CK2);
+ si5324_set_bits(drvdata, SI5324_CKSEL,
+ SI5324_CK_SEL << SI5324_CKSEL_SHIFT, 0);
+ } else if (parent == SI5324_PLL_SRC_CLKIN2) {
+ si5324_set_bits(drvdata, SI5324_CONTROL,
+ SI5324_CONTROL_FREE_RUN, 0);
+ si5324_set_bits(drvdata, SI5324_POWERDOWN,
+ SI5324_PD_CK1 | SI5324_PD_CK2, SI5324_PD_CK1);
+ si5324_set_bits(drvdata, SI5324_CKSEL,
+ SI5324_CK_SEL << SI5324_CKSEL_SHIFT,
+ 1 << SI5324_CKSEL_SHIFT);
+ }
+
+ return 0;
+}
+
/*
 * Stub: always reports parent index 0.
 * NOTE(review): does not read back SI5324_CKSEL — the framework's view of
 * the mux may disagree with hardware; confirm this is acceptable.
 */
static unsigned char si5324_pll_get_parent(struct clk_hw *hw)
{
	return 0;
}
+
+/**
+ * si5324_pll_set_parent - Set parent of clock
+ *
+ * @hw: Handle between common and hardware-specific interfaces
+ * @index: Parent index
+ *
+ * This function sets the paraent of clock.
+ *
+ * Return: 0 on success, negative error number on failure
+ */
+static int si5324_pll_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct si5324_hw_data *hwdata =
+ container_of(hw, struct si5324_hw_data, hw);
+ enum si5324_pll_src parent;
+
+ if (index == SI5324_SRC_XTAL)
+ parent = SI5324_PLL_SRC_XTAL;
+ else if (index == SI5324_SRC_CLKIN1)
+ parent = SI5324_PLL_SRC_CLKIN1;
+ else if (index == SI5324_SRC_CLKIN2)
+ parent = SI5324_PLL_SRC_CLKIN2;
+ else
+ return -EINVAL;
+
+ return si5324_pll_reparent(hwdata->drvdata, hwdata->num, parent);
+}
+
/**
 * si5324_pll_recalc_rate - Recalculate clock frequency
 *
 * @hw: Handle between common and hardware-specific interfaces
 * @parent_rate: Clock frequency of parent clock
 *
 * Computes the PLL output as parent_rate * N2_LS * N2_HS, reading the
 * divider parameters from hardware first if the cached copy is stale.
 *
 * Return: Current clock frequency
 */
static unsigned long si5324_pll_recalc_rate(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	unsigned long rate;
	struct si5324_hw_data *hwdata =
		container_of(hw, struct si5324_hw_data, hw);

	/* Refresh the divider cache from the device when invalidated. */
	if (!hwdata->drvdata->params.valid)
		si5324_read_parameters(hwdata->drvdata);
	WARN_ON(!hwdata->drvdata->params.valid);

	/* NOTE(review): may overflow unsigned long for large multipliers
	 * on 32-bit — confirm expected rate range.
	 */
	rate = parent_rate * hwdata->drvdata->params.n2_ls *
		hwdata->drvdata->params.n2_hs;

	dev_dbg(&hwdata->drvdata->client->dev,
		"%s - %s: n2_ls = %u, n2_hs = %u, parent_rate = %lu, rate = %lu\n",
		__func__, clk_hw_get_name(hw),
		hwdata->drvdata->params.n2_ls, hwdata->drvdata->params.n2_hs,
		parent_rate, (unsigned long)rate);

	return rate;
}
+
/* Stub: accept any requested PLL rate unchanged. */
static long si5324_pll_round_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long *parent_rate)
{
	return rate;
}
+
/* Stub: PLL rate is programmed via the clkout path, not here. */
static int si5324_pll_set_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long parent_rate)
{
	return 0;
}
+
/* clk_ops for the internal PLL (muxable between xtal/clkin1/clkin2). */
static const struct clk_ops si5324_pll_ops = {
	.set_parent = si5324_pll_set_parent,
	.get_parent = si5324_pll_get_parent,
	.recalc_rate = si5324_pll_recalc_rate,
	.round_rate = si5324_pll_round_rate,
	.set_rate = si5324_pll_set_rate,
};
+
/* Stub: drive strength programming not implemented; always succeeds. */
static int si5324_clkout_set_drive_strength(
	struct si5324_driver_data *drvdata, int num,
	enum si5324_drive_strength drive)
{
	return 0;
}
+
/* No-op: outputs need no explicit prepare step. */
static int si5324_clkout_prepare(struct clk_hw *hw)
{
	return 0;
}
+
/* No-op counterpart of si5324_clkout_prepare(). */
static void si5324_clkout_unprepare(struct clk_hw *hw)
{
}
+
+static unsigned long si5324_clkout_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ unsigned long rate;
+
+ struct si5324_hw_data *hwdata =
+ container_of(hw, struct si5324_hw_data, hw);
+
+ rate = hwdata->drvdata->rate_clkout0;
+
+ return rate;
+}
+
/**
 * si5324_clkout_round_rate - selects the closest value to requested one.
 *
 * @hw: Handle between common and hardware-specific interfaces
 * @rate: Clock rate
 * @parent_rate: Parent clock rate
 *
 * Runs the frequency-plan solver for the requested rate and returns the
 * rate the hardware can actually produce; divider settings are discarded
 * here and recomputed in set_rate.
 *
 * Return: Clock rate on success, negative error number on failure
 */
static long si5324_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long *parent_rate)
{
	u32 ncn_ls, n2_ls, n3n, actual_rate;
	u8 n1_hs, n2_hs, bwsel;
	int ret;

	ret = si5324_calcfreqsettings(SI5324_REF_CLOCK, rate, &actual_rate,
				      &n1_hs, &ncn_ls, &n2_hs, &n2_ls, &n3n,
				      &bwsel);
	if (ret < 0)
		return ret;

	return actual_rate;
}
+
+static int si5324_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct si5324_hw_data *hwdata =
+ container_of(hw, struct si5324_hw_data, hw);
+
+ u32 ncn_ls, n2_ls, n3n, actual_rate;
+ u8 n1_hs, n2_hs, bwsel, buf[SI5324_OUT_REGS * 2];
+ int i, ret, rc;
+
+ ret = si5324_calcfreqsettings(SI5324_REF_CLOCK, rate, &actual_rate,
+ &n1_hs, &ncn_ls, &n2_hs, &n2_ls, &n3n,
+ &bwsel);
+ if (ret < 0)
+ return ret;
+
+ hwdata->drvdata->rate_clkout0 = rate;
+ i = 0;
+
+ /* Enable Free running mode */
+ buf[i] = SI5324_CONTROL;
+ buf[i + 1] = SI5324_FREE_RUN_EN;
+ i += 2;
+
+ /* Loop bandwidth */
+ buf[i] = SI5324_BWSEL;
+ buf[i + 1] = (bwsel << SI5324_BWSEL_SHIFT) | SI5324_BWSEL_DEF_VAL;
+ i += 2;
+
+ /* Enable reference clock 2 in free running mode */
+ buf[i] = SI5324_POWERDOWN;
+ /* Enable input clock 2, Disable input clock 1 */
+ buf[i + 1] = SI5324_PD_CK1_DIS;
+ i += 2;
+
+ /* N1_HS */
+ buf[i] = SI5324_N1_HS;
+ buf[i + 1] = n1_hs << SI5324_N1_HS_VAL_SHIFT;
+ i += 2;
+
+ /* NC1_LS */
+ buf[i] = SI5324_NC1_LS_H;
+ buf[i + 1] = (u8)((ncn_ls & 0x000F0000) >> 16);
+ buf[i + 2] = SI5324_NC1_LS_M;
+ buf[i + 3] = (u8)((ncn_ls & 0x0000FF00) >> 8);
+ buf[i + 4] = SI5324_NC1_LS_L;
+ buf[i + 5] = (u8)(ncn_ls & 0x000000FF);
+ i += 6;
+
+ /* N2_HS and N2_LS */
+ buf[i] = SI5324_N2_HS_LS_H;
+ buf[i + 1] = (n2_hs << SI5324_N2_HS_LS_H_VAL_SHIFT);
+ buf[i + 1] |= (u8)((n2_ls & 0x000F0000) >> 16);
+ buf[i + 2] = SI5324_N2_LS_H;
+ buf[i + 3] = (u8)((n2_ls & 0x0000FF00) >> 8);
+ buf[i + 4] = SI5324_N2_LS_L;
+ buf[i + 5] = (u8)(n2_ls & 0x000000FF);
+ i += 6;
+
+ /* N32 (CLKIN2 or XTAL in FREERUNNING mode) */
+ buf[i] = SI5324_N32_CLKIN_H;
+ buf[i + 2] = SI5324_N32_CLKIN_M;
+ buf[i + 4] = SI5324_N32_CLKIN_L;
+ buf[i + 1] = (u8)((n3n & 0x00070000) >> 16);
+ buf[i + 3] = (u8)((n3n & 0x0000FF00) >> 8);
+ buf[i + 5] = (u8)(n3n & 0x000000FF);
+ i += 6;
+
+ /* Start calibration */
+ buf[i] = SI5324_RESET_CALIB;
+ buf[i + 1] = SI5324_CALIB_EN;
+ i += 2;
+
+ hwdata->drvdata->params.valid = 0;
+ rc = si5324_bulk_scatter_write(hwdata->drvdata, SI5324_OUT_REGS, buf);
+
+ return rc;
+}
+
/* clk_ops for the clock outputs. */
static const struct clk_ops si5324_clkout_ops = {
	.prepare = si5324_clkout_prepare,
	.unprepare = si5324_clkout_unprepare,
	.recalc_rate = si5324_clkout_recalc_rate,
	.round_rate = si5324_clkout_round_rate,
	.set_rate = si5324_clkout_set_rate,
};
+
/* Devicetree match table: the three supported Si53xx variants. */
static const struct of_device_id si5324_dt_ids[] = {
	{ .compatible = "silabs,si5319" },
	{ .compatible = "silabs,si5324" },
	{ .compatible = "silabs,si5328" },
	{ }
};
MODULE_DEVICE_TABLE(of, si5324_dt_ids);
+
/*
 * si5324_dt_parse - build platform data from the device tree node.
 *
 * Parses the optional "silabs,pll-source" property (a single <num src>
 * pair; src 0 = xtal, 1 = clkin1, 2 = clkin2) and per-clkout child nodes
 * ("reg", optional "silabs,drive-strength", mandatory "clock-frequency"),
 * storing the result in client->dev.platform_data.
 *
 * Returns 0 on success (including a missing DT node), -ENOMEM or -EINVAL
 * on failure.
 */
static int si5324_dt_parse(struct i2c_client *client)
{
	struct device_node *child, *np = client->dev.of_node;
	struct si5324_platform_data *pdata;
	struct property *prop;
	const __be32 *p;
	int num = 0;
	u32 val;

	/* No DT node: nothing to parse, probe will reject missing pdata. */
	if (!np)
		return 0;

	pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	/*
	 * property silabs,pll-source : <num src>, [<..>]
	 * allow to selectively set pll source
	 */
	of_property_for_each_u32(np, "silabs,pll-source", prop, p, num) {
		/* There is only one PLL, so only index 0 is valid. */
		if (num >= 1) {
			dev_err(&client->dev,
				"invalid pll %d on pll-source prop\n", num);
			return -EINVAL;
		}
		/* Advance to the second cell of the pair: the source id. */
		p = of_prop_next_u32(prop, p, &val);
		if (!p) {
			dev_err(&client->dev,
				"missing pll-source for pll %d\n", num);
			return -EINVAL;
		}

		switch (val) {
		case 0:
			dev_dbg(&client->dev, "using xtal as parent for pll\n");
			pdata->pll_src = SI5324_PLL_SRC_XTAL;
			break;
		case 1:
			dev_dbg(&client->dev,
				"using clkin1 as parent for pll\n");
			pdata->pll_src = SI5324_PLL_SRC_CLKIN1;
			break;
		case 2:
			dev_dbg(&client->dev,
				"using clkin2 as parent for pll\n");
			pdata->pll_src = SI5324_PLL_SRC_CLKIN2;
			break;
		default:
			dev_err(&client->dev,
				"invalid parent %d for pll %d\n", val, num);
			return -EINVAL;
		}
	}
	/* per clkout properties */
	for_each_child_of_node(np, child) {
		if (of_property_read_u32(child, "reg", &num)) {
			dev_err(&client->dev, "missing reg property of %s\n",
				child->name);
			goto put_child;
		}

		/* Only SI5324_MAX_CLKOUTS (2) outputs exist. */
		if (num >= 2) {
			dev_err(&client->dev, "invalid clkout %d\n", num);
			goto put_child;
		}

		if (!of_property_read_u32(child, "silabs,drive-strength",
					  &val)) {
			switch (val) {
			case SI5324_DRIVE_2MA:
			case SI5324_DRIVE_4MA:
			case SI5324_DRIVE_6MA:
			case SI5324_DRIVE_8MA:
				pdata->clkout[num].drive = val;
				break;
			default:
				dev_err(&client->dev,
					"invalid drive strength %d for clkout %d\n",
					val, num);
				goto put_child;
			}
		}

		if (!of_property_read_u32(child, "clock-frequency", &val)) {
			dev_dbg(&client->dev, "clock-frequency = %u\n", val);
			pdata->clkout[num].rate = val;
		} else {
			dev_err(&client->dev,
				"missing clock-frequency property of %s\n",
				child->name);
			goto put_child;
		}
	}
	client->dev.platform_data = pdata;

	return 0;
put_child:
	/* Drop the reference held by the child-node iterator on early exit. */
	of_node_put(child);
	return -EINVAL;
}
+
+static u8 instance;
+
/*
 * si5324_i2c_probe - bind the driver and register the clock tree.
 *
 * Parses DT into platform data, initializes the device over regmap, then
 * registers, in order: xtal gate, clkin1/clkin2 gates, the PLL mux, and
 * the two output dividers, finally exposing the outputs through a
 * onecell clock provider. Registration order matters: each clock's
 * parents must already be registered by name.
 */
static int si5324_i2c_probe(struct i2c_client *client,
			    const struct i2c_device_id *id)
{
	struct si5324_platform_data *pdata;
	struct si5324_driver_data *drvdata;
	struct clk_init_data init;
	struct clk *clk;
	const char *parent_names[3];
	char inst_names[NUM_NAME_IDS][MAX_NAME_LEN];
	u8 num_parents, num_clocks;
	int ret, n;
	enum si53xx_variant variant = id->driver_data;

	if (variant > si5328) {
		dev_err(&client->dev, "si53xx device not present\n");
		return -ENODEV;
	}

	dev_info(&client->dev, "%s probed\n", si53xx_variant_name[variant]);
	ret = si5324_dt_parse(client);
	if (ret)
		return ret;

	pdata = client->dev.platform_data;
	if (!pdata)
		return -EINVAL;

	drvdata = devm_kzalloc(&client->dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	/* Input clocks are optional; only probe deferral is fatal here. */
	drvdata->client = client;
	drvdata->pxtal = devm_clk_get(&client->dev, "xtal");
	drvdata->pclkin1 = devm_clk_get(&client->dev, "clkin1");
	drvdata->pclkin2 = devm_clk_get(&client->dev, "clkin2");

	if (PTR_ERR(drvdata->pxtal) == -EPROBE_DEFER ||
	    PTR_ERR(drvdata->pclkin1) == -EPROBE_DEFER ||
	    PTR_ERR(drvdata->pclkin2) == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	drvdata->regmap = devm_regmap_init_i2c(client, &si5324_regmap_config);
	if (IS_ERR(drvdata->regmap)) {
		dev_err(&client->dev, "failed to allocate register map\n");
		return PTR_ERR(drvdata->regmap);
	}

	i2c_set_clientdata(client, drvdata);
	si5324_initialize(drvdata);

	/* setup input clock configuration */
	ret = si5324_pll_reparent(drvdata, 0, pdata->pll_src);
	if (ret) {
		dev_err(&client->dev,
			"failed to reparent pll to %d\n",
			pdata->pll_src);
		return ret;
	}

	for (n = 0; n < SI5324_MAX_CLKOUTS; n++) {
		ret = si5324_clkout_set_drive_strength(drvdata, n,
						       pdata->clkout[n].drive);
		if (ret) {
			dev_err(&client->dev,
				"failed set drive strength of clkout%d to %d\n",
				n, pdata->clkout[n].drive);
			return ret;
		}
	}

	if (!IS_ERR(drvdata->pxtal))
		clk_prepare_enable(drvdata->pxtal);
	if (!IS_ERR(drvdata->pclkin1))
		clk_prepare_enable(drvdata->pclkin1);
	if (!IS_ERR(drvdata->pclkin2))
		clk_prepare_enable(drvdata->pclkin2);

	/* create instance names by appending instance id */
	/* NOTE(review): sprintf into MAX_NAME_LEN(11)-byte buffers — verify
	 * the base names plus "_%d" cannot overflow for large instance ids.
	 */
	for (n = 0; n < SI5324_SRC_CLKS; n++) {
		sprintf(inst_names[n], "%s_%d", si5324_input_names[n],
			instance);
	}
	sprintf(inst_names[3], "%s_%d", si5324_pll_name, instance);
	for (n = 0; n < SI5324_MAX_CLKOUTS; n++) {
		sprintf(inst_names[n + 4], "%s_%d", si5324_clkout_names[n],
			instance);
	}

	/* register xtal input clock gate */
	memset(&init, 0, sizeof(init));
	init.name = inst_names[0];
	init.ops = &si5324_xtal_ops;
	init.flags = 0;

	if (!IS_ERR(drvdata->pxtal)) {
		drvdata->pxtal_name = __clk_get_name(drvdata->pxtal);
		init.parent_names = &drvdata->pxtal_name;
		init.num_parents = 1;
	}
	drvdata->xtal.init = &init;

	clk = devm_clk_register(&client->dev, &drvdata->xtal);
	if (IS_ERR(clk)) {
		dev_err(&client->dev, "unable to register %s\n", init.name);
		ret = PTR_ERR(clk);
		goto err_clk;
	}

	/* register clkin1 input clock gate */
	memset(&init, 0, sizeof(init));
	init.name = inst_names[1];
	init.ops = &si5324_clkin_ops;
	if (!IS_ERR(drvdata->pclkin1)) {
		drvdata->pclkin1_name = __clk_get_name(drvdata->pclkin1);
		init.parent_names = &drvdata->pclkin1_name;
		init.num_parents = 1;
	}

	drvdata->clkin1.init = &init;
	clk = devm_clk_register(&client->dev, &drvdata->clkin1);
	if (IS_ERR(clk)) {
		dev_err(&client->dev, "unable to register %s\n",
			init.name);
		ret = PTR_ERR(clk);
		goto err_clk;
	}

	/* register clkin2 input clock gate */
	memset(&init, 0, sizeof(init));
	init.name = inst_names[2];
	init.ops = &si5324_clkin_ops;
	if (!IS_ERR(drvdata->pclkin2)) {
		drvdata->pclkin2_name = __clk_get_name(drvdata->pclkin2);
		init.parent_names = &drvdata->pclkin2_name;
		init.num_parents = 1;
	}

	drvdata->clkin2.init = &init;
	clk = devm_clk_register(&client->dev, &drvdata->clkin2);
	if (IS_ERR(clk)) {
		dev_err(&client->dev, "unable to register %s\n",
			init.name);
		ret = PTR_ERR(clk);
		goto err_clk;
	}

	/* Si5324 allows to mux xtal or clkin1 or clkin2 to PLL input */
	num_parents = SI5324_SRC_CLKS;
	parent_names[0] = inst_names[0];
	parent_names[1] = inst_names[1];
	parent_names[2] = inst_names[2];

	/* register PLL */
	/* init is (re)filled below before devm_clk_register reads it */
	drvdata->pll.drvdata = drvdata;
	drvdata->pll.hw.init = &init;
	memset(&init, 0, sizeof(init));
	init.name = inst_names[3];
	init.ops = &si5324_pll_ops;
	init.flags = 0;
	init.flags |= CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;
	init.parent_names = parent_names;
	init.num_parents = num_parents;

	clk = devm_clk_register(&client->dev, &drvdata->pll.hw);
	if (IS_ERR(clk)) {
		dev_err(&client->dev, "unable to register %s\n", init.name);
		ret = PTR_ERR(clk);
		goto err_clk;
	}

	/* register clk out divider */
	num_clocks = 2;
	num_parents = 1;
	parent_names[0] = inst_names[3];

	drvdata->clkout = devm_kzalloc(&client->dev, num_clocks *
				       sizeof(*drvdata->clkout), GFP_KERNEL);

	drvdata->onecell.clk_num = num_clocks;
	drvdata->onecell.clks = devm_kzalloc(&client->dev,
					     num_clocks *
					     sizeof(*drvdata->onecell.clks),
					     GFP_KERNEL);

	if (WARN_ON(!drvdata->clkout) || !drvdata->onecell.clks) {
		ret = -ENOMEM;
		goto err_clk;
	}

	for (n = 0; n < num_clocks; n++) {
		drvdata->clkout[n].num = n;
		drvdata->clkout[n].drvdata = drvdata;
		drvdata->clkout[n].hw.init = &init;
		memset(&init, 0, sizeof(init));
		init.name = inst_names[4 + n];
		init.ops = &si5324_clkout_ops;
		init.flags = 0;
		init.flags |= CLK_SET_RATE_PARENT;
		init.parent_names = parent_names;
		init.num_parents = num_parents;

		clk = devm_clk_register(&client->dev, &drvdata->clkout[n].hw);
		if (IS_ERR(clk)) {
			dev_err(&client->dev, "unable to register %s\n",
				init.name);
			ret = PTR_ERR(clk);
			goto err_clk;
		}
		/* refer to output clock in onecell */
		drvdata->onecell.clks[n] = clk;

		/* set initial clkout rate */
		if (pdata->clkout[n].rate != 0) {
			int ret;

			ret = clk_set_rate(clk, pdata->clkout[n].rate);
			if (ret != 0) {
				dev_err(&client->dev, "Cannot set rate : %d\n",
					ret);
			}
		}
	}

	ret = of_clk_add_provider(client->dev.of_node, of_clk_src_onecell_get,
				  &drvdata->onecell);
	if (ret) {
		dev_err(&client->dev, "unable to add clk provider\n");
		goto err_clk;
	}

	dev_info(&client->dev, "%s probe successful\n",
		 si53xx_variant_name[variant]);
	instance++;
	return 0;

err_clk:
	/* All err_clk jumps occur after the enables above, so undo them. */
	if (!IS_ERR(drvdata->pxtal))
		clk_disable_unprepare(drvdata->pxtal);
	if (!IS_ERR(drvdata->pclkin1))
		clk_disable_unprepare(drvdata->pclkin1);
	if (!IS_ERR(drvdata->pclkin2))
		clk_disable_unprepare(drvdata->pclkin2);

	return ret;
}
+
/*
 * Unregister the clock provider on unbind.
 * NOTE(review): input clocks enabled in probe are not disabled here —
 * possible enable-count leak on driver removal; verify against probe.
 */
static int si5324_i2c_remove(struct i2c_client *client)
{
	of_clk_del_provider(client->dev.of_node);
	return 0;
}
+
/* I2C id table; driver_data carries the si53xx_variant enum. */
static const struct i2c_device_id si5324_i2c_ids[] = {
	{ "si5319", si5319 },
	{ "si5324", si5324 },
	{ "si5328", si5328 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, si5324_i2c_ids);
+
/* I2C driver binding for the Si5319/Si5324/Si5328 family. */
static struct i2c_driver si5324_driver = {
	.driver = {
		.name = "si5324",
		.of_match_table = of_match_ptr(si5324_dt_ids),
	},
	.probe = si5324_i2c_probe,
	.remove = si5324_i2c_remove,
	.id_table = si5324_i2c_ids,
};
module_i2c_driver(si5324_driver);
+
+MODULE_AUTHOR("Venkateshwar Rao G <vgannava@xilinx.com>");
+MODULE_DESCRIPTION("Silicon Labs 5319/5324/5328 clock driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/clk-si5324.h b/drivers/clk/clk-si5324.h
new file mode 100644
index 000000000000..48e62a67f56e
--- /dev/null
+++ b/drivers/clk/clk-si5324.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Si5324 clock driver
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ *
+ * Authors: Leon Woestenberg <leon@sidebranch.com>
+ * Venkateshwar Rao <vgannava@xilinx.com>
+ */
+
#ifndef _CLK_SI5324_H_
#define _CLK_SI5324_H_

/* Default I2C slave address of the Si5324. */
#define SI5324_BUS_BASE_ADDR			0x68

/* Register 0: control; FREE_RUN selects internal-reference operation. */
#define SI5324_CONTROL				0
#define SI5324_CONTROL_FREE_RUN			BIT(6)
#define SI5324_FREE_RUN_EN			0x54

#define SI5324_INCK_PRIOR			1
#define SI5324_INCK_PRIOR_1_MASK		0xC
/* NOTE(review): "PRIOI" looks like a typo for "PRIOR" — rename cautiously,
 * other files may reference this name. */
#define SI5324_INCK_PRIOI_2_MASK		0x3

#define SI5324_BWSEL				2
#define SI5324_BWSEL_MASK			0xF0
#define SI5324_BWSEL_SHIFT			4
#define SI5324_BWSEL_DEF_VAL			2

/* Register 3: input clock selection mux. */
#define SI5324_CKSEL				3
#define SI5324_CKSEL_SQL_ICAL			BIT(4)
#define SI5324_CKSEL_SHIFT			6
#define SI5324_CK_SEL				3

#define SI3324_AUTOSEL				4
#define SI5324_AUTOSEL_DEF			0x12

#define SI5324_ICMOS				5
#define SI5324_OUTPUT_SIGFMT			6
#define SI5324_OUTPUT_SF1_DEFAULT		0xF
#define SI5324_REFFRE_FOS			7
#define SI5324_HLOG				8
#define SI5324_AVG_HIST				9
#define SI5324_DSBL_CLKOUT			10
#define SI5324_DSBL_CLKOUT2			BIT(3)
/* Register 11: per-input power-down bits. */
#define SI5324_POWERDOWN			11
#define SI5324_PD_CK1				BIT(0)
#define SI5324_PD_CK2				BIT(1)
#define SI5324_PD_CK1_DIS			0x41
#define SI5324_PD_CK2_DIS			0x42
#define SI5324_FOS_LOCKT			19
#define SI5324_FOS_DEFAULT			0x23
#define SI5324_CK_ACTV_SEL			21
#define SI5324_CK_DEFAULT			0xFC
#define SI5324_CK_ACTV				BIT(1)
#define SI5324_CK_SELPIN			BIT(1)
#define SI5324_LOS_MSK				23
#define SI5324_FOS_L0L_MASK			24

/* output clock dividers */
#define SI5324_N1_HS				25
#define SI5324_N1_HS_VAL_SHIFT			5
#define SI5324_HSHIFT				16
#define SI5324_LSHIFT				8
#define SI5324_NC1_LS_H				31
#define SI5324_NC1_LS_M				32
#define SI5324_NC1_LS_L				33
#define SI5324_DIV_LS_MASK			0x0F
#define SI5324_DIV_HS_MASK			0xF0
#define SI5324_NC2_LS_H				34
#define SI5324_NC2_LS_M				35
#define SI5324_NC2_LS_L				36

#define SI5324_N2_HS_LS_H			40
#define SI5324_N2_HS_LS_H_VAL_SHIFT		5
#define SI5324_N2_LS_H				41
#define SI5324_N2_LS_L				42
#define SI5324_N31_CLKIN_H			43
#define SI5324_N31_CLKIN_M			44
#define SI5324_N31_CLKIN_L			45
#define SI5324_N32_CLKIN_H			46
#define SI5324_N32_CLKIN_M			47
#define SI5324_N32_CLKIN_L			48
#define SI5324_FOS_CLKIN_RATE			55
/* Read-only status registers start at 128. */
#define SI5324_PLL_ACTV_CLK			128
#define SI5324_LOS_STATUS			129
#define SI5324_CLKIN_LOL_STATUS			130
#define SI5324_LOS_FLG				131
#define SI5324_FOS_FLG				132
#define SI5324_PARTNO_H				134
#define SI5324_PARTNO_L				135

#define SI5324_RESET_CALIB			136
#define SI5324_RST_ALL				BIT(7)
#define SI5324_CALIB_EN				BIT(6)

#define SI5324_FASTLOCK				137
#define SI5324_FASTLOCK_EN			BIT(0)
#define SI5324_LOS1_LOS2_EN			138
#define SI5324_SKEW1				142
#define SI5324_SKEW2				143

/* selects 2kHz to 710 MHz */
#define SI5324_CLKIN_MIN_FREQ			2000
#define SI5324_CLKIN_MAX_FREQ			(710 * 1000 * 1000)

/* generates 2kHz to 945 MHz */
#define SI5324_CLKOUT_MIN_FREQ			2000
#define SI5324_CLKOUT_MAX_FREQ			(945 * 1000 * 1000)

/* The following constants define the limits of the divider settings. */
#define SI5324_N1_HS_MIN			6
#define SI5324_N1_HS_MAX			11
#define SI5324_NC_LS_MIN			1
#define SI5324_NC_LS_MAX			0x100000
#define SI5324_N2_HS_MIN			4
#define SI5324_N2_HS_MAX			11
#define SI5324_N2_LS_MIN			2
#define SI5324_N2_LS_MAX			0x100000
#define SI5324_N3_MIN				1
#define SI5324_N3_MAX				0x080000

/* Parent indices as seen by the clk framework. */
#define SI5324_SRC_XTAL				0
#define SI5324_SRC_CLKIN1			1
#define SI5324_SRC_CLKIN2			2
#define SI5324_SRC_CLKS				3

#define SI5324_CLKIN1				0
#define SI5324_CLKIN2				1
#define SI5324_MAX_CLKOUTS			2
#define NUM_NAME_IDS				6 /* 3 clkin, 1 pll, 2 clkout */
#define MAX_NAME_LEN				11
#define SI5324_PARAM_LEN			24
#define SI5324_NC_PARAM_LEN			6
#define SI5324_OUT_REGS				14
#define SI5324_N1_PARAM_LEN			1
#define SI5324_N2_PARAM_LEN			9
#define SI5324_REF_CLOCK			114285000UL
#define SI5324_RESET_DELAY_MS			20

#endif
diff --git a/drivers/clk/clk-xlnx-clock-wizard-v.c b/drivers/clk/clk-xlnx-clock-wizard-v.c
new file mode 100644
index 000000000000..c033de43b036
--- /dev/null
+++ b/drivers/clk/clk-xlnx-clock-wizard-v.c
@@ -0,0 +1,513 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx 'Clocking Wizard' driver
+ *
+ * Copyright (C) 2020 Xilinx
+ *
+ * Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/err.h>
+
#define WZRD_NUM_OUTPUTS	7
#define WZRD_ACLK_MAX_FREQ	250000000UL

/* Versal clocking wizard configuration registers start at 0x330. */
#define WZRD_CLK_CFG_REG(n)	(0x330 + 4 * (n))

#define WZRD_CLKFBOUT_FRAC_EN	BIT(1)

#define WZRD_CLKFBOUT_L_SHIFT	0
#define WZRD_CLKFBOUT_H_SHIFT	8
#define WZRD_CLKFBOUT_L_MASK	(0xff << WZRD_CLKFBOUT_L_SHIFT)
#define WZRD_CLKFBOUT_H_MASK	(0xff << WZRD_CLKFBOUT_H_SHIFT)
#define WZRD_CLKFBOUT_FRAC_SHIFT	16
#define WZRD_CLKFBOUT_FRAC_MASK	(0x3f)
#define WZRD_DIVCLK_DIVIDE_SHIFT	0
#define WZRD_DIVCLK_DIVIDE_MASK	(0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
#define WZRD_CLKOUT_DIVIDE_SHIFT	0
#define WZRD_CLKOUT_DIVIDE_WIDTH	8
/* NOTE(review): uses DIVCLK shift, not CLKOUT shift — both are 0, so
 * harmless, but looks like a copy-paste; confirm intent. */
#define WZRD_CLKOUT_DIVIDE_MASK	(0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
#define WZRD_CLKOUT_FRAC_SHIFT	8
#define WZRD_CLKOUT_FRAC_MASK	0x3ff

#define WZRD_DR_MAX_INT_DIV_VALUE	32767
#define WZRD_DR_NUM_RETRIES	10000
#define WZRD_DR_STATUS_REG_OFFSET	0x04
#define WZRD_DR_LOCK_BIT_MASK	0x00000001
#define WZRD_DR_INIT_REG_OFFSET	0x14
#define WZRD_DR_DIV_TO_PHASE_OFFSET	4
#define WZRD_DR_BEGIN_DYNA_RECONF	0x03

/* Get the mask from width */
#define div_mask(width)	((1 << (width)) - 1)

/* Extract divider instance from clock hardware instance */
#define to_clk_wzrd_divider(_hw) container_of(_hw, struct clk_wzrd_divider, hw)
+
/* Indices of the internal (non-exported) clocks in clks_internal[]. */
enum clk_wzrd_int_clks {
	wzrd_clk_mul,		/* fixed-factor multiplier stage */
	wzrd_clk_mul_div,	/* multiplier followed by divider stage */
	wzrd_clk_mul_frac,	/* fractional multiplier stage */
	wzrd_clk_int_max
};
+
/**
 * struct clk_wzrd - Clock wizard private data structure
 *
 * @clk_data: Clock data
 * @nb: Notifier block
 * @base: Memory base
 * @clk_in1: Handle to input clock 'clk_in1'
 * @axi_clk: Handle to input clock 's_axi_aclk'
 * @clks_internal: Internal clocks
 * @clkout: Output clocks
 * @suspended: Flag indicating power state of the device
 * @is_versal: Flag indicating if it is a Versal device
 */
struct clk_wzrd {
	struct clk_onecell_data clk_data;
	struct notifier_block nb;
	void __iomem *base;
	struct clk *clk_in1;
	struct clk *axi_clk;
	struct clk *clks_internal[wzrd_clk_int_max];
	struct clk *clkout[WZRD_NUM_OUTPUTS];
	bool suspended;
	bool is_versal;
};
+
/**
 * struct clk_wzrd_divider - clock divider specific to clk_wzrd
 *
 * @hw: handle between common and hardware-specific interfaces
 * @base: base address of register containing the divider
 * @offset: offset address of register containing the divider
 * @shift: shift to the divider bit field
 * @width: width of the divider bit field
 * @flags: clk_wzrd divider flags
 * @table: array of value/divider pairs, last entry should have div = 0
 * @lock: register lock
 */
struct clk_wzrd_divider {
	struct clk_hw hw;
	void __iomem *base;
	u16 offset;
	u8 shift;
	u8 width;
	u8 flags;
	const struct clk_div_table *table;
	spinlock_t *lock;  /* divider lock */
};
+
/* Recover the clk_wzrd instance from its embedded notifier block. */
#define to_clk_wzrd(_nb) container_of(_nb, struct clk_wzrd, nb)

/* spin lock variable for clk_wzrd */
static DEFINE_SPINLOCK(clkwzrd_lock);
+
+static unsigned long clk_wzrd_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+ void __iomem *div_addr =
+ (void __iomem *)((u64)divider->base + divider->offset);
+ unsigned int vall, valh;
+ u32 div;
+ u32 p5en, edge, prediv2;
+ u32 all;
+
+ edge = !!(readl(div_addr) & BIT(8));
+ p5en = !!(readl(div_addr) & BIT(13));
+ prediv2 = !!(readl(div_addr) & BIT(11));
+ vall = readl(div_addr + 4) & 0xff;
+ valh = readl(div_addr + 4) >> 8;
+ all = valh + vall + edge;
+ if (!all)
+ all = 1;
+ div = (prediv2 + 1) * (all + (prediv2 * p5en) / 2);
+ return DIV_ROUND_UP_ULL((u64)parent_rate, div);
+}
+
+static int clk_wzrd_dynamic_reconfig(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ int err = 0;
+ u16 retries;
+ u32 value;
+ unsigned long flags = 0;
+ u32 regh, edged;
+ u32 p5en, p5fedge;
+ u32 regval, regval1;
+ struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+ void __iomem *div_addr =
+ (void __iomem *)((u64)divider->base + divider->offset);
+ if (divider->lock)
+ spin_lock_irqsave(divider->lock, flags);
+ else
+ __acquire(divider->lock);
+
+ value = DIV_ROUND_CLOSEST(parent_rate, rate);
+ regh = (value / 4);
+ regh = regh * 2;
+ regval = regh | (regh << 8);
+ regval1 = readl(div_addr);
+ regval1 = regval1 & ~(BIT(8) | BIT(13) | BIT(15));
+ if (value % 4 > 1) {
+ edged = 1;
+ regval1 |= (edged << 8);
+ }
+ p5fedge = value % 2;
+ p5en = value % 2;
+ regval1 = regval1 | p5en << 13 | p5fedge << 15;
+ writel(regval1, div_addr);
+ regval = regh | regh << 8;
+ writel(regval, div_addr + 4);
+ /* Check status register */
+ retries = WZRD_DR_NUM_RETRIES;
+ while (retries--) {
+ if (readl(divider->base + WZRD_DR_STATUS_REG_OFFSET) &
+ WZRD_DR_LOCK_BIT_MASK)
+ break;
+ }
+
+ if (retries == 0) {
+ err = -ETIMEDOUT;
+ goto err_reconfig;
+ }
+
+ /* Initiate reconfiguration */
+ writel(WZRD_DR_BEGIN_DYNA_RECONF,
+ divider->base + WZRD_DR_INIT_REG_OFFSET);
+
+ /* Check status register */
+ retries = WZRD_DR_NUM_RETRIES;
+ while (retries--) {
+ if (readl(divider->base + WZRD_DR_STATUS_REG_OFFSET) &
+ WZRD_DR_LOCK_BIT_MASK)
+ break;
+ }
+
+ if (retries == 0) {
+ pr_err("NOT LOCKED\n");
+ err = -ETIMEDOUT;
+ }
+
+err_reconfig:
+ if (divider->lock)
+ spin_unlock_irqrestore(divider->lock, flags);
+ else
+ __release(divider->lock);
+
+ return err;
+}
+
+static long clk_wzrd_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ u8 div;
+
+ /*
+ * since we donot change parent rate we just round rate to closest
+ * achievable
+ */
+ div = DIV_ROUND_CLOSEST(*prate, rate);
+
+ return (*prate / div);
+}
+
/* clk_ops for the dynamically-reconfigurable output dividers. */
static const struct clk_ops clk_wzrd_clk_divider_ops = {
	.round_rate = clk_wzrd_round_rate,
	.set_rate = clk_wzrd_dynamic_reconfig,
	.recalc_rate = clk_wzrd_recalc_rate,
};
+
+static struct clk *clk_wzrd_register_divider(struct device *dev,
+ const char *name,
+ const char *parent_name,
+ unsigned long flags,
+ void __iomem *base, u16 offset,
+ u8 shift, u8 width,
+ u8 clk_divider_flags,
+ const struct clk_div_table *table,
+ spinlock_t *lock)
+{
+ struct clk_wzrd_divider *div;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ int ret;
+
+ if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) {
+ if (width + shift > 16) {
+ pr_warn("divider value exceeds LOWORD field\n");
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ /* allocate the divider */
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
+ init.ops = &clk_divider_ro_ops;
+ else
+ init.ops = &clk_wzrd_clk_divider_ops;
+ init.flags = flags;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ /* struct clk_divider assignments */
+ div->base = base;
+ div->offset = offset;
+ div->shift = shift;
+ div->width = width;
+ div->flags = clk_divider_flags;
+ div->lock = lock;
+ div->hw.init = &init;
+ div->table = table;
+
+ /* register the clock */
+ hw = &div->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
+ kfree(div);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw->clk;
+}
+
/* System suspend: gate the AXI interface clock and note the state. */
static int __maybe_unused clk_wzrd_suspend(struct device *dev)
{
	struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);

	clk_disable_unprepare(clk_wzrd->axi_clk);
	clk_wzrd->suspended = true;

	return 0;
}
+
/* System resume: re-enable the AXI interface clock. */
static int __maybe_unused clk_wzrd_resume(struct device *dev)
{
	int ret;
	struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);

	ret = clk_prepare_enable(clk_wzrd->axi_clk);
	if (ret) {
		dev_err(dev, "unable to enable s_axi_aclk\n");
		return ret;
	}

	clk_wzrd->suspended = false;

	return 0;
}
+
/* System sleep PM callbacks (no runtime PM). */
static SIMPLE_DEV_PM_OPS(clk_wzrd_dev_pm_ops, clk_wzrd_suspend,
			 clk_wzrd_resume);
+
+/*
+ * clk_wzrd_probe - map the IP, derive the fixed-factor multiplier and
+ * divider from the hardware configuration registers, then register one
+ * programmable divider per "clock-output-names" entry and expose them
+ * through a onecell clock provider.
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+static int clk_wzrd_probe(struct platform_device *pdev)
+{
+	int i, ret;
+	u32 regl, regh, edge, mult;
+	u32 regld, reghd, edged, div;
+	unsigned long rate;
+	const char *clk_name;
+	struct clk_wzrd *clk_wzrd;
+	struct resource *mem;
+	int outputs;
+	struct device_node *np = pdev->dev.of_node;
+
+	clk_wzrd = devm_kzalloc(&pdev->dev, sizeof(*clk_wzrd), GFP_KERNEL);
+	if (!clk_wzrd)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, clk_wzrd);
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	clk_wzrd->base = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(clk_wzrd->base))
+		return PTR_ERR(clk_wzrd->base);
+
+	clk_wzrd->clk_in1 = devm_clk_get(&pdev->dev, "clk_in1");
+	if (IS_ERR(clk_wzrd->clk_in1)) {
+		if (clk_wzrd->clk_in1 != ERR_PTR(-EPROBE_DEFER))
+			dev_err(&pdev->dev, "clk_in1 not found\n");
+		return PTR_ERR(clk_wzrd->clk_in1);
+	}
+
+	clk_wzrd->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+	if (IS_ERR(clk_wzrd->axi_clk)) {
+		if (clk_wzrd->axi_clk != ERR_PTR(-EPROBE_DEFER))
+			dev_err(&pdev->dev, "s_axi_aclk not found\n");
+		return PTR_ERR(clk_wzrd->axi_clk);
+	}
+	ret = clk_prepare_enable(clk_wzrd->axi_clk);
+	if (ret) {
+		dev_err(&pdev->dev, "enabling s_axi_aclk failed\n");
+		return ret;
+	}
+	rate = clk_get_rate(clk_wzrd->axi_clk);
+	if (rate > WZRD_ACLK_MAX_FREQ) {
+		dev_err(&pdev->dev, "s_axi_aclk frequency (%lu) too high\n",
+			rate);
+		ret = -EINVAL;
+		goto err_disable_clk;
+	}
+
+	/* register multiplier */
+	edge = !!(readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) & BIT(8));
+	regl = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(1)) &
+		     WZRD_CLKFBOUT_L_MASK) >> WZRD_CLKFBOUT_L_SHIFT;
+	regh = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(1)) &
+		     WZRD_CLKFBOUT_H_MASK) >> WZRD_CLKFBOUT_H_SHIFT;
+	mult = (regl + regh + edge);
+	if (!mult)
+		mult = 1;
+	/* scale by 64 so the fractional part below can be folded in */
+	mult = mult * 64;
+
+	regl = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(51)) &
+		     WZRD_CLKFBOUT_FRAC_EN;
+	if (!regl) {
+		regl = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(48)) &
+			WZRD_CLKFBOUT_FRAC_MASK;
+		mult = mult + regl;
+	}
+
+	clk_name = kasprintf(GFP_KERNEL, "%s_mul", dev_name(&pdev->dev));
+	if (!clk_name) {
+		ret = -ENOMEM;
+		goto err_disable_clk;
+	}
+	clk_wzrd->clks_internal[wzrd_clk_mul] = clk_register_fixed_factor
+			(&pdev->dev, clk_name,
+			 __clk_get_name(clk_wzrd->clk_in1),
+			 0, mult, 64);
+	kfree(clk_name);
+	if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul])) {
+		dev_err(&pdev->dev, "unable to register fixed-factor clock\n");
+		ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul]);
+		goto err_disable_clk;
+	}
+
+	outputs = of_property_count_strings(np, "clock-output-names");
+	clk_name = kasprintf(GFP_KERNEL, "%s_mul_div", dev_name(&pdev->dev));
+	if (!clk_name) {
+		ret = -ENOMEM;
+		goto err_rm_int_clk;
+	}
+
+	/*
+	 * Validate the DT-supplied count before it is used to index
+	 * clkout[]: a missing property yields a negative value and a
+	 * count above WZRD_NUM_OUTPUTS would overflow the array.
+	 */
+	if (outputs <= 0 || outputs > WZRD_NUM_OUTPUTS) {
+		dev_err(&pdev->dev, "invalid clock-output-names count\n");
+		ret = -EINVAL;
+		goto err_rm_int_clk;
+	}
+
+	/* register div */
+	edged = !!(readl(clk_wzrd->base + WZRD_CLK_CFG_REG(20)) &
+		     BIT(10));
+	regld = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(21)) &
+		     WZRD_CLKFBOUT_L_MASK) >> WZRD_CLKFBOUT_L_SHIFT;
+	reghd = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(21)) &
+		     WZRD_CLKFBOUT_H_MASK) >> WZRD_CLKFBOUT_H_SHIFT;
+	div = (regld + reghd + edged);
+	if (!div)
+		div = 1;
+	clk_wzrd->clks_internal[wzrd_clk_mul_div] = clk_register_fixed_factor
+			(&pdev->dev, clk_name,
+			 __clk_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]),
+			 0, 1, div);
+	if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div])) {
+		dev_err(&pdev->dev, "unable to register divider clock\n");
+		ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div]);
+		goto err_rm_int_clk;
+	}
+
+	/* register div per output (registered highest index first) */
+	for (i = outputs - 1; i >= 0 ; i--) {
+		const char *clkout_name;
+
+		if (of_property_read_string_index(np, "clock-output-names", i,
+						  &clkout_name)) {
+			int j;
+
+			/* drop outputs already registered by this loop */
+			for (j = i + 1; j < outputs; j++)
+				clk_unregister(clk_wzrd->clkout[j]);
+			dev_err(&pdev->dev,
+				"clock output name not specified\n");
+			ret = -EINVAL;
+			goto err_rm_int_clks;
+		}
+
+		clk_wzrd->clkout[i] = clk_wzrd_register_divider
+				(&pdev->dev, clkout_name,
+				 clk_name, 0,
+				 clk_wzrd->base, (WZRD_CLK_CFG_REG(3) + i * 8),
+				 WZRD_CLKOUT_DIVIDE_SHIFT,
+				 WZRD_CLKOUT_DIVIDE_WIDTH,
+				 CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+				 NULL, &clkwzrd_lock);
+		if (IS_ERR(clk_wzrd->clkout[i])) {
+			int j;
+
+			for (j = i + 1; j < outputs; j++)
+				clk_unregister(clk_wzrd->clkout[j]);
+			dev_err(&pdev->dev,
+				"unable to register divider clock\n");
+			ret = PTR_ERR(clk_wzrd->clkout[i]);
+			goto err_rm_int_clks;
+		}
+	}
+
+	kfree(clk_name);
+
+	/*
+	 * NOTE(review): clk_num covers the whole clkout[] array; entries
+	 * beyond 'outputs' stay NULL — confirm consumers never index them.
+	 */
+	clk_wzrd->clk_data.clks = clk_wzrd->clkout;
+	clk_wzrd->clk_data.clk_num = ARRAY_SIZE(clk_wzrd->clkout);
+	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_wzrd->clk_data);
+
+	return 0;
+
+err_rm_int_clks:
+	clk_unregister(clk_wzrd->clks_internal[1]);
+err_rm_int_clk:
+	kfree(clk_name);
+	clk_unregister(clk_wzrd->clks_internal[0]);
+err_disable_clk:
+	clk_disable_unprepare(clk_wzrd->axi_clk);
+
+	return ret;
+}
+
+/*
+ * clk_wzrd_remove - tear down in reverse order of probe: drop the
+ * provider, unregister output and internal clocks, gate the AXI clock.
+ */
+static int clk_wzrd_remove(struct platform_device *pdev)
+{
+	int i;
+	struct clk_wzrd *clk_wzrd = platform_get_drvdata(pdev);
+
+	of_clk_del_provider(pdev->dev.of_node);
+
+	/*
+	 * NOTE(review): iterates the full arrays; entries never registered
+	 * by probe are NULL — confirm clk_unregister tolerates NULL here.
+	 */
+	for (i = 0; i < WZRD_NUM_OUTPUTS; i++)
+		clk_unregister(clk_wzrd->clkout[i]);
+	for (i = 0; i < wzrd_clk_int_max; i++)
+		clk_unregister(clk_wzrd->clks_internal[i]);
+
+	clk_disable_unprepare(clk_wzrd->axi_clk);
+
+	return 0;
+}
+
+/* Devicetree match table (Versal clocking wizard variant) */
+static const struct of_device_id clk_wzrd_ids[] = {
+	{ .compatible = "xlnx,clk-wizard-1.0" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, clk_wzrd_ids);
+
+/*
+ * NOTE(review): the driver name "clk-wizard" is also used by the
+ * non-Versal clocking-wizard driver in this series — verify both are
+ * never built into the same kernel, or rename one.
+ */
+static struct platform_driver clk_wzrd_driver = {
+	.driver = {
+		.name = "clk-wizard",
+		.of_match_table = clk_wzrd_ids,
+		.pm = &clk_wzrd_dev_pm_ops,
+	},
+	.probe = clk_wzrd_probe,
+	.remove = clk_wzrd_remove,
+};
+module_platform_driver(clk_wzrd_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>");
+MODULE_DESCRIPTION("Driver for the Versal Clocking Wizard IP core");
diff --git a/drivers/clk/clk-xlnx-clock-wizard.c b/drivers/clk/clk-xlnx-clock-wizard.c
new file mode 100644
index 000000000000..d8e4acd1fd07
--- /dev/null
+++ b/drivers/clk/clk-xlnx-clock-wizard.c
@@ -0,0 +1,707 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx 'Clocking Wizard' driver
+ *
+ * Copyright (C) 2013 - 2014 Xilinx
+ *
+ * Sören Brinkmann <soren.brinkmann@xilinx.com>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/err.h>
+
+#define WZRD_NUM_OUTPUTS 7
+#define WZRD_ACLK_MAX_FREQ 250000000UL
+
+#define WZRD_CLK_CFG_REG(n) (0x200 + 4 * (n))
+
+#define WZRD_CLKOUT0_FRAC_EN BIT(18)
+#define WZRD_CLKFBOUT_FRAC_EN BIT(26)
+
+#define WZRD_CLKFBOUT_MULT_SHIFT 8
+#define WZRD_CLKFBOUT_MULT_MASK (0xff << WZRD_CLKFBOUT_MULT_SHIFT)
+#define WZRD_CLKFBOUT_FRAC_SHIFT 16
+#define WZRD_CLKFBOUT_FRAC_MASK (0x3ff << WZRD_CLKFBOUT_FRAC_SHIFT)
+#define WZRD_DIVCLK_DIVIDE_SHIFT 0
+#define WZRD_DIVCLK_DIVIDE_MASK (0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
+#define WZRD_CLKOUT_DIVIDE_SHIFT 0
+#define WZRD_CLKOUT_DIVIDE_WIDTH 8
+#define WZRD_CLKOUT_DIVIDE_MASK (0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
+#define WZRD_CLKOUT_FRAC_SHIFT 8
+#define WZRD_CLKOUT_FRAC_MASK 0x3ff
+
+#define WZRD_DR_MAX_INT_DIV_VALUE 255
+#define WZRD_DR_NUM_RETRIES 10000
+#define WZRD_DR_STATUS_REG_OFFSET 0x04
+#define WZRD_DR_LOCK_BIT_MASK 0x00000001
+#define WZRD_DR_INIT_REG_OFFSET 0x25C
+#define WZRD_DR_DIV_TO_PHASE_OFFSET 4
+#define WZRD_DR_BEGIN_DYNA_RECONF 0x03
+
+/* Get the mask from width */
+#define div_mask(width) ((1 << (width)) - 1)
+
+/* Extract divider instance from clock hardware instance */
+#define to_clk_wzrd_divider(_hw) container_of(_hw, struct clk_wzrd_divider, hw)
+
+/* Indices into clk_wzrd::clks_internal (mul feeds mul_div) */
+enum clk_wzrd_int_clks {
+	wzrd_clk_mul,
+	wzrd_clk_mul_div,
+	wzrd_clk_mul_frac,
+	wzrd_clk_int_max
+};
+
+/**
+ * struct clk_wzrd - Clock wizard private data structure
+ *
+ * @clk_data:	Clock data
+ * @nb:		Notifier block
+ * @base:	Memory base
+ * @clk_in1:	Handle to input clock 'clk_in1'
+ * @axi_clk:	Handle to input clock 's_axi_aclk'
+ * @clks_internal: Internal clocks
+ * @clkout:	Output clocks
+ * @speed_grade: Speed grade of the device (1-3; 0 when not specified
+ *		or invalid, in which case no rate notifier is registered)
+ * @suspended:	Flag indicating power state of the device
+ */
+struct clk_wzrd {
+	struct clk_onecell_data clk_data;
+	struct notifier_block nb;
+	void __iomem *base;
+	struct clk *clk_in1;
+	struct clk *axi_clk;
+	struct clk *clks_internal[wzrd_clk_int_max];
+	struct clk *clkout[WZRD_NUM_OUTPUTS];
+	unsigned int speed_grade;
+	bool suspended;
+};
+
+/**
+ * struct clk_wzrd_divider - clock divider specific to clk_wzrd
+ *
+ * @hw:		handle between common and hardware-specific interfaces
+ * @base:	base address of register containing the divider
+ * @offset:	offset address of register containing the divider
+ * @shift:	shift to the divider bit field
+ * @width:	width of the divider bit field
+ * @flags:	clk_wzrd divider flags
+ * @table:	array of value/divider pairs, last entry should have div = 0
+ * @lock:	register lock (may be NULL; callers then rely on sparse
+ *		__acquire/__release annotations only)
+ */
+struct clk_wzrd_divider {
+	struct clk_hw hw;
+	void __iomem *base;
+	u16 offset;
+	u8 shift;
+	u8 width;
+	u8 flags;
+	const struct clk_div_table *table;
+	spinlock_t *lock;  /* divider lock */
+};
+
+#define to_clk_wzrd(_nb) container_of(_nb, struct clk_wzrd, nb)
+
+/*
+ * maximum frequencies for input/output clocks per speed grade;
+ * indexed by (speed_grade - 1) in clk_wzrd_clk_notifier()
+ */
+static const unsigned long clk_wzrd_max_freq[] = {
+	800000000UL,
+	933000000UL,
+	1066000000UL
+};
+
+/* spin lock variable for clk_wzrd */
+static DEFINE_SPINLOCK(clkwzrd_lock);
+
+/*
+ * clk_wzrd_recalc_rate - read back the integer divider field and let the
+ * core divider helper turn it into a rate.
+ *
+ * The old (void __iomem *) cast on the address computation was redundant:
+ * base is already an __iomem pointer, so plain pointer arithmetic keeps
+ * the address space annotation intact.
+ */
+static unsigned long clk_wzrd_recalc_rate(struct clk_hw *hw,
+					  unsigned long parent_rate)
+{
+	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+	void __iomem *div_addr = divider->base + divider->offset;
+	unsigned int val;
+
+	val = readl(div_addr) >> divider->shift;
+	val &= div_mask(divider->width);
+
+	return divider_recalc_rate(hw, parent_rate, val, divider->table,
+				   divider->flags, divider->width);
+}
+
+/*
+ * clk_wzrd_dynamic_reconfig - program an integer output divider via the
+ * dynamic reconfiguration port, then kick off reconfiguration.
+ *
+ * Fix: the old "retries = N; while (retries--)" polling loops left
+ * retries == 0xffff (u16 wrap) on exhaustion, so "retries == 0" never
+ * detected a timeout — and reported a spurious timeout when the lock bit
+ * appeared on the final iteration. Count down to zero instead.
+ *
+ * Return: 0 on success, -ETIMEDOUT if the LOCK bit never asserts.
+ */
+static int clk_wzrd_dynamic_reconfig(struct clk_hw *hw, unsigned long rate,
+				     unsigned long parent_rate)
+{
+	int err = 0;
+	u16 retries;
+	u32 value;
+	unsigned long flags = 0;
+	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+	void __iomem *div_addr = divider->base + divider->offset;
+
+	if (divider->lock)
+		spin_lock_irqsave(divider->lock, flags);
+	else
+		__acquire(divider->lock);
+
+	value = DIV_ROUND_CLOSEST(parent_rate, rate);
+
+	/* Cap the value to max */
+	if (value > WZRD_DR_MAX_INT_DIV_VALUE)
+		value = WZRD_DR_MAX_INT_DIV_VALUE;
+
+	/* Set divisor and clear phase offset */
+	writel(value, div_addr);
+	writel(0x00, div_addr + WZRD_DR_DIV_TO_PHASE_OFFSET);
+
+	/* Wait for the LOCK bit before touching the init register */
+	for (retries = WZRD_DR_NUM_RETRIES; retries; retries--) {
+		if (readl(divider->base + WZRD_DR_STATUS_REG_OFFSET) &
+		    WZRD_DR_LOCK_BIT_MASK)
+			break;
+	}
+	if (!retries) {
+		err = -ETIMEDOUT;
+		goto err_reconfig;
+	}
+
+	/* Initiate reconfiguration */
+	writel(WZRD_DR_BEGIN_DYNA_RECONF,
+	       divider->base + WZRD_DR_INIT_REG_OFFSET);
+
+	/* Wait for the new configuration to lock */
+	for (retries = WZRD_DR_NUM_RETRIES; retries; retries--) {
+		if (readl(divider->base + WZRD_DR_STATUS_REG_OFFSET) &
+		    WZRD_DR_LOCK_BIT_MASK)
+			break;
+	}
+	if (!retries)
+		err = -ETIMEDOUT;
+
+err_reconfig:
+	if (divider->lock)
+		spin_unlock_irqrestore(divider->lock, flags);
+	else
+		__release(divider->lock);
+
+	return err;
+}
+
+/*
+ * clk_wzrd_round_rate - round to the closest rate achievable with an
+ * integer divider from the (unchanged) parent rate.
+ *
+ * Fixes over the old version: the divider was stored in a u8, silently
+ * truncating ratios above 255; a requested rate above 2*parent rounded
+ * the divider to 0 and divided by zero; rate == 0 divided by zero inside
+ * DIV_ROUND_CLOSEST. Clamp to [1, WZRD_DR_MAX_INT_DIV_VALUE], matching
+ * what clk_wzrd_dynamic_reconfig() will actually program.
+ */
+static long clk_wzrd_round_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long *prate)
+{
+	unsigned long div;
+
+	if (!rate)
+		return -EINVAL;
+
+	div = DIV_ROUND_CLOSEST(*prate, rate);
+	if (!div)
+		div = 1;
+	else if (div > WZRD_DR_MAX_INT_DIV_VALUE)
+		div = WZRD_DR_MAX_INT_DIV_VALUE;
+
+	return *prate / div;
+}
+
+/* Integer-divider clk_ops (outputs other than clkout0) */
+static const struct clk_ops clk_wzrd_clk_divider_ops = {
+	.round_rate = clk_wzrd_round_rate,
+	.set_rate = clk_wzrd_dynamic_reconfig,
+	.recalc_rate = clk_wzrd_recalc_rate,
+};
+
+/*
+ * clk_wzrd_recalc_ratef - read back the fractional divider:
+ * rate = parent_rate / (div + frac/1000).
+ *
+ * Fix: the address was computed by casting the __iomem pointer through
+ * u64, which truncates on 32-bit builds and defeats sparse address-space
+ * checking; plain pointer arithmetic is correct on every architecture.
+ */
+static unsigned long clk_wzrd_recalc_ratef(struct clk_hw *hw,
+					   unsigned long parent_rate)
+{
+	unsigned int val;
+	u32 div, frac;
+	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+	void __iomem *div_addr = divider->base + divider->offset;
+
+	val = readl(div_addr);
+	div = val & div_mask(divider->width);
+	frac = (val >> WZRD_CLKOUT_FRAC_SHIFT) & WZRD_CLKOUT_FRAC_MASK;
+
+	/* scale by 1000 so the milli-divider 'frac' can be folded in */
+	return ((parent_rate * 1000) / ((div * 1000) + frac));
+}
+
+/*
+ * clk_wzrd_dynamic_reconfig_f - program the fractional divider (clkout0)
+ * via the dynamic reconfiguration port.
+ *
+ * Fixes: (1) same u16 retries wrap bug as the integer path — "while
+ * (retries--)" made the "!retries" timeout test unreliable; poll by
+ * counting down to zero instead. (2) the div_addr computation cast the
+ * __iomem pointer through u64, breaking 32-bit builds.
+ *
+ * Return: 0 on success, -ETIMEDOUT if the LOCK bit never asserts.
+ */
+static int clk_wzrd_dynamic_reconfig_f(struct clk_hw *hw, unsigned long rate,
+				       unsigned long parent_rate)
+{
+	int err = 0;
+	u16 retries;
+	u32 value, pre;
+	unsigned long flags = 0;
+	unsigned long rate_div, f, clockout0_div;
+	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+	void __iomem *div_addr = divider->base + divider->offset;
+
+	if (divider->lock)
+		spin_lock_irqsave(divider->lock, flags);
+	else
+		__acquire(divider->lock);
+
+	/* split parent/rate ratio into integer and milli (frac) parts */
+	rate_div = ((parent_rate * 1000) / rate);
+	clockout0_div = rate_div / 1000;
+
+	pre = DIV_ROUND_CLOSEST((parent_rate * 1000), rate);
+	f = (u32)(pre - (clockout0_div * 1000));
+	f = f & WZRD_CLKOUT_FRAC_MASK;
+
+	value = ((f << WZRD_CLKOUT_DIVIDE_WIDTH) | (clockout0_div &
+			WZRD_CLKOUT_DIVIDE_MASK));
+
+	/* Set divisor and clear phase offset */
+	writel(value, div_addr);
+	writel(0x0, div_addr + WZRD_DR_DIV_TO_PHASE_OFFSET);
+
+	/* Wait for the LOCK bit before touching the init register */
+	for (retries = WZRD_DR_NUM_RETRIES; retries; retries--) {
+		if (readl(divider->base + WZRD_DR_STATUS_REG_OFFSET) &
+		    WZRD_DR_LOCK_BIT_MASK)
+			break;
+	}
+	if (!retries) {
+		err = -ETIMEDOUT;
+		goto err_reconfig;
+	}
+
+	/* Initiate reconfiguration */
+	writel(WZRD_DR_BEGIN_DYNA_RECONF,
+	       divider->base + WZRD_DR_INIT_REG_OFFSET);
+
+	/* Wait for the new configuration to lock */
+	for (retries = WZRD_DR_NUM_RETRIES; retries; retries--) {
+		if (readl(divider->base + WZRD_DR_STATUS_REG_OFFSET) &
+		    WZRD_DR_LOCK_BIT_MASK)
+			break;
+	}
+	if (!retries)
+		err = -ETIMEDOUT;
+
+err_reconfig:
+	if (divider->lock)
+		spin_unlock_irqrestore(divider->lock, flags);
+	else
+		__release(divider->lock);
+
+	return err;
+}
+
+/*
+ * The fractional divider can hit any rate the framework asks for
+ * (within hardware limits), so accept the request unmodified.
+ */
+static long clk_wzrd_round_rate_f(struct clk_hw *hw, unsigned long rate,
+				  unsigned long *prate)
+{
+	return rate;
+}
+
+/* Fractional-divider clk_ops (clkout0 only) */
+static const struct clk_ops clk_wzrd_clk_divider_ops_f = {
+	.round_rate = clk_wzrd_round_rate_f,
+	.set_rate = clk_wzrd_dynamic_reconfig_f,
+	.recalc_rate = clk_wzrd_recalc_ratef,
+};
+
+/*
+ * clk_wzrd_register_divf - allocate and register a fractional divider
+ * clock (clkout0).
+ *
+ * Returns the registered struct clk, or an ERR_PTR on failure; the
+ * divider allocation is freed on registration failure.
+ */
+static struct clk *clk_wzrd_register_divf(struct device *dev,
+					  const char *name,
+					  const char *parent_name,
+					  unsigned long flags,
+					  void __iomem *base, u16 offset,
+					  u8 shift, u8 width,
+					  u8 clk_divider_flags,
+					  const struct clk_div_table *table,
+					  spinlock_t *lock)
+{
+	struct clk_wzrd_divider *div;
+	struct clk_hw *hw;
+	struct clk_init_data init;
+	int ret;
+
+	/* HIWORD_MASK dividers keep the write mask in the upper 16 bits */
+	if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) {
+		if (width + shift > 16) {
+			pr_warn("divider value exceeds LOWORD field\n");
+			return ERR_PTR(-EINVAL);
+		}
+	}
+
+	/* allocate the divider */
+	div = kzalloc(sizeof(*div), GFP_KERNEL);
+	if (!div)
+		return ERR_PTR(-ENOMEM);
+
+	init.name = name;
+
+	if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
+		init.ops = &clk_divider_ro_ops;
+	else
+		init.ops = &clk_wzrd_clk_divider_ops_f;
+
+	init.flags = flags;
+	init.parent_names = (parent_name ? &parent_name : NULL);
+	init.num_parents = (parent_name ? 1 : 0);
+
+	/* struct clk_divider assignments */
+	div->base = base;
+	div->offset = offset;
+	div->shift = shift;
+	div->width = width;
+	div->flags = clk_divider_flags;
+	div->lock = lock;
+	div->hw.init = &init;
+	div->table = table;
+
+	/* register the clock */
+	hw = &div->hw;
+	ret = clk_hw_register(dev, hw);
+	if (ret) {
+		kfree(div);
+		return ERR_PTR(ret);
+	}
+
+	return hw->clk;
+}
+
+/*
+ * clk_wzrd_register_divider - allocate and register an integer divider
+ * clock (clkout1..6).
+ *
+ * Fix: on clk_hw_register() failure the old code did
+ * "hw = ERR_PTR(ret); return hw->clk;" — dereferencing an ERR_PTR is
+ * undefined behavior. Return ERR_PTR(ret) directly, matching the
+ * sibling clk_wzrd_register_divf().
+ *
+ * Returns the registered struct clk, or an ERR_PTR on failure.
+ */
+static struct clk *clk_wzrd_register_divider(struct device *dev,
+					     const char *name,
+					     const char *parent_name,
+					     unsigned long flags,
+					     void __iomem *base, u16 offset,
+					     u8 shift, u8 width,
+					     u8 clk_divider_flags,
+					     const struct clk_div_table *table,
+					     spinlock_t *lock)
+{
+	struct clk_wzrd_divider *div;
+	struct clk_hw *hw;
+	struct clk_init_data init;
+	int ret;
+
+	/* HIWORD_MASK dividers keep the write mask in the upper 16 bits */
+	if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) {
+		if (width + shift > 16) {
+			pr_warn("divider value exceeds LOWORD field\n");
+			return ERR_PTR(-EINVAL);
+		}
+	}
+
+	/* allocate the divider */
+	div = kzalloc(sizeof(*div), GFP_KERNEL);
+	if (!div)
+		return ERR_PTR(-ENOMEM);
+
+	init.name = name;
+	if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
+		init.ops = &clk_divider_ro_ops;
+	else
+		init.ops = &clk_wzrd_clk_divider_ops;
+	init.flags = flags;
+	init.parent_names = (parent_name ? &parent_name : NULL);
+	init.num_parents = (parent_name ? 1 : 0);
+
+	/* struct clk_divider assignments */
+	div->base = base;
+	div->offset = offset;
+	div->shift = shift;
+	div->width = width;
+	div->flags = clk_divider_flags;
+	div->lock = lock;
+	div->hw.init = &init;
+	div->table = table;
+
+	/* register the clock */
+	hw = &div->hw;
+	ret = clk_hw_register(dev, hw);
+	if (ret) {
+		kfree(div);
+		return ERR_PTR(ret);
+	}
+
+	return hw->clk;
+}
+
+/*
+ * clk_wzrd_clk_notifier - veto rate changes that would push clk_in1 or
+ * s_axi_aclk above their per-speed-grade maximums.
+ *
+ * Only registered when a valid speed-grade (1-3) was read from DT, so
+ * the (speed_grade - 1) index below is always in range.
+ */
+static int clk_wzrd_clk_notifier(struct notifier_block *nb, unsigned long event,
+				 void *data)
+{
+	unsigned long max;
+	struct clk_notifier_data *ndata = data;
+	struct clk_wzrd *clk_wzrd = to_clk_wzrd(nb);
+
+	/* while suspended the IP is clock-gated; nothing to police */
+	if (clk_wzrd->suspended)
+		return NOTIFY_OK;
+
+	if (ndata->clk == clk_wzrd->clk_in1)
+		max = clk_wzrd_max_freq[clk_wzrd->speed_grade - 1];
+	else if (ndata->clk == clk_wzrd->axi_clk)
+		max = WZRD_ACLK_MAX_FREQ;
+	else
+		return NOTIFY_DONE;	/* should never happen */
+
+	switch (event) {
+	case PRE_RATE_CHANGE:
+		if (ndata->new_rate > max)
+			return NOTIFY_BAD;
+		return NOTIFY_OK;
+	case POST_RATE_CHANGE:
+	case ABORT_RATE_CHANGE:
+	default:
+		return NOTIFY_DONE;
+	}
+}
+
+/* Suspend: gate the AXI interface clock and mute the rate notifier */
+static int __maybe_unused clk_wzrd_suspend(struct device *dev)
+{
+	struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(clk_wzrd->axi_clk);
+	clk_wzrd->suspended = true;
+
+	return 0;
+}
+
+/* Resume: re-enable the AXI interface clock and re-arm the notifier */
+static int __maybe_unused clk_wzrd_resume(struct device *dev)
+{
+	int ret;
+	struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);
+
+	ret = clk_prepare_enable(clk_wzrd->axi_clk);
+	if (ret) {
+		dev_err(dev, "unable to enable s_axi_aclk\n");
+		return ret;
+	}
+
+	clk_wzrd->suspended = false;
+
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(clk_wzrd_dev_pm_ops, clk_wzrd_suspend,
+			 clk_wzrd_resume);
+
+/*
+ * clk_wzrd_probe - map the IP, read the multiplier/divider configuration,
+ * register the internal fixed-factor and divider clocks plus one
+ * programmable divider per "clock-output-names" entry (fractional for
+ * clkout0, integer otherwise), and expose a onecell clock provider.
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+static int clk_wzrd_probe(struct platform_device *pdev)
+{
+	int i, ret;
+	u32 reg, reg_f, mult;
+	unsigned long rate;
+	const char *clk_name;
+	void __iomem *ctrl_reg;
+	struct clk_wzrd *clk_wzrd;
+	struct resource *mem;
+	int outputs;
+	unsigned long flags = 0;
+	struct device_node *np = pdev->dev.of_node;
+
+	clk_wzrd = devm_kzalloc(&pdev->dev, sizeof(*clk_wzrd), GFP_KERNEL);
+	if (!clk_wzrd)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, clk_wzrd);
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	clk_wzrd->base = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(clk_wzrd->base))
+		return PTR_ERR(clk_wzrd->base);
+
+	/* optional; 0 (no notifier) when absent or out of range */
+	ret = of_property_read_u32(np, "speed-grade", &clk_wzrd->speed_grade);
+	if (!ret) {
+		if (clk_wzrd->speed_grade < 1 || clk_wzrd->speed_grade > 3) {
+			dev_warn(&pdev->dev, "invalid speed grade '%d'\n",
+				 clk_wzrd->speed_grade);
+			clk_wzrd->speed_grade = 0;
+		}
+	}
+
+	clk_wzrd->clk_in1 = devm_clk_get(&pdev->dev, "clk_in1");
+	if (IS_ERR(clk_wzrd->clk_in1)) {
+		if (clk_wzrd->clk_in1 != ERR_PTR(-EPROBE_DEFER))
+			dev_err(&pdev->dev, "clk_in1 not found\n");
+		return PTR_ERR(clk_wzrd->clk_in1);
+	}
+
+	clk_wzrd->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+	if (IS_ERR(clk_wzrd->axi_clk)) {
+		if (clk_wzrd->axi_clk != ERR_PTR(-EPROBE_DEFER))
+			dev_err(&pdev->dev, "s_axi_aclk not found\n");
+		return PTR_ERR(clk_wzrd->axi_clk);
+	}
+	ret = clk_prepare_enable(clk_wzrd->axi_clk);
+	if (ret) {
+		dev_err(&pdev->dev, "enabling s_axi_aclk failed\n");
+		return ret;
+	}
+	rate = clk_get_rate(clk_wzrd->axi_clk);
+	if (rate > WZRD_ACLK_MAX_FREQ) {
+		dev_err(&pdev->dev, "s_axi_aclk frequency (%lu) too high\n",
+			rate);
+		ret = -EINVAL;
+		goto err_disable_clk;
+	}
+
+	/* register multiplier: integer part plus milli-units fraction */
+	reg = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) &
+		     WZRD_CLKFBOUT_MULT_MASK) >> WZRD_CLKFBOUT_MULT_SHIFT;
+	reg_f = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) &
+		     WZRD_CLKFBOUT_FRAC_MASK) >> WZRD_CLKFBOUT_FRAC_SHIFT;
+
+	mult = ((reg * 1000) + reg_f);
+	clk_name = kasprintf(GFP_KERNEL, "%s_mul", dev_name(&pdev->dev));
+	if (!clk_name) {
+		ret = -ENOMEM;
+		goto err_disable_clk;
+	}
+	clk_wzrd->clks_internal[wzrd_clk_mul] = clk_register_fixed_factor
+			(&pdev->dev, clk_name,
+			 __clk_get_name(clk_wzrd->clk_in1),
+			 0, mult, 1000);
+	kfree(clk_name);
+	if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul])) {
+		dev_err(&pdev->dev, "unable to register fixed-factor clock\n");
+		ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul]);
+		goto err_disable_clk;
+	}
+
+	outputs = of_property_count_strings(np, "clock-output-names");
+	if (outputs == 1)
+		flags = CLK_SET_RATE_PARENT;
+	clk_name = kasprintf(GFP_KERNEL, "%s_mul_div", dev_name(&pdev->dev));
+	if (!clk_name) {
+		ret = -ENOMEM;
+		goto err_rm_int_clk;
+	}
+
+	/*
+	 * Validate the DT-supplied count before it is used to index
+	 * clkout[]: a missing property yields a negative value and a
+	 * count above WZRD_NUM_OUTPUTS would overflow the array.
+	 */
+	if (outputs <= 0 || outputs > WZRD_NUM_OUTPUTS) {
+		dev_err(&pdev->dev, "invalid clock-output-names count\n");
+		ret = -EINVAL;
+		goto err_rm_int_clk;
+	}
+
+	ctrl_reg = clk_wzrd->base + WZRD_CLK_CFG_REG(0);
+	/* register div */
+	clk_wzrd->clks_internal[wzrd_clk_mul_div] = clk_register_divider
+			(&pdev->dev, clk_name,
+			 __clk_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]),
+			 flags, ctrl_reg, 0, 8, CLK_DIVIDER_ONE_BASED |
+			 CLK_DIVIDER_ALLOW_ZERO, &clkwzrd_lock);
+	if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div])) {
+		dev_err(&pdev->dev, "unable to register divider clock\n");
+		ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div]);
+		goto err_rm_int_clk;
+	}
+
+	/* register div per output (fractional divider for clkout0 only) */
+	for (i = outputs - 1; i >= 0 ; i--) {
+		const char *clkout_name;
+
+		if (of_property_read_string_index(np, "clock-output-names", i,
+						  &clkout_name)) {
+			int j;
+
+			/* drop outputs already registered by this loop */
+			for (j = i + 1; j < outputs; j++)
+				clk_unregister(clk_wzrd->clkout[j]);
+			dev_err(&pdev->dev,
+				"clock output name not specified\n");
+			ret = -EINVAL;
+			goto err_rm_int_clks;
+		}
+		if (!i)
+			clk_wzrd->clkout[i] = clk_wzrd_register_divf
+				(&pdev->dev, clkout_name,
+				clk_name, flags,
+				clk_wzrd->base, (WZRD_CLK_CFG_REG(2) + i * 12),
+				WZRD_CLKOUT_DIVIDE_SHIFT,
+				WZRD_CLKOUT_DIVIDE_WIDTH,
+				CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+				NULL, &clkwzrd_lock);
+		else
+			clk_wzrd->clkout[i] = clk_wzrd_register_divider
+				(&pdev->dev, clkout_name,
+				clk_name, 0,
+				clk_wzrd->base, (WZRD_CLK_CFG_REG(2) + i * 12),
+				WZRD_CLKOUT_DIVIDE_SHIFT,
+				WZRD_CLKOUT_DIVIDE_WIDTH,
+				CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+				NULL, &clkwzrd_lock);
+		if (IS_ERR(clk_wzrd->clkout[i])) {
+			int j;
+
+			for (j = i + 1; j < outputs; j++)
+				clk_unregister(clk_wzrd->clkout[j]);
+			dev_err(&pdev->dev,
+				"unable to register divider clock\n");
+			ret = PTR_ERR(clk_wzrd->clkout[i]);
+			goto err_rm_int_clks;
+		}
+	}
+
+	kfree(clk_name);
+
+	/*
+	 * NOTE(review): clk_num covers the whole clkout[] array; entries
+	 * beyond 'outputs' stay NULL — confirm consumers never index them.
+	 */
+	clk_wzrd->clk_data.clks = clk_wzrd->clkout;
+	clk_wzrd->clk_data.clk_num = ARRAY_SIZE(clk_wzrd->clkout);
+	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_wzrd->clk_data);
+
+	if (clk_wzrd->speed_grade) {
+		clk_wzrd->nb.notifier_call = clk_wzrd_clk_notifier;
+
+		ret = clk_notifier_register(clk_wzrd->clk_in1,
+					    &clk_wzrd->nb);
+		if (ret)
+			dev_warn(&pdev->dev,
+				 "unable to register clock notifier\n");
+
+		ret = clk_notifier_register(clk_wzrd->axi_clk, &clk_wzrd->nb);
+		if (ret)
+			dev_warn(&pdev->dev,
+				 "unable to register clock notifier\n");
+	}
+
+	return 0;
+
+err_rm_int_clks:
+	clk_unregister(clk_wzrd->clks_internal[1]);
+err_rm_int_clk:
+	kfree(clk_name);
+	clk_unregister(clk_wzrd->clks_internal[0]);
+err_disable_clk:
+	clk_disable_unprepare(clk_wzrd->axi_clk);
+
+	return ret;
+}
+
+/*
+ * clk_wzrd_remove - tear down in reverse order of probe: drop the
+ * provider, unregister clocks, detach rate notifiers, gate the AXI clock.
+ */
+static int clk_wzrd_remove(struct platform_device *pdev)
+{
+	int i;
+	struct clk_wzrd *clk_wzrd = platform_get_drvdata(pdev);
+
+	of_clk_del_provider(pdev->dev.of_node);
+
+	/*
+	 * NOTE(review): iterates the full arrays; entries never registered
+	 * by probe are NULL — confirm clk_unregister tolerates NULL here.
+	 */
+	for (i = 0; i < WZRD_NUM_OUTPUTS; i++)
+		clk_unregister(clk_wzrd->clkout[i]);
+	for (i = 0; i < wzrd_clk_int_max; i++)
+		clk_unregister(clk_wzrd->clks_internal[i]);
+
+	/* notifiers only exist when a valid speed-grade was parsed */
+	if (clk_wzrd->speed_grade) {
+		clk_notifier_unregister(clk_wzrd->axi_clk, &clk_wzrd->nb);
+		clk_notifier_unregister(clk_wzrd->clk_in1, &clk_wzrd->nb);
+	}
+
+	clk_disable_unprepare(clk_wzrd->axi_clk);
+
+	return 0;
+}
+
+/* Devicetree match table (original clocking wizard variant) */
+static const struct of_device_id clk_wzrd_ids[] = {
+	{ .compatible = "xlnx,clocking-wizard" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, clk_wzrd_ids);
+
+/*
+ * NOTE(review): the driver name "clk-wizard" is also used by the Versal
+ * clocking-wizard driver in this series — verify both are never built
+ * into the same kernel, or rename one.
+ */
+static struct platform_driver clk_wzrd_driver = {
+	.driver = {
+		.name = "clk-wizard",
+		.of_match_table = clk_wzrd_ids,
+		.pm = &clk_wzrd_dev_pm_ops,
+	},
+	.probe = clk_wzrd_probe,
+	.remove = clk_wzrd_remove,
+};
+module_platform_driver(clk_wzrd_driver);
+
+MODULE_LICENSE("GPL");
+/* fix: the author string was missing the closing '>' of the address */
+MODULE_AUTHOR("Soeren Brinkmann <soren.brinkmann@xilinx.com>");
+MODULE_DESCRIPTION("Driver for the Xilinx Clocking Wizard IP core");
diff --git a/drivers/clk/idt/Makefile b/drivers/clk/idt/Makefile
new file mode 100644
index 000000000000..4cf2b6e4801d
--- /dev/null
+++ b/drivers/clk/idt/Makefile
@@ -0,0 +1,3 @@
+obj-y += clk-idt8t49n24x-core.o
+obj-y += clk-idt8t49n24x-debugfs.o
+obj-y += clk-idt8t49n24x.o
diff --git a/drivers/clk/idt/clk-idt8t49n24x-core.c b/drivers/clk/idt/clk-idt8t49n24x-core.c
new file mode 100644
index 000000000000..ad23014e708f
--- /dev/null
+++ b/drivers/clk/idt/clk-idt8t49n24x-core.c
@@ -0,0 +1,933 @@
+// SPDX-License-Identifier: GPL-2.0
+/* clk-idt8t49n24x-core.c - Program 8T49N24x settings via I2C (common code)
+ *
+ * Copyright (C) 2018, Integrated Device Technology, Inc. <david.cater@idt.com>
+ *
+ * See https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html
+ * This program is distributed "AS IS" and WITHOUT ANY WARRANTY;
+ * including the implied warranties of MERCHANTABILITY, FITNESS FOR
+ * A PARTICULAR PURPOSE, or NON-INFRINGEMENT.
+ */
+
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+
+#include "clk-idt8t49n24x-core.h"
+
+/*
+ * In Timing Commander, Q0 is changed from 25MHz to Q0 75MHz, the following
+ * changes occur:
+ *
+ * 2 bytes change in EEPROM data string.
+ *
+ * DSM_INT R0025[0],R0026[7:0] : 35 => 30
+ * NS2_Q0 R0040[7:0],R0041[7:0] : 14 => 4
+ *
+ * In EEPROM
+ * 1. R0026
+ * 2. R0041
+ *
+ * Note that VCO_Frequency (metadata) also changed (3500 =>3000).
+ * This reflects a change to DSM_INT.
+ *
+ * Note that the Timing Commander code has workarounds in the workflow scripts
+ * to handle dividers for the 8T49N241 (because the development of that GUI
+ * predates chip override functionality). That affects NS1_Qx (x in 1-3)
+ * and NS2_Qx. NS1_Qx contains the upper bits of NS_Qx, and NS2_Qx contains
+ * the lower bits. That is NOT the case for Q0, though. In that case NS1_Q0
+ * is the 1st stage output divider (/5, /6, /4) and NS2_Q0 is the 16-bit
+ * second stage (with actual divide being twice the value stored in the
+ * register).
+ *
+ * NS1_Q0 R003F[1:0]
+ */
+
+#define IDT24x_VCO_MIN 2999997000u
+#define IDT24x_VCO_MAX 4000004000u
+#define IDT24x_VCO_OPT 3500000000u
+#define IDT24x_MIN_INT_DIVIDER 6
+#define IDT24x_MIN_NS1 4
+#define IDT24x_MAX_NS1 6
+
+/* NS1_Q0 first-stage divider per register value 0..2 (see header comment) */
+static u8 q0_ns1_options[3] = { 5, 6, 4 };
+
+/**
+ * bits_to_shift - num bits to shift given specified mask
+ * @mask: 32-bit word input to count zero bits on right
+ *
+ * Given a bit mask indicating where a value will be stored in
+ * a register, return the number of bits you need to shift the value
+ * before ORing it into the register value.
+ *
+ * Return: number of bits to shift
+ */
+int bits_to_shift(unsigned int mask)
+{
+	/*
+	 * Classic branch-free count of trailing zero bits: isolate the
+	 * lowest set bit, then subtract from 32 per halving mask.
+	 * A mask of 0 yields 32 (no set bit to shift to).
+	 */
+	/* the number of zero bits on the right */
+	unsigned int c = 32;
+
+	mask &= ~mask + 1;
+	if (mask)
+		c--;
+	if (mask & 0x0000FFFF)
+		c -= 16;
+	if (mask & 0x00FF00FF)
+		c -= 8;
+	if (mask & 0x0F0F0F0F)
+		c -= 4;
+	if (mask & 0x33333333)
+		c -= 2;
+	if (mask & 0x55555555)
+		c -= 1;
+	return c;
+}
+
+/*
+ * TODO: Consider replacing this with regmap_multi_reg_write, which
+ * supports introducing a delay after each write. Experiment to see if
+ * the writes succeed consistently when using that API.
+ */
+/*
+ * Retry a bulk regmap write up to max_attempts times, sleeping
+ * 100-200us between attempts. Returns 0 on first success, otherwise
+ * the last regmap_bulk_write() error.
+ */
+static int regmap_bulk_write_with_retry(
+	struct regmap *map, unsigned int offset, u8 val[],
+	int val_count, int max_attempts)
+{
+	int err = 0;
+	int count = 1;
+
+	do {
+		err = regmap_bulk_write(map, offset, val, val_count);
+		if (err == 0)
+			return 0;
+
+		usleep_range(100, 200);
+	} while (count++ <= max_attempts);
+	return err;
+}
+
+/*
+ * Retry a single-register regmap write up to max_attempts times,
+ * sleeping 100-200us between attempts. Returns 0 on first success,
+ * otherwise the last regmap_write() error.
+ */
+static int regmap_write_with_retry(
+	struct regmap *map, unsigned int offset, unsigned int val,
+	int max_attempts)
+{
+	int err = 0;
+	int count = 1;
+
+	do {
+		err = regmap_write(map, offset, val);
+		if (err == 0)
+			return 0;
+		usleep_range(100, 200);
+	} while (count++ <= max_attempts);
+	return err;
+}
+
+/*
+ * TODO: Consider using regmap_multi_reg_write instead. Explore
+ * use of regmap to configure WRITE_BLOCK_SIZE, and using the delay
+ * mechanism in regmap_multi_reg_write instead of retrying multiple
+ * times (regmap_bulk_write_with_retry).
+ */
+/*
+ * i2cwritebulk - write val[] to consecutive registers starting at 'reg',
+ * splitting the transfer into WRITE_BLOCK_SIZE chunks and retrying each
+ * chunk up to 5 times. Returns 0 on success or the first chunk error.
+ *
+ * NOTE(review): sprintf/strcat accumulate hex debug text into the fixed
+ * dbg[128] buffer; whether this can overflow depends on
+ * WRITE_BLOCK_SIZE (defined in the header, not visible here) — confirm
+ * WRITE_BLOCK_SIZE * 3 stays well under 128, or switch to scnprintf.
+ */
+int i2cwritebulk(
+	struct i2c_client *client, struct regmap *map,
+	unsigned int reg, u8 val[], size_t val_count)
+{
+	char dbg[128];
+	u8 block[WRITE_BLOCK_SIZE];
+	unsigned int block_offset = reg;
+	int x;
+	int err = 0;
+	int currentOffset = 0;
+
+	dev_dbg(&client->dev, "I2C->0x%04x : [hex] . First byte: %02x, Second byte: %02x",
+		reg, reg >> 8, reg & 0xFF);
+	dbg[0] = 0;
+
+	for (x = 0; x < val_count; x++) {
+		char data[4];
+
+		/* stage the byte; flush once a full block is collected */
+		block[currentOffset++] = val[x];
+		sprintf(data, "%02x ", val[x]);
+		strcat(dbg, data);
+		if (x > 0 && (x + 1) % WRITE_BLOCK_SIZE == 0) {
+			dev_dbg(&client->dev, "%s", dbg);
+			dbg[0] = '\0';
+			sprintf(dbg,
+				"(loop) calling regmap_bulk_write @ 0x%04x [%d bytes]",
+				block_offset, WRITE_BLOCK_SIZE);
+			dev_dbg(&client->dev, "%s", dbg);
+			dbg[0] = '\0';
+			err = regmap_bulk_write_with_retry(
+				map, block_offset, block, WRITE_BLOCK_SIZE, 5);
+			if (err != 0)
+				break;
+			block_offset += WRITE_BLOCK_SIZE;
+			currentOffset = 0;
+		}
+	}
+	/* flush the final partial block, if any */
+	if (err == 0 && currentOffset > 0) {
+		dev_dbg(&client->dev, "%s", dbg);
+		dev_dbg(&client->dev, "(final) calling regmap_bulk_write @ 0x%04x [%d bytes]",
+			block_offset, currentOffset);
+		err = regmap_bulk_write_with_retry(
+			map, block_offset, block, currentOffset, 5);
+	}
+
+	return err;
+}
+
+/*
+ * Single-register write with retry plus a settle delay; returns the
+ * regmap_write_with_retry() result.
+ */
+static int i2cwrite(
+	struct i2c_client *client, struct regmap *map,
+	unsigned int reg, unsigned int val)
+{
+	int err;
+
+	dev_dbg(&client->dev, "I2C->0x%x : [hex] %x", reg, val);
+	err = regmap_write_with_retry(map, reg, val, 5);
+	usleep_range(100, 200);
+	return err;
+}
+
+/*
+ * Read-modify-write helper: shift 'val' into the field selected by
+ * 'mask' and merge it with the bits of 'original' outside the mask.
+ */
+static int i2cwritewithmask(
+	struct i2c_client *client, struct regmap *map, unsigned int reg,
+	u8 val, u8 original, u8 mask)
+{
+	return i2cwrite(client, map, reg,
+		((val << bits_to_shift(mask)) & mask) | (original & ~mask));
+}
+
+/*
+ * idt24x_get_offsets - fill in the per-output register offsets/masks.
+ * @output_num:	output index Q0..Q3
+ * @offsets:	destination structure
+ *
+ * Q0 uses the ns1/ns2 divider registers; Q1-Q3 use the n/nfrac
+ * registers instead (the fields left untouched here retain whatever the
+ * caller initialized them to).
+ *
+ * Return: 0 on success, -EINVAL for an output index other than 0-3.
+ */
+int idt24x_get_offsets(
+	u8 output_num,
+	struct clk_register_offsets *offsets)
+{
+	switch (output_num) {
+	case 0:
+		offsets->oe_offset = IDT24x_REG_OUTEN;
+		offsets->oe_mask = IDT24x_REG_OUTEN0_MASK;
+		offsets->dis_mask = IDT24x_REG_Q0_DIS_MASK;
+		offsets->ns1_offset = IDT24x_REG_NS1_Q0;
+		offsets->ns1_offset_mask = IDT24x_REG_NS1_Q0_MASK;
+		offsets->ns2_15_8_offset = IDT24x_REG_NS2_Q0_15_8;
+		offsets->ns2_7_0_offset = IDT24x_REG_NS2_Q0_7_0;
+		break;
+	case 1:
+		offsets->oe_offset = IDT24x_REG_OUTEN;
+		offsets->oe_mask = IDT24x_REG_OUTEN1_MASK;
+		offsets->dis_mask = IDT24x_REG_Q1_DIS_MASK;
+		offsets->n_17_16_offset = IDT24x_REG_N_Q1_17_16;
+		offsets->n_17_16_mask = IDT24x_REG_N_Q1_17_16_MASK;
+		offsets->n_15_8_offset = IDT24x_REG_N_Q1_15_8;
+		offsets->n_7_0_offset = IDT24x_REG_N_Q1_7_0;
+		offsets->nfrac_27_24_offset = IDT24x_REG_NFRAC_Q1_27_24;
+		offsets->nfrac_27_24_mask =
+			IDT24x_REG_NFRAC_Q1_27_24_MASK;
+		offsets->nfrac_23_16_offset = IDT24x_REG_NFRAC_Q1_23_16;
+		offsets->nfrac_15_8_offset = IDT24x_REG_NFRAC_Q1_15_8;
+		offsets->nfrac_7_0_offset = IDT24x_REG_NFRAC_Q1_7_0;
+		break;
+	case 2:
+		offsets->oe_offset = IDT24x_REG_OUTEN;
+		offsets->oe_mask = IDT24x_REG_OUTEN2_MASK;
+		offsets->dis_mask = IDT24x_REG_Q2_DIS_MASK;
+		offsets->n_17_16_offset = IDT24x_REG_N_Q2_17_16;
+		offsets->n_17_16_mask = IDT24x_REG_N_Q2_17_16_MASK;
+		offsets->n_15_8_offset = IDT24x_REG_N_Q2_15_8;
+		offsets->n_7_0_offset = IDT24x_REG_N_Q2_7_0;
+		offsets->nfrac_27_24_offset = IDT24x_REG_NFRAC_Q2_27_24;
+		offsets->nfrac_27_24_mask =
+			IDT24x_REG_NFRAC_Q2_27_24_MASK;
+		offsets->nfrac_23_16_offset = IDT24x_REG_NFRAC_Q2_23_16;
+		offsets->nfrac_15_8_offset = IDT24x_REG_NFRAC_Q2_15_8;
+		offsets->nfrac_7_0_offset = IDT24x_REG_NFRAC_Q2_7_0;
+		break;
+	case 3:
+		offsets->oe_offset = IDT24x_REG_OUTEN;
+		offsets->oe_mask = IDT24x_REG_OUTEN3_MASK;
+		offsets->dis_mask = IDT24x_REG_Q3_DIS_MASK;
+		offsets->n_17_16_offset = IDT24x_REG_N_Q3_17_16;
+		offsets->n_17_16_mask = IDT24x_REG_N_Q3_17_16_MASK;
+		offsets->n_15_8_offset = IDT24x_REG_N_Q3_15_8;
+		offsets->n_7_0_offset = IDT24x_REG_N_Q3_7_0;
+		offsets->nfrac_27_24_offset = IDT24x_REG_NFRAC_Q3_27_24;
+		offsets->nfrac_27_24_mask =
+			IDT24x_REG_NFRAC_Q3_27_24_MASK;
+		offsets->nfrac_23_16_offset = IDT24x_REG_NFRAC_Q3_23_16;
+		offsets->nfrac_15_8_offset = IDT24x_REG_NFRAC_Q3_15_8;
+		offsets->nfrac_7_0_offset = IDT24x_REG_NFRAC_Q3_7_0;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * idt24x_calc_div_q0 - Calculate dividers and VCO freq to generate
+ * the specified Q0 frequency.
+ * @chip: Device data structure. contains all requested frequencies
+ * for all outputs.
+ *
+ * The actual output divider is ns1 * ns2 * 2. fOutput = fVCO / (ns1 * ns2 * 2)
+ *
+ * The options for ns1 (when the source is the VCO) are 4,5,6. ns2 is a
+ * 16-bit value.
+ *
+ * chip->divs: structure for specifying ns1/ns2 values. If 0 after this
+ * function, Q0 is not requested
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int idt24x_calc_div_q0(struct clk_idt24x_chip *chip)
+{
+	u8 x;
+	u32 min_div, max_div, best_vco = 0;
+	u16 min_ns2, max_ns2;
+	bool is_lower_vco = false;
+
+	/* 0 here means "Q0 not requested" per the kernel-doc above */
+	chip->divs.ns1_q0 = 0;
+	chip->divs.ns2_q0 = 0;
+
+	if (chip->clk[0].requested == 0)
+		return 0;
+
+	/* divider range implied by the VCO limits, rounded to even */
+	min_div = div64_u64(
+		(u64)IDT24x_VCO_MIN, chip->clk[0].requested * 2) * 2;
+	max_div = div64_u64(
+		(u64)IDT24x_VCO_MAX, chip->clk[0].requested * 2) * 2;
+
+	dev_dbg(&chip->i2c_client->dev,
+		"%s. requested: %u, min_div: %u, max_div: %u",
+		__func__, chip->clk[0].requested, min_div, max_div);
+
+	min_ns2 = div64_u64((u64)min_div, IDT24x_MAX_NS1 * 2);
+	max_ns2 = div64_u64((u64)max_div, IDT24x_MIN_NS1 * 2);
+
+	dev_dbg(&chip->i2c_client->dev,
+		"%s. min_ns2: %u, max_ns2: %u", __func__, min_ns2, max_ns2);
+
+	/* exhaustive search over ns1 options and the ns2 range */
+	for (x = 0; x < ARRAY_SIZE(q0_ns1_options); x++) {
+		u16 y = min_ns2;
+
+		while (y <= max_ns2) {
+			u32 actual_div = q0_ns1_options[x] * y * 2;
+			u32 current_vco = actual_div *
+				chip->clk[0].requested;
+
+			if (current_vco < IDT24x_VCO_MIN)
+				dev_dbg(&chip->i2c_client->dev,
+					"%s. ignore div: (ns1=%u * ns2=%u * 2 * %u) == %u < %u",
+					__func__, q0_ns1_options[x], y,
+					chip->clk[0].requested,
+					current_vco, IDT24x_VCO_MIN);
+			else if (current_vco > IDT24x_VCO_MAX) {
+				dev_dbg(&chip->i2c_client->dev,
+					"%s. ignore div: (ns1=%u * ns2=%u * 2 * %u) == %u > %u. EXIT LOOP.",
+					__func__, q0_ns1_options[x], y,
+					chip->clk[0].requested,
+					current_vco, IDT24x_VCO_MAX);
+				/* VCO only grows with y: abort this ns1 */
+				y = max_ns2;
+			} else {
+				bool use = false;
+
+				dev_dbg(&chip->i2c_client->dev,
+					"%s. contender: (ns1=%u * ns2=%u * 2 * %u) == %u [in range]",
+					__func__, q0_ns1_options[x], y,
+					chip->clk[0].requested,
+					current_vco);
+				/*
+				 * Prefer the highest VCO at or below
+				 * IDT24x_VCO_OPT; fall back to the highest
+				 * above it only if none is at/below.
+				 */
+				if (current_vco <= IDT24x_VCO_OPT) {
+					if (current_vco > best_vco ||
+					    !is_lower_vco) {
+						is_lower_vco = true;
+						use = true;
+					}
+				} else if (!is_lower_vco &&
+					   current_vco > best_vco)
+					use = true;
+				if (use) {
+					/*
+					 * ns1_q0 stores the INDEX into
+					 * q0_ns1_options (== the NS1_Q0
+					 * register encoding), not the
+					 * divider value itself.
+					 */
+					chip->divs.ns1_q0 = x;
+					chip->divs.ns2_q0 = y;
+					best_vco = current_vco;
+				}
+			}
+			y++;
+		}
+	}
+
+	dev_dbg(&chip->i2c_client->dev,
+		"%s. best: (ns1=%u [/%u] * ns2=%u * 2 * %u) == %u",
+		__func__, chip->divs.ns1_q0, q0_ns1_options[chip->divs.ns1_q0],
+		chip->divs.ns2_q0, chip->clk[0].requested, best_vco);
+	return 0;
+}
+
+/**
+ * idt24x_calc_divs - Calculate dividers to generate the specified frequency.
+ * @chip:	Device data structure. contains all requested frequencies
+ *		for all outputs.
+ *
+ * Calculate the clock dividers (dsmint, dsmfrac for vco; ns1/ns2 for q0,
+ * n/nfrac for q1-3) for a given target frequency.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int idt24x_calc_divs(struct clk_idt24x_chip *chip)
+{
+	u32 vco = 0;
+	int result;
+
+	result = idt24x_calc_div_q0(chip);
+	if (result < 0)
+		return result;
+
+	dev_dbg(&chip->i2c_client->dev,
+		"%s: after idt24x_calc_div_q0. ns1: %u [/%u], ns2: %u",
+		__func__, chip->divs.ns1_q0, q0_ns1_options[chip->divs.ns1_q0],
+		chip->divs.ns2_q0);
+
+	chip->divs.dsmint = 0;
+	chip->divs.dsmfrac = 0;
+
+	if (chip->clk[0].requested > 0) {
+		/* Q0 is in use and is governing the actual VCO freq */
+		vco = q0_ns1_options[chip->divs.ns1_q0] * chip->divs.ns2_q0 *
+			2 * chip->clk[0].requested;
+	} else {
+		u32 freq = 0;
+		u32 walk;
+		u32 min_div, max_div;
+		bool is_lower_vco = false;
+
+		/*
+		 * Q0 is not in use. Use the first requested (fractional)
+		 * output frequency as the one controlling the VCO.
+		 */
+		for (walk = 1; walk < NUM_OUTPUTS; walk++) {
+			if (chip->clk[walk].requested != 0) {
+				freq = chip->clk[walk].requested;
+				break;
+			}
+		}
+
+		if (freq == 0) {
+			dev_err(&chip->i2c_client->dev,
+				"%s: NO FREQUENCIES SPECIFIED", __func__);
+			return -EINVAL;
+		}
+
+		/*
+		 * First, determine the min/max div for the output frequency.
+		 */
+		min_div = IDT24x_MIN_INT_DIVIDER;
+		max_div = div64_u64((u64)IDT24x_VCO_MAX, freq * 2) * 2;
+
+		dev_dbg(&chip->i2c_client->dev,
+			"%s: calc_divs for fractional output. freq: %u, min_div: %u, max_div: %u",
+			__func__, freq, min_div, max_div);
+
+		walk = min_div;
+
+		while (walk <= max_div) {
+			u32 current_vco = freq * walk;
+
+			dev_dbg(&chip->i2c_client->dev,
+				"%s: calc_divs for fractional output. walk: %u, freq: %u, vco: %u",
+				__func__, walk, freq, current_vco);
+			/*
+			 * Fix: compare the CANDIDATE (current_vco) against
+			 * the upper bound. The original compared the
+			 * best-so-far value (vco, initialized to 0), which
+			 * made the max check ineffective and could select
+			 * an out-of-range VCO.
+			 */
+			if (current_vco >= IDT24x_VCO_MIN &&
+			    current_vco <= IDT24x_VCO_MAX) {
+				/*
+				 * Same band preference as in
+				 * idt24x_calc_div_q0: favor VCOs at or below
+				 * IDT24x_VCO_OPT; within a band, favor the
+				 * highest VCO.
+				 */
+				if (current_vco <= IDT24x_VCO_OPT) {
+					if (current_vco > vco ||
+					    !is_lower_vco) {
+						is_lower_vco = true;
+						vco = current_vco;
+					}
+				} else if (!is_lower_vco && current_vco > vco) {
+					vco = current_vco;
+				}
+			}
+			/* Divider must be even. */
+			walk += 2;
+		}
+	}
+
+	if (vco != 0) {
+		u32 pfd;
+		u64 rem;
+		int x;
+
+		/* Setup dividers for outputs with fractional dividers. */
+		for (x = 1; x < NUM_OUTPUTS; x++) {
+			if (chip->clk[x].requested != 0) {
+				/*
+				 * The value written to the chip is half
+				 * the calculated divider.
+				 */
+				chip->divs.nint[x - 1] = div64_u64_rem(
+					(u64)vco,
+					chip->clk[x].requested * 2,
+					&rem);
+				/* rem << 28 == rem * 2^28 (28-bit frac) */
+				chip->divs.nfrac[x - 1] = div64_u64(
+					rem << 28,
+					chip->clk[x].requested * 2);
+				dev_dbg(&chip->i2c_client->dev,
+					"%s: div to get Q%i freq %u from vco %u: int part: %u, rem: %llu, frac part: %u",
+					__func__, x,
+					chip->clk[x].requested,
+					vco, chip->divs.nint[x - 1], rem,
+					chip->divs.nfrac[x - 1]);
+			}
+		}
+
+		/* Calculate freq for pfd */
+		pfd = chip->input_clk_freq * (chip->doubler_disabled ? 1 : 2);
+
+		/*
+		 * Calculate dsmint & dsmfrac:
+		 * -----------------------------
+		 * dsm = float(vco)/float(pfd)
+		 * dsmfrac = (dsm - floor(dsm)) * 2^21
+		 * rem = vco % pfd
+		 * therefore:
+		 * dsmfrac = (rem * 2^21)/pfd
+		 */
+		chip->divs.dsmint = div64_u64_rem(vco, pfd, &rem);
+		/* rem << 21 == rem * 2^21 (21-bit frac) */
+		chip->divs.dsmfrac = div64_u64(rem << 21, pfd);
+
+		dev_dbg(&chip->i2c_client->dev,
+			"%s: vco: %u, pfd: %u, dsmint: %u, dsmfrac: %u, rem: %llu",
+			__func__, vco, pfd, chip->divs.dsmint,
+			chip->divs.dsmfrac, rem);
+	} else {
+		dev_err(&chip->i2c_client->dev,
+			"%s: no integer divider in range found. NOT SUPPORTED.",
+			__func__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * idt24x_enable_output - Enable/disable a particular output
+ * @chip:	Device data structure
+ * @output:	Output to enable/disable
+ * @enable:	Enable (true/false)
+ *
+ * Updates the cached copies of the output-enable / output-disable
+ * registers and writes them (plus the output-mode registers) back to
+ * the chip.
+ *
+ * Return: passes on regmap_write return value.
+ */
+static int idt24x_enable_output(
+	struct clk_idt24x_chip *chip, u8 output, bool enable)
+{
+	struct clk_register_offsets offsets;
+	int err;
+	struct i2c_client *client = chip->i2c_client;
+
+	/*
+	 * When an output is enabled, enable it in the original
+	 * data read from the chip and cached. Otherwise it may be
+	 * accidentally turned off when another output is enabled.
+	 *
+	 * E.g., the driver starts with all outputs off in reg_out_en_x.
+	 * Q1 is enabled with the appropriate mask. Q2 is then enabled,
+	 * which results in Q1 being turned back off (because Q1 was off
+	 * in reg_out_en_x).
+	 */
+
+	err = idt24x_get_offsets(output, &offsets);
+	if (err) {
+		dev_err(&client->dev,
+			"%s: error calling idt24x_get_offsets for %d: %i",
+			__func__, output, err);
+		return err;
+	}
+
+	dev_dbg(&client->dev,
+		"%s: q%u enable? %d. reg_out_en_x before: 0x%x, reg_out_mode_0_1 before: 0x%x, reg_out_mode_2_3 before: 0x%x, reg_qx_dis before: 0x%x",
+		__func__, output, enable, chip->reg_out_en_x,
+		chip->reg_out_mode_0_1, chip->reg_out_mode_2_3,
+		chip->reg_qx_dis);
+
+	/* Clear this output's OE bit in the cache, set it only if enabling. */
+	chip->reg_out_en_x = chip->reg_out_en_x & ~offsets.oe_mask;
+	if (enable)
+		chip->reg_out_en_x |= (1 << bits_to_shift(offsets.oe_mask));
+
+	/* DIS register has inverted polarity: set the bit when disabling. */
+	chip->reg_qx_dis = chip->reg_qx_dis & ~offsets.dis_mask;
+	dev_dbg(&client->dev,
+		"%s: q%u enable? %d. reg_qx_dis mask: 0x%x, before checking enable: 0x%x",
+		__func__, output, enable, offsets.dis_mask,
+		chip->reg_qx_dis);
+	if (!enable)
+		chip->reg_qx_dis |= (1 << bits_to_shift(offsets.dis_mask));
+
+	dev_dbg(&client->dev,
+		"%s: q%u enable? %d. reg_out_en_x after: 0x%x, reg_qx_dis after: 0x%x",
+		__func__, output, enable, chip->reg_out_en_x,
+		chip->reg_qx_dis);
+
+	/* Push the updated cached values to the hardware. */
+	err = i2cwrite(
+		client, chip->regmap, IDT24x_REG_OUTEN, chip->reg_out_en_x);
+	if (err) {
+		dev_err(&client->dev,
+			"%s: error setting IDT24x_REG_OUTEN: %i",
+			__func__, err);
+		return err;
+	}
+
+	err = i2cwrite(
+		client, chip->regmap, IDT24x_REG_OUTMODE0_1,
+		chip->reg_out_mode_0_1);
+	if (err) {
+		dev_err(&client->dev,
+			"%s: error setting IDT24x_REG_OUTMODE0_1: %i",
+			__func__, err);
+		return err;
+	}
+
+	err = i2cwrite(
+		client, chip->regmap, IDT24x_REG_OUTMODE2_3,
+		chip->reg_out_mode_2_3);
+	if (err) {
+		dev_err(&client->dev,
+			"%s: error setting IDT24x_REG_OUTMODE2_3: %i",
+			__func__, err);
+		return err;
+	}
+
+	err = i2cwrite(
+		client, chip->regmap, IDT24x_REG_Q_DIS, chip->reg_qx_dis);
+	if (err) {
+		dev_err(&client->dev,
+			"%s: error setting IDT24x_REG_Q_DIS: %i",
+			__func__, err);
+		return err;
+	}
+
+	return 0;
+}
+
+/**
+ * idt24x_update_device - write registers to the chip
+ * @chip:	Device data structure
+ *
+ * Write all values to hardware that we have calculated.
+ *
+ * Return: passes on regmap_bulk_write return value.
+ */
+static int idt24x_update_device(struct clk_idt24x_chip *chip)
+{
+	int err;
+	struct i2c_client *client = chip->i2c_client;
+	int x = -1;
+
+	dev_dbg(&client->dev,
+		"%s: setting DSM_INT_8 (val %u @ %u)",
+		__func__, chip->divs.dsmint >> 8,
+		IDT24x_REG_DSM_INT_8);
+	err = i2cwritewithmask(
+		client, chip->regmap, IDT24x_REG_DSM_INT_8,
+		(chip->divs.dsmint >> 8) & IDT24x_REG_DSM_INT_8_MASK,
+		chip->reg_dsm_int_8, IDT24x_REG_DSM_INT_8_MASK);
+	if (err) {
+		dev_err(&client->dev,
+			"%s: error setting IDT24x_REG_DSM_INT_8: %i",
+			__func__, err);
+		return err;
+	}
+
+	dev_dbg(&client->dev,
+		"%s: setting DSM_INT_7_0 (val %u @ 0x%x)",
+		__func__, chip->divs.dsmint & 0xFF,
+		IDT24x_REG_DSM_INT_7_0);
+	err = i2cwrite(
+		client, chip->regmap, IDT24x_REG_DSM_INT_7_0,
+		chip->divs.dsmint & 0xFF);
+	if (err) {
+		dev_err(&client->dev,
+			"%s: error setting IDT24x_REG_DSM_INT_7_0: %i",
+			__func__, err);
+		return err;
+	}
+
+	dev_dbg(&client->dev,
+		"%s: setting IDT24x_REG_DSMFRAC_20_16 (val %u @ 0x%x)",
+		__func__, chip->divs.dsmfrac >> 16,
+		IDT24x_REG_DSMFRAC_20_16);
+	/*
+	 * Fix: pass the cached DSMFRAC_20_16 value, not reg_dsm_int_8.
+	 * The original passed the wrong cached register, so non-masked
+	 * bits of DSMFRAC_20_16 were restored from the DSM_INT_8 cache.
+	 */
+	err = i2cwritewithmask(
+		client, chip->regmap, IDT24x_REG_DSMFRAC_20_16,
+		(chip->divs.dsmfrac >> 16) & IDT24x_REG_DSMFRAC_20_16_MASK,
+		chip->reg_dsm_frac_20_16, IDT24x_REG_DSMFRAC_20_16_MASK);
+	if (err) {
+		dev_err(&client->dev,
+			"%s: error setting IDT24x_REG_DSMFRAC_20_16: %i",
+			__func__, err);
+		return err;
+	}
+
+	dev_dbg(&client->dev,
+		"%s: setting IDT24x_REG_DSMFRAC_15_8 (val %u @ 0x%x)",
+		__func__, (chip->divs.dsmfrac >> 8) & 0xFF,
+		IDT24x_REG_DSMFRAC_15_8);
+	err = i2cwrite(
+		client, chip->regmap, IDT24x_REG_DSMFRAC_15_8,
+		(chip->divs.dsmfrac >> 8) & 0xFF);
+	if (err) {
+		dev_err(&client->dev,
+			"%s: error setting IDT24x_REG_DSMFRAC_15_8: %i",
+			__func__, err);
+		return err;
+	}
+
+	dev_dbg(&client->dev,
+		"%s: setting IDT24x_REG_DSMFRAC_7_0 (val %u @ 0x%x)",
+		__func__, chip->divs.dsmfrac & 0xFF,
+		IDT24x_REG_DSMFRAC_7_0);
+	err = i2cwrite(
+		client, chip->regmap, IDT24x_REG_DSMFRAC_7_0,
+		chip->divs.dsmfrac & 0xFF);
+	if (err) {
+		dev_err(&client->dev,
+			"%s: error setting IDT24x_REG_DSMFRAC_7_0: %i",
+			__func__, err);
+		return err;
+	}
+
+	dev_dbg(&client->dev,
+		"%s: setting IDT24x_REG_NS1_Q0 (val %u @ 0x%x)",
+		__func__, chip->divs.ns1_q0, IDT24x_REG_NS1_Q0);
+	err = i2cwritewithmask(
+		client, chip->regmap, IDT24x_REG_NS1_Q0,
+		chip->divs.ns1_q0 & IDT24x_REG_NS1_Q0_MASK,
+		chip->reg_ns1_q0, IDT24x_REG_NS1_Q0_MASK);
+	if (err) {
+		dev_err(&client->dev,
+			"%s: error setting IDT24x_REG_NS1_Q0: %i",
+			__func__, err);
+		return err;
+	}
+
+	dev_dbg(&client->dev,
+		"%s: setting IDT24x_REG_NS2_Q0_15_8 (val %u @ 0x%x)",
+		__func__, (chip->divs.ns2_q0 >> 8) & 0xFF,
+		IDT24x_REG_NS2_Q0_15_8);
+	err = i2cwrite(
+		client, chip->regmap, IDT24x_REG_NS2_Q0_15_8,
+		(chip->divs.ns2_q0 >> 8) & 0xFF);
+	if (err) {
+		dev_err(&client->dev,
+			"%s: error setting IDT24x_REG_NS2_Q0_15_8: %i",
+			__func__, err);
+		return err;
+	}
+
+	dev_dbg(&client->dev,
+		"%s: setting IDT24x_REG_NS2_Q0_7_0 (val %u @ 0x%x)",
+		__func__, chip->divs.ns2_q0 & 0xFF,
+		IDT24x_REG_NS2_Q0_7_0);
+	err = i2cwrite(
+		client, chip->regmap, IDT24x_REG_NS2_Q0_7_0,
+		chip->divs.ns2_q0 & 0xFF);
+	if (err) {
+		dev_err(&client->dev,
+			"%s: error setting IDT24x_REG_NS2_Q0_7_0: %i",
+			__func__, err);
+		return err;
+	}
+
+	dev_dbg(&client->dev,
+		"%s: calling idt24x_enable_output for Q0. requestedFreq: %u",
+		__func__, chip->clk[0].requested);
+	/* Fix: don't ignore a failure to enable/disable Q0. */
+	err = idt24x_enable_output(chip, 0, chip->clk[0].requested != 0);
+	if (err)
+		return err;
+
+	dev_dbg(&client->dev,
+		"%s: writing values for q1-q3", __func__);
+	for (x = 1; x < NUM_OUTPUTS; x++) {
+		struct clk_register_offsets offsets;
+
+		if (chip->clk[x].requested != 0) {
+			dev_dbg(&client->dev,
+				"%s: calling idt24x_get_offsets for %u",
+				__func__, x);
+			err = idt24x_get_offsets(x, &offsets);
+			if (err) {
+				dev_err(&client->dev,
+					"%s: error calling idt24x_get_offsets: %i",
+					__func__, err);
+				return err;
+			}
+
+			dev_dbg(&client->dev,
+				"%s: (q%u, nint: %u, nfrac: %u)",
+				__func__, x, chip->divs.nint[x - 1],
+				chip->divs.nfrac[x - 1]);
+
+			dev_dbg(&client->dev,
+				"%s: setting n_17_16_offset (q%u, val %u @ 0x%x)",
+				__func__, x,
+				chip->divs.nint[x - 1] >> 16,
+				offsets.n_17_16_offset);
+			err = i2cwritewithmask(
+				client, chip->regmap, offsets.n_17_16_offset,
+				(chip->divs.nint[x - 1] >> 16) &
+				offsets.n_17_16_mask,
+				chip->reg_n_qx_17_16[x - 1],
+				offsets.n_17_16_mask);
+			if (err) {
+				dev_err(&client->dev,
+					"%s: error setting n_17_16_offset: %i",
+					__func__, err);
+				return err;
+			}
+
+			dev_dbg(&client->dev,
+				"%s: setting n_15_8_offset (q%u, val %u @ 0x%x)",
+				__func__, x,
+				(chip->divs.nint[x - 1] >> 8) & 0xFF,
+				offsets.n_15_8_offset);
+			err = i2cwrite(
+				client, chip->regmap, offsets.n_15_8_offset,
+				(chip->divs.nint[x - 1] >> 8) & 0xFF);
+			if (err) {
+				dev_err(&client->dev,
+					"%s: error setting n_15_8_offset: %i",
+					__func__, err);
+				return err;
+			}
+
+			dev_dbg(&client->dev,
+				"%s: setting n_7_0_offset (q%u, val %u @ 0x%x)",
+				__func__, x,
+				chip->divs.nint[x - 1] & 0xFF,
+				offsets.n_7_0_offset);
+			err = i2cwrite(
+				client, chip->regmap, offsets.n_7_0_offset,
+				chip->divs.nint[x - 1] & 0xFF);
+			if (err) {
+				dev_err(&client->dev,
+					"%s: error setting n_7_0_offset: %i",
+					__func__, err);
+				return err;
+			}
+
+			dev_dbg(&client->dev,
+				"%s: setting nfrac_27_24_offset (q%u, val %u @ 0x%x)",
+				__func__, x,
+				(chip->divs.nfrac[x - 1] >> 24),
+				offsets.nfrac_27_24_offset);
+			err = i2cwritewithmask(
+				client, chip->regmap,
+				offsets.nfrac_27_24_offset,
+				(chip->divs.nfrac[x - 1] >> 24) &
+				offsets.nfrac_27_24_mask,
+				chip->reg_nfrac_qx_27_24[x - 1],
+				offsets.nfrac_27_24_mask);
+			if (err) {
+				dev_err(&client->dev,
+					"%s: error setting nfrac_27_24_offset: %i",
+					__func__, err);
+				return err;
+			}
+
+			dev_dbg(&client->dev,
+				"%s: setting nfrac_23_16_offset (q%u, val %u @ 0x%x)",
+				__func__, x,
+				(chip->divs.nfrac[x - 1] >> 16) & 0xFF,
+				offsets.nfrac_23_16_offset);
+			err = i2cwrite(
+				client, chip->regmap,
+				offsets.nfrac_23_16_offset,
+				(chip->divs.nfrac[x - 1] >> 16) & 0xFF);
+			if (err) {
+				dev_err(&client->dev,
+					"%s: error setting nfrac_23_16_offset: %i",
+					__func__, err);
+				return err;
+			}
+
+			dev_dbg(&client->dev,
+				"%s: setting nfrac_15_8_offset (q%u, val %u @ 0x%x)",
+				__func__, x,
+				(chip->divs.nfrac[x - 1] >> 8) & 0xFF,
+				offsets.nfrac_15_8_offset);
+			err = i2cwrite(
+				client, chip->regmap,
+				offsets.nfrac_15_8_offset,
+				(chip->divs.nfrac[x - 1] >> 8) & 0xFF);
+			if (err) {
+				dev_err(&client->dev,
+					"%s: error setting nfrac_15_8_offset: %i",
+					__func__, err);
+				return err;
+			}
+
+			dev_dbg(&client->dev,
+				"%s: setting nfrac_7_0_offset (q%u, val %u @ 0x%x)",
+				__func__, x,
+				chip->divs.nfrac[x - 1] & 0xFF,
+				offsets.nfrac_7_0_offset);
+			err = i2cwrite(
+				client, chip->regmap, offsets.nfrac_7_0_offset,
+				chip->divs.nfrac[x - 1] & 0xFF);
+			if (err) {
+				dev_err(&client->dev,
+					"%s: error setting nfrac_7_0_offset: %i",
+					__func__, err);
+				return err;
+			}
+		}
+		/* Fix: propagate enable/disable failures for Q1-Q3. */
+		err = idt24x_enable_output(chip, x,
+					   chip->clk[x].requested != 0);
+		if (err)
+			return err;
+		chip->clk[x].actual = chip->clk[x].requested;
+	}
+	return 0;
+}
+
+/**
+ * idt24x_set_frequency - Adjust output frequency on the attached chip.
+ * @chip:	Device data structure, including all requested frequencies.
+ *
+ * Disables any output whose requested frequency is 0, then (if at least
+ * one output is requested) calculates all dividers and writes them to
+ * the hardware.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int idt24x_set_frequency(struct clk_idt24x_chip *chip)
+{
+	int err;
+	struct i2c_client *client = chip->i2c_client;
+	int x;
+	bool all_disabled = true;
+
+	for (x = 0; x < NUM_OUTPUTS; x++) {
+		if (chip->clk[x].requested == 0) {
+			/* Fix: don't silently ignore a failed disable. */
+			err = idt24x_enable_output(chip, x, false);
+			if (err)
+				return err;
+			chip->clk[x].actual = 0;
+		} else {
+			all_disabled = false;
+		}
+	}
+
+	if (all_disabled) {
+		/*
+		 * no requested frequencies, so nothing else to calculate
+		 * or write to the chip. If the consumer wants to disable
+		 * all outputs, they can request 0 for all frequencies.
+		 */
+		return 0;
+	}
+
+	if (chip->input_clk_freq == 0) {
+		dev_err(&client->dev,
+			"%s: no input frequency; can't continue.", __func__);
+		return -EINVAL;
+	}
+
+	err = idt24x_calc_divs(chip);
+	if (err) {
+		dev_err(&client->dev,
+			"%s: error calling idt24x_calc_divs: %i",
+			__func__, err);
+		return err;
+	}
+
+	err = idt24x_update_device(chip);
+	if (err) {
+		dev_err(&client->dev,
+			"%s: error updating the device: %i",
+			__func__, err);
+		return err;
+	}
+
+	return 0;
+}
diff --git a/drivers/clk/idt/clk-idt8t49n24x-core.h b/drivers/clk/idt/clk-idt8t49n24x-core.h
new file mode 100644
index 000000000000..247ec070c621
--- /dev/null
+++ b/drivers/clk/idt/clk-idt8t49n24x-core.h
@@ -0,0 +1,272 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* clk-idt8t49n24x-core.h - Program 8T49N24x settings via I2C (common code)
+ *
+ * Copyright (C) 2018, Integrated Device Technology, Inc. <david.cater@idt.com>
+ *
+ * See https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html
+ * This program is distributed "AS IS" and WITHOUT ANY WARRANTY;
+ * including the implied warranties of MERCHANTABILITY, FITNESS FOR
+ * A PARTICULAR PURPOSE, or NON-INFRINGEMENT.
+ */
+
+#ifndef __IDT_CLK_IDT8T49N24X_CORE_H_
+#define __IDT_CLK_IDT8T49N24X_CORE_H_
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+/*
+ * The configurations in the settings file have 0x317 registers (last offset
+ * is 0x316).
+ */
+#define NUM_CONFIG_REGISTERS 0x317
+#define NUM_INPUTS 2
+#define NUM_OUTPUTS 4
+#define DEBUGFS_BUFFER_LENGTH 200
+#define WRITE_BLOCK_SIZE 32
+
+/* Non output-specific registers */
+#define IDT24x_REG_DBL_DIS 0x6C
+#define IDT24x_REG_DBL_DIS_MASK 0x01
+#define IDT24x_REG_DSM_INT_8 0x25
+#define IDT24x_REG_DSM_INT_8_MASK 0x01
+#define IDT24x_REG_DSM_INT_7_0 0x26
+#define IDT24x_REG_DSMFRAC_20_16 0x28
+#define IDT24x_REG_DSMFRAC_20_16_MASK 0x1F
+#define IDT24x_REG_DSMFRAC_15_8 0x29
+#define IDT24x_REG_DSMFRAC_7_0 0x2A
+#define IDT24x_REG_OUTEN 0x39
+#define IDT24x_REG_OUTMODE0_1 0x3E
+#define IDT24x_REG_OUTMODE2_3 0x3D
+#define IDT24x_REG_Q_DIS 0x6F
+
+/* Q0 */
+#define IDT24x_REG_OUTEN0_MASK 0x01
+#define IDT24x_REG_OUTMODE0_MASK 0x0E
+#define IDT24x_REG_Q0_DIS_MASK 0x01
+#define IDT24x_REG_NS1_Q0 0x3F
+#define IDT24x_REG_NS1_Q0_MASK 0x03
+#define IDT24x_REG_NS2_Q0_15_8 0x40
+#define IDT24x_REG_NS2_Q0_7_0 0x41
+
+/* Q1 */
+#define IDT24x_REG_OUTEN1_MASK 0x02
+#define IDT24x_REG_OUTMODE1_MASK 0xE0
+#define IDT24x_REG_Q1_DIS_MASK 0x02
+#define IDT24x_REG_N_Q1_17_16 0x42
+#define IDT24x_REG_N_Q1_17_16_MASK 0x03
+#define IDT24x_REG_N_Q1_15_8 0x43
+#define IDT24x_REG_N_Q1_7_0 0x44
+#define IDT24x_REG_NFRAC_Q1_27_24 0x57
+#define IDT24x_REG_NFRAC_Q1_27_24_MASK 0x0F
+#define IDT24x_REG_NFRAC_Q1_23_16 0x58
+#define IDT24x_REG_NFRAC_Q1_15_8 0x59
+#define IDT24x_REG_NFRAC_Q1_7_0 0x5A
+
+/* Q2 */
+#define IDT24x_REG_OUTEN2_MASK 0x04
+#define IDT24x_REG_OUTMODE2_MASK 0x0E
+#define IDT24x_REG_Q2_DIS_MASK 0x04
+#define IDT24x_REG_N_Q2_17_16 0x45
+#define IDT24x_REG_N_Q2_17_16_MASK 0x03
+#define IDT24x_REG_N_Q2_15_8 0x46
+#define IDT24x_REG_N_Q2_7_0 0x47
+#define IDT24x_REG_NFRAC_Q2_27_24 0x5B
+#define IDT24x_REG_NFRAC_Q2_27_24_MASK 0x0F
+#define IDT24x_REG_NFRAC_Q2_23_16 0x5C
+#define IDT24x_REG_NFRAC_Q2_15_8 0x5D
+#define IDT24x_REG_NFRAC_Q2_7_0 0x5E
+
+/* Q3 */
+#define IDT24x_REG_OUTEN3_MASK 0x08
+#define IDT24x_REG_OUTMODE3_MASK 0xE0
+#define IDT24x_REG_Q3_DIS_MASK 0x08
+#define IDT24x_REG_N_Q3_17_16 0x48
+#define IDT24x_REG_N_Q3_17_16_MASK 0x03
+#define IDT24x_REG_N_Q3_15_8 0x49
+#define IDT24x_REG_N_Q3_7_0 0x4A
+#define IDT24x_REG_NFRAC_Q3_27_24 0x5F
+#define IDT24x_REG_NFRAC_Q3_27_24_MASK 0x0F
+#define IDT24x_REG_NFRAC_Q3_23_16 0x60
+#define IDT24x_REG_NFRAC_Q3_15_8 0x61
+#define IDT24x_REG_NFRAC_Q3_7_0 0x62
+
+/**
+ * struct idt24x_output - device output information
+ * @hw:		hw registration info for this specific output clock. This gets
+ * passed as an argument to CCF api calls (e.g., set_rate).
+ * container_of can then be used to get the reference to this
+ * struct.
+ * @chip: store a reference to the parent device structure. container_of
+ * cannot be used to get to the parent device structure from
+ * idt24x_output, because clk_idt24x_chip contains an array of
+ * output structs (for future enhancements to support devices
+ * with different numbers of output clocks).
+ * @index: identifies output on the chip; used in debug statements
+ * @requested: requested output clock frequency (in Hz)
+ * @actual: actual output clock frequency (in Hz). Will only be set after
+ * successful update of the device.
+ * @debug_freq: stores value for debugfs file. Use this instead of requested
+ * struct var because debugfs expects u64, not u32.
+ */
+struct idt24x_output {
+ struct clk_hw hw;
+ struct clk_idt24x_chip *chip;
+ u8 index;
+ u32 requested;
+ u32 actual;
+ u64 debug_freq;
+};
+
+/**
+ * struct idt24x_dividers - output dividers
+ * @dsmint: int component of feedback divider for VCO (2-stage divider)
+ * @dsmfrac: fractional component of feedback divider for VCO
+ * @ns1_q0: ns1 divider component for Q0
+ * @ns2_q0: ns2 divider component for Q0
+ * @nint: int divider component for Q1-3
+ * @nfrac: fractional divider component for Q1-3
+ */
+struct idt24x_dividers {
+ u16 dsmint;
+ u32 dsmfrac;
+
+ u8 ns1_q0;
+ u16 ns2_q0;
+
+ u32 nint[3];
+ u32 nfrac[3];
+};
+
+/**
+ * struct clk_idt24x_chip - device info for chip
+ * @regmap: register map used to perform i2c writes to the chip
+ * @i2c_client: i2c_client struct passed to probe
+ * @min_freq: min frequency for this chip
+ * @max_freq: max frequency for this chip
+ * @settings: filled in if full register map is specified in the DT
+ * @has_settings: true if settings array is valid
+ * @input_clk: ptr to input clock specified in DT
+ * @input_clk_num: which input clock was specified. 0-based. A value of
+ * NUM_INPUTS indicates that a XTAL is used as the input.
+ * @input_clk_nb: notification support (if input clk changes)
+ * @input_clk_freq: current freq of input_clk
+ * @doubler_disabled: whether input doubler is enabled. This value is read
+ * from the hw on probe (in case it is set in @settings).
+ * @clk: array of outputs. One entry per output supported by the
+ * chip. Frequencies requested via the ccf api will be
+ * recorded in this array.
+ * @reg_dsm_int_8: record current value from hw to avoid modifying
+ * when writing register values
+ * @reg_dsm_frac_20_16: record current value
+ * @reg_out_en_x: record current value
+ * @reg_out_mode_0_1: record current value
+ * @reg_out_mode_2_3: record current value
+ * @reg_qx_dis: record current value
+ * @reg_ns1_q0: record current value
+ * @reg_n_qx_17_16: record current value
+ * @reg_nfrac_qx_27_24: record current value
+ * @divs: output divider values for all outputs
+ * @debugfs_dirroot: debugfs support
+ * @debugfs_fileaction: debugfs support
+ * @debugfs_filei2c: debugfs support
+ * @debugfs_map: debugfs support
+ * @dbg_cache: debugfs support
+ * @debugfs_fileqfreq: debugfs support
+ */
+struct clk_idt24x_chip {
+ struct regmap *regmap;
+ struct i2c_client *i2c_client;
+
+ u32 min_freq;
+ u32 max_freq;
+
+ u8 settings[NUM_CONFIG_REGISTERS];
+
+ bool has_settings;
+
+ struct clk *input_clk;
+ int input_clk_num;
+ struct notifier_block input_clk_nb;
+ u32 input_clk_freq;
+
+ bool doubler_disabled;
+
+ struct idt24x_output clk[NUM_OUTPUTS];
+
+ unsigned int reg_dsm_int_8;
+ unsigned int reg_dsm_frac_20_16;
+ unsigned int reg_out_en_x;
+ unsigned int reg_out_mode_0_1;
+ unsigned int reg_out_mode_2_3;
+ unsigned int reg_qx_dis;
+ unsigned int reg_ns1_q0;
+ unsigned int reg_n_qx_17_16[3];
+ unsigned int reg_nfrac_qx_27_24[3];
+
+ struct idt24x_dividers divs;
+
+ struct dentry *debugfs_dirroot, *debugfs_fileaction, *debugfs_filei2c,
+ *debugfs_map;
+ char dbg_cache[DEBUGFS_BUFFER_LENGTH];
+ struct dentry *debugfs_fileqfreq[4];
+};
+
+#define to_idt24x_output(_hw) \
+	container_of(_hw, struct idt24x_output, hw)
+/*
+ * NOTE(review): i2c_client is a POINTER member of struct clk_idt24x_chip,
+ * so container_of() here is only valid when _client is the address of that
+ * member field (i.e. &chip->i2c_client), not the i2c_client itself --
+ * confirm callers before using this macro.
+ */
+#define to_clk_idt24x_from_client(_client) \
+	container_of(_client, struct clk_idt24x_chip, i2c_client)
+#define to_clk_idt24x_from_nb(_nb) \
+	container_of(_nb, struct clk_idt24x_chip, input_clk_nb)
+
+/**
+ * struct clk_register_offsets - register offsets for current context
+ * @oe_offset: offset for current output enable and mode
+ * @oe_mask: mask for current output enable
+ * @dis_mask: mask for current output disable
+ * @n_17_16_offset: offset for current output int divider (bits 17:16)
+ * @n_17_16_mask: mask for current output int divider (bits 17:16)
+ * @n_15_8_offset: offset for current output int divider (bits 15:8)
+ * @n_7_0_offset: offset for current output int divider (bits 7:0)
+ * @nfrac_27_24_offset: offset for current output frac divider (bits 27:24)
+ * @nfrac_27_24_mask: mask for current output frac divider (bits 27:24)
+ * @nfrac_23_16_offset: offset for current output frac divider (bits 23:16)
+ * @nfrac_15_8_offset: offset for current output frac divider (bits 15:8)
+ * @nfrac_7_0_offset: offset for current output frac divider (bits 7:0)
+ * @ns1_offset: offset for stage 1 div for output Q0
+ * @ns1_offset_mask: mask for stage 1 div for output Q0
+ * @ns2_15_8_offset: offset for stage 2 div for output Q0 (bits 15:8)
+ * @ns2_7_0_offset: offset for stage 2 div for output Q0 (bits 7:0)
+ */
+struct clk_register_offsets {
+ u16 oe_offset;
+ u8 oe_mask;
+ u8 dis_mask;
+
+ u16 n_17_16_offset;
+ u8 n_17_16_mask;
+ u16 n_15_8_offset;
+ u16 n_7_0_offset;
+ u16 nfrac_27_24_offset;
+ u8 nfrac_27_24_mask;
+ u16 nfrac_23_16_offset;
+ u16 nfrac_15_8_offset;
+ u16 nfrac_7_0_offset;
+
+ u16 ns1_offset;
+ u8 ns1_offset_mask;
+ u16 ns2_15_8_offset;
+ u16 ns2_7_0_offset;
+};
+
+int bits_to_shift(unsigned int mask);
+int i2cwritebulk(
+ struct i2c_client *client, struct regmap *map,
+ unsigned int reg, u8 val[], size_t val_count);
+int idt24x_get_offsets(
+ u8 output_num,
+ struct clk_register_offsets *offsets);
+int idt24x_set_frequency(struct clk_idt24x_chip *chip);
+
+#endif /* __IDT_CLK_IDT8T49N24X_CORE_H_ */
diff --git a/drivers/clk/idt/clk-idt8t49n24x-debugfs.c b/drivers/clk/idt/clk-idt8t49n24x-debugfs.c
new file mode 100644
index 000000000000..967a9df8701c
--- /dev/null
+++ b/drivers/clk/idt/clk-idt8t49n24x-debugfs.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0
+/* clk-idt8t49n24x-debugfs.c - Debugfs support for 8T49N24x
+ *
+ * Copyright (C) 2018, Integrated Device Technology, Inc. <david.cater@idt.com>
+ *
+ * See https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html
+ * This program is distributed "AS IS" and WITHOUT ANY WARRANTY;
+ * including the implied warranties of MERCHANTABILITY, FITNESS FOR
+ * A PARTICULAR PURPOSE, or NON-INFRINGEMENT.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "clk-idt8t49n24x-debugfs.h"
+
+static struct clk_idt24x_chip *idt24x_chip_fordebugfs;
+
+/**
+ * idt24x_read_all_settings - dump all chip registers as hex text
+ * @chip: device data structure
+ * @output_buffer: destination for the NUL-terminated "xx xx ..." string
+ * @count: size of @output_buffer in bytes
+ *
+ * Return: 0 on success, -EINVAL if @output_buffer is too small, or the
+ * error from regmap_bulk_read().
+ */
+static int idt24x_read_all_settings(
+	struct clk_idt24x_chip *chip, char *output_buffer, int count)
+{
+	u8 settings[NUM_CONFIG_REGISTERS];
+	int err;
+	int x;
+	int used = 0;
+
+	err = regmap_bulk_read(
+		chip->regmap, 0x0, settings, NUM_CONFIG_REGISTERS);
+	if (err)
+		return err;
+
+	output_buffer[0] = '\0';
+	for (x = 0; x < ARRAY_SIZE(settings); x++) {
+		/* Each register needs 3 chars ("xx ") plus the NUL. */
+		if (used + 4 > count)
+			return -EINVAL;
+		/*
+		 * Track the length incrementally instead of the previous
+		 * strlen()/strcat() pair, which rescanned the whole buffer
+		 * for every register (O(n^2)) and went through a 4-byte
+		 * temporary.
+		 */
+		used += scnprintf(output_buffer + used, count - used,
+				  "%02x ", settings[x]);
+	}
+	return 0;
+}
+
+/**
+ * idt24x_debugfs_writer_action - Write handler for the "action" debugfs file.
+ * @fp: file pointer
+ * @user_buffer: buffer of text written to file
+ * @count: size of text in buffer
+ * @position: pass in current position, return new position
+ *
+ * Return: result of call to simple_write_to_buffer
+ *
+ * Use the "action" file as a trigger for setting all requested
+ * rates. The driver doesn't get any notification when the files
+ * representing the Qx outputs are written to, so something else is
+ * needed to notify the driver that the device should be updated.
+ *
+ * It doesn't matter what you write to the action debugfs file. When the
+ * handler is called, the device will be updated.
+ */
+static ssize_t idt24x_debugfs_writer_action(
+	struct file *fp, const char __user *user_buffer,
+	size_t count, loff_t *position)
+{
+	int err = 0;
+	int x;
+	u32 freq;
+	bool needs_update = true;
+	struct i2c_client *client = idt24x_chip_fordebugfs->i2c_client;
+
+	if (count > DEBUGFS_BUFFER_LENGTH)
+		return -EINVAL;
+
+	/*
+	 * NOTE(review): needs_update is overwritten on every iteration, so
+	 * after the loop it reflects only the *last* output (Q3). If the
+	 * intent is "run idt24x_set_frequency when any output has no debug
+	 * rate", this should accumulate (|=) instead — confirm intended
+	 * semantics.
+	 */
+	for (x = 0; x < NUM_OUTPUTS; x++) {
+		freq = idt24x_chip_fordebugfs->clk[x].debug_freq;
+		if (freq) {
+			needs_update = false;
+			dev_dbg(&client->dev,
+				"%s: calling clk_set_rate with debug frequency for Q%i",
+				__func__, x);
+			err = clk_set_rate(
+				idt24x_chip_fordebugfs->clk[x].hw.clk, freq);
+			if (err) {
+				dev_err(&client->dev,
+					"error calling clk_set_rate for Q%i (%i)\n",
+					x, err);
+			}
+		} else {
+			/* No debug rate: clear any previous request. */
+			needs_update = true;
+			idt24x_chip_fordebugfs->clk[x].requested = 0;
+			dev_dbg(&client->dev,
+				"%s: debug frequency for Q%i not set; make sure clock is disabled",
+				__func__, x);
+		}
+	}
+
+	if (needs_update) {
+		dev_dbg(&client->dev,
+			"%s: calling idt24x_set_frequency to ensure any clocks that should be disabled are turned off.",
+			__func__);
+		err = idt24x_set_frequency(idt24x_chip_fordebugfs);
+		if (err) {
+			dev_err(&idt24x_chip_fordebugfs->i2c_client->dev,
+				"%s: error calling idt24x_set_frequency (%i)\n",
+				__func__, err);
+			return err;
+		}
+	}
+
+	/* Cache the written text so the read handler can echo it back. */
+	return simple_write_to_buffer(
+		idt24x_chip_fordebugfs->dbg_cache, DEBUGFS_BUFFER_LENGTH,
+		position, user_buffer, count);
+}
+
+/**
+ * idt24x_debugfs_reader_action - Read the "action" debugfs file.
+ * @fp: file pointer
+ * @user_buffer: buffer of text written to file
+ * @count: size of text in buffer
+ * @position: pass in current position, return new position
+ *
+ * Return: whatever was last written to the "action" debugfs file.
+ */
+static ssize_t idt24x_debugfs_reader_action(
+	struct file *fp, char __user *user_buffer, size_t count,
+	loff_t *position)
+{
+	struct clk_idt24x_chip *chip = idt24x_chip_fordebugfs;
+
+	/* Echo back the cached contents of the last write. */
+	return simple_read_from_buffer(user_buffer, count, position,
+				       chip->dbg_cache,
+				       DEBUGFS_BUFFER_LENGTH);
+}
+
+/**
+ * idt24x_debugfs_reader_map - display the current registers on the device
+ * @fp: file pointer
+ * @user_buffer: buffer of text written to file
+ * @count: size of text in buffer
+ * @position: pass in current position, return new position
+ *
+ * Reads the current register map from the attached chip via I2C and
+ * returns it.
+ *
+ * Return: result of call to simple_read_from_buffer, or a negative
+ * errno on allocation or I2C failure.
+ */
+static ssize_t idt24x_debugfs_reader_map(
+	struct file *fp, char __user *user_buffer, size_t count,
+	loff_t *position)
+{
+	ssize_t result;
+	int err;
+	char *buf = kzalloc(5000, GFP_KERNEL);
+
+	/* Fix: the allocation was previously used without a NULL check. */
+	if (!buf)
+		return -ENOMEM;
+
+	dev_dbg(&idt24x_chip_fordebugfs->i2c_client->dev,
+		"calling idt24x_read_all_settings (count: %zu)\n", count);
+	err = idt24x_read_all_settings(idt24x_chip_fordebugfs, buf, 5000);
+	if (err) {
+		dev_err(&idt24x_chip_fordebugfs->i2c_client->dev,
+			"error calling idt24x_read_all_settings (%i)\n", err);
+		/* Fix: buf used to leak here, and the error was swallowed. */
+		result = err;
+		goto out;
+	}
+	/* TMGCDR-1456. We're returning 1 byte too few. */
+	result = simple_read_from_buffer(
+		user_buffer, count, position, buf, strlen(buf));
+out:
+	kfree(buf);
+	return result;
+}
+
+/**
+ * idt24x_handle_i2c_debug_token - process "token" written to the i2c file
+ * @dev: pointer to device structure
+ * @token: pointer to current char being examined
+ * @reg: pass in current register, or return register from token.
+ * @val: resulting array of bytes being parsed
+ * @nextbyte: position in val array to store next byte
+ *
+ * Utility function to operate on the current "token" (from within a
+ * space-delimited string) written to the i2c debugfs file. The first
+ * token is the register offset (while *reg still holds the -1
+ * sentinel); each later token is a data byte appended to @val with
+ * @nextbyte auto-incremented.
+ *
+ * Return: 0 for success
+ */
+static int idt24x_handle_i2c_debug_token(
+	const struct device *dev, char *token, unsigned int *reg,
+	u8 val[], u16 *nextbyte)
+{
+	int err;
+
+	dev_dbg(dev, "got token (%s)\n", token);
+
+	if (*reg != -1) {
+		u8 byte;
+
+		err = kstrtou8(token, 16, &byte);
+		if (!err) {
+			dev_dbg(dev, "data byte == 0x%x\n", byte);
+			val[(*nextbyte)++] = byte;
+		}
+	} else {
+		err = kstrtouint(token, 16, reg);
+		if (!err)
+			dev_dbg(dev, "hex register address == 0x%x\n", *reg);
+	}
+
+	switch (err) {
+	case 0:
+		break;
+	case -ERANGE:
+		dev_err(dev, "ERANGE error when parsing data\n");
+		break;
+	case -EINVAL:
+		dev_err(dev, "EINVAL error when parsing data\n");
+		break;
+	default:
+		dev_err(dev, "error when parsing data: %i\n", err);
+		break;
+	}
+	return err;
+}
+
+/**
+ * idt24x_debugfs_writer_i2c - debugfs handler for i2c file
+ * @fp: file pointer
+ * @user_buffer: buffer of text written to file
+ * @count: size of text in buffer
+ * @position: pass in current position, return new position
+ *
+ * Handler for the "i2c" debugfs file. Write to this file to write bytes
+ * via I2C to a particular offset.
+ *
+ * Usage: echo 006c 01 02 0D FF > i2c
+ *
+ * First 4 chars are the 2-byte i2c register offset. Then follow that
+ * with a sequence of 2-char bytes in hex format that you want to write
+ * starting at that offset.
+ *
+ * Return: result of simple_write_to_buffer, or -EINVAL on malformed
+ * (oversized) input.
+ */
+static ssize_t idt24x_debugfs_writer_i2c(struct file *fp,
+					 const char __user *user_buffer,
+					 size_t count, loff_t *position)
+{
+	int err = 0;
+	int x = 0;
+	int start = 0;
+	ssize_t written;
+	unsigned int reg = -1;
+	u8 val[WRITE_BLOCK_SIZE];
+	u16 nextbyte = 0;
+	char token[16];
+
+	if (count > DEBUGFS_BUFFER_LENGTH)
+		return -EINVAL;
+
+	written = simple_write_to_buffer(
+		idt24x_chip_fordebugfs->dbg_cache, DEBUGFS_BUFFER_LENGTH,
+		position, user_buffer, count);
+	if (written != count) {
+		dev_dbg(&idt24x_chip_fordebugfs->i2c_client->dev,
+			"write count != expected count");
+		return written;
+	}
+
+	for (x = 0; x < count; x++) {
+		/*
+		 * Fix: reject tokens that would overflow token[] (the
+		 * original copied unbounded into the 16-byte buffer) and
+		 * inputs with more data bytes than val[] can hold.
+		 */
+		if (x - start >= (int)sizeof(token) - 1)
+			return -EINVAL;
+		if (nextbyte >= ARRAY_SIZE(val))
+			return -EINVAL;
+		token[x - start] = idt24x_chip_fordebugfs->dbg_cache[x];
+		if (idt24x_chip_fordebugfs->dbg_cache[x] == ' ') {
+			token[x - start] = '\0';
+			err = idt24x_handle_i2c_debug_token(
+				&idt24x_chip_fordebugfs->i2c_client->dev,
+				token, &reg, val, &nextbyte);
+			if (err)
+				break;
+			start = x + 1;
+		}
+	}
+
+	/* handle the last token */
+	if (!err && nextbyte < ARRAY_SIZE(val)) {
+		token[count - start] = '\0';
+		err = idt24x_handle_i2c_debug_token(
+			&idt24x_chip_fordebugfs->i2c_client->dev, token, &reg,
+			val, &nextbyte);
+	}
+
+	if (!err && reg != -1 && nextbyte > 0) {
+		err = i2cwritebulk(
+			idt24x_chip_fordebugfs->i2c_client,
+			idt24x_chip_fordebugfs->regmap,
+			reg, val, nextbyte);
+		if (err) {
+			dev_err(&idt24x_chip_fordebugfs->i2c_client->dev,
+				"error writing data chip (%i)\n", err);
+			return err;
+		}
+		dev_dbg(&idt24x_chip_fordebugfs->i2c_client->dev,
+			"successfully wrote i2c data to chip");
+	}
+
+	return written;
+}
+
+/* "action": read echoes the cache, write triggers a device update. */
+static const struct file_operations idt24x_fops_debug_action = {
+	.read = idt24x_debugfs_reader_action,
+	.write = idt24x_debugfs_writer_action,
+};
+
+/* "map": read-only dump of the chip's register map over I2C. */
+static const struct file_operations idt24x_fops_debug_map = {
+	.read = idt24x_debugfs_reader_map
+};
+
+/* "i2c": write-only raw register writes for bring-up/debugging. */
+static const struct file_operations idt24x_fops_debug_i2c = {
+	.write = idt24x_debugfs_writer_i2c,
+};
+
+/**
+ * idt24x_expose_via_debugfs - Set up all debugfs files
+ * @client: pointer to i2c_client structure
+ * @chip: Device data structure
+ *
+ * Sets up all debugfs files to use for debugging the driver. On any
+ * failure the partially-created tree is removed again, so nothing is
+ * leaked.
+ *
+ * Return: error code. 0 if success or debugfs doesn't appear to be enabled.
+ */
+int idt24x_expose_via_debugfs(struct i2c_client *client,
+			      struct clk_idt24x_chip *chip)
+{
+	int output_num;
+
+	/*
+	 * create root directory in /sys/kernel/debug
+	 */
+	chip->debugfs_dirroot = debugfs_create_dir("idt24x", NULL);
+	if (!chip->debugfs_dirroot) {
+		/* debugfs probably not enabled. Don't fail the probe. */
+		return 0;
+	}
+
+	/*
+	 * create files in the root directory. This requires read and
+	 * write file operations
+	 */
+	chip->debugfs_fileaction = debugfs_create_file(
+		"action", 0644, chip->debugfs_dirroot, NULL,
+		&idt24x_fops_debug_action);
+	if (!chip->debugfs_fileaction) {
+		dev_err(&client->dev,
+			"%s: error creating action file", __func__);
+		goto err_cleanup;
+	}
+
+	chip->debugfs_map = debugfs_create_file(
+		"map", 0444, chip->debugfs_dirroot, NULL,
+		&idt24x_fops_debug_map);
+	if (!chip->debugfs_map) {
+		dev_err(&client->dev,
+			"%s: error creating map file", __func__);
+		goto err_cleanup;
+	}
+
+	/* One writable u64 per output so rates can be set from user space. */
+	for (output_num = 0; output_num < NUM_OUTPUTS; output_num++) {
+		char name[5];
+
+		sprintf(name, "q%d", output_num);
+		chip->debugfs_fileqfreq[output_num] = debugfs_create_u64(
+			name, 0644, chip->debugfs_dirroot,
+			&chip->clk[output_num].debug_freq);
+		if (!chip->debugfs_fileqfreq[output_num]) {
+			dev_err(&client->dev,
+				"%s: error creating %s debugfs file",
+				__func__, name);
+			goto err_cleanup;
+		}
+	}
+
+	chip->debugfs_filei2c = debugfs_create_file(
+		"i2c", 0644, chip->debugfs_dirroot, NULL,
+		&idt24x_fops_debug_i2c);
+	if (!chip->debugfs_filei2c) {
+		dev_err(&client->dev,
+			"%s: error creating i2c file", __func__);
+		goto err_cleanup;
+	}
+
+	dev_dbg(&client->dev, "%s: success", __func__);
+	idt24x_chip_fordebugfs = chip;
+	return 0;
+
+err_cleanup:
+	/* Fix: previously the partially-built tree leaked on error. */
+	debugfs_remove_recursive(chip->debugfs_dirroot);
+	chip->debugfs_dirroot = NULL;
+	return -ENODEV;
+}
+
+/* Tear down the debugfs tree created by idt24x_expose_via_debugfs(). */
+void idt24x_cleanup_debugfs(struct clk_idt24x_chip *chip)
+{
+	debugfs_remove_recursive(chip->debugfs_dirroot);
+}
diff --git a/drivers/clk/idt/clk-idt8t49n24x-debugfs.h b/drivers/clk/idt/clk-idt8t49n24x-debugfs.h
new file mode 100644
index 000000000000..673016c8e747
--- /dev/null
+++ b/drivers/clk/idt/clk-idt8t49n24x-debugfs.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* clk-idt8t49n24x-debugfs.h - Debugfs support for 8T49N24x
+ *
+ * Copyright (C) 2018, Integrated Device Technology, Inc. <david.cater@idt.com>
+ *
+ * See https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html
+ * This program is distributed "AS IS" and WITHOUT ANY WARRANTY;
+ * including the implied warranties of MERCHANTABILITY, FITNESS FOR
+ * A PARTICULAR PURPOSE, or NON-INFRINGEMENT.
+ */
+
+#ifndef __IDT_CLK_IDT8T49N24X_DEBUGFS_H_
+#define __IDT_CLK_IDT8T49N24X_DEBUGFS_H_
+
+#include "clk-idt8t49n24x-core.h"
+
+int idt24x_expose_via_debugfs(struct i2c_client *client,
+ struct clk_idt24x_chip *chip);
+void idt24x_cleanup_debugfs(struct clk_idt24x_chip *chip);
+
+#endif /* __IDT_CLK_IDT8T49N24X_DEBUGFS_H_ */
diff --git a/drivers/clk/idt/clk-idt8t49n24x.c b/drivers/clk/idt/clk-idt8t49n24x.c
new file mode 100644
index 000000000000..878637986a60
--- /dev/null
+++ b/drivers/clk/idt/clk-idt8t49n24x.c
@@ -0,0 +1,641 @@
+// SPDX-License-Identifier: GPL-2.0
+/* clk-idt8t49n24x.c - Program 8T49N24x settings via I2C.
+ *
+ * Copyright (C) 2018, Integrated Device Technology, Inc. <david.cater@idt.com>
+ *
+ * See https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html
+ * This program is distributed "AS IS" and WITHOUT ANY WARRANTY;
+ * including the implied warranties of MERCHANTABILITY, FITNESS FOR
+ * A PARTICULAR PURPOSE, or NON-INFRINGEMENT.
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "clk-idt8t49n24x-core.h"
+#include "clk-idt8t49n24x-debugfs.h"
+
+#define OUTPUTMODE_HIGHZ 0
+#define OUTPUTMODE_LVDS 2
+#define IDT24x_MIN_FREQ 1000000L
+#define IDT24x_MAX_FREQ 300000000L
+#define DRV_NAME "idt8t49n24x"
+
+enum clk_idt24x_variant {
+ idt24x
+};
+
+/* Extract the field selected by @mask from @value, right-justified. */
+static u32 mask_and_shift(u32 value, u8 mask)
+{
+	return (value & mask) >> bits_to_shift(mask);
+}
+
+/**
+ * idt24x_set_output_mode - Set the mode for a particular clock
+ * output in the register.
+ * @reg: The current register value before setting the mode.
+ * @mask: The bitmask identifying where in the register the
+ * output mode is stored.
+ * @mode: The mode to set when the output is currently HiZ.
+ *
+ * Only an output currently configured as HiZ is changed; any other
+ * configured mode is left untouched.
+ *
+ * Return: the new register value with the specified mode bits set.
+ */
+static int idt24x_set_output_mode(u32 reg, u8 mask, u8 mode)
+{
+	if (((reg & mask) >> bits_to_shift(mask)) == OUTPUTMODE_HIGHZ) {
+		/* Fix: honor @mode; it was ignored in favor of a
+		 * hard-coded OUTPUTMODE_LVDS (all callers pass LVDS, so
+		 * behavior is unchanged).
+		 */
+		reg &= ~mask;
+		reg |= mode << bits_to_shift(mask);
+	}
+	return reg;
+}
+
+/**
+ * idt24x_read_from_hw - Get the current values on the hw
+ * @chip: Device data structure
+ *
+ * Reads and caches the divider and output-control registers that the
+ * driver later rewrites, so unrelated bits can be preserved on update.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int idt24x_read_from_hw(struct clk_idt24x_chip *chip)
+{
+	int err;
+	struct i2c_client *client = chip->i2c_client;
+	u32 tmp, tmp2;
+	u8 output;
+
+	err = regmap_read(chip->regmap, IDT24x_REG_DSM_INT_8,
+			  &chip->reg_dsm_int_8);
+	if (err) {
+		dev_err(&client->dev,
+			"%s: error reading IDT24x_REG_DSM_INT_8: %i",
+			__func__, err);
+		return err;
+	}
+	dev_dbg(&client->dev, "%s: reg_dsm_int_8: 0x%x",
+		__func__, chip->reg_dsm_int_8);
+
+	err = regmap_read(chip->regmap, IDT24x_REG_DSMFRAC_20_16_MASK,
+			  &chip->reg_dsm_frac_20_16);
+	if (err) {
+		dev_err(&client->dev,
+			"%s: error reading IDT24x_REG_DSMFRAC_20_16_MASK: %i",
+			__func__, err);
+		return err;
+	}
+	dev_dbg(&client->dev, "%s: reg_dsm_frac_20_16: 0x%x",
+		__func__, chip->reg_dsm_frac_20_16);
+
+	err = regmap_read(chip->regmap, IDT24x_REG_OUTEN, &chip->reg_out_en_x);
+	if (err) {
+		dev_err(&client->dev,
+			"%s: error reading IDT24x_REG_OUTEN: %i",
+			__func__, err);
+		return err;
+	}
+	dev_dbg(&client->dev, "%s: reg_out_en_x: 0x%x",
+		__func__, chip->reg_out_en_x);
+
+	/*
+	 * For each OUTMODE pair, promote any output currently in HiZ to
+	 * LVDS before caching the value (see idt24x_set_output_mode).
+	 */
+	err = regmap_read(chip->regmap, IDT24x_REG_OUTMODE0_1, &tmp);
+	if (err) {
+		dev_err(&client->dev,
+			"%s: error reading IDT24x_REG_OUTMODE0_1: %i",
+			__func__, err);
+		return err;
+	}
+
+	tmp2 = idt24x_set_output_mode(
+		tmp, IDT24x_REG_OUTMODE0_MASK, OUTPUTMODE_LVDS);
+	tmp2 = idt24x_set_output_mode(
+		tmp2, IDT24x_REG_OUTMODE1_MASK, OUTPUTMODE_LVDS);
+	dev_dbg(&client->dev,
+		"%s: reg_out_mode_0_1 original: 0x%x. After setting OUT0/1 to LVDS if necessary: 0x%x",
+		__func__, tmp, tmp2);
+	chip->reg_out_mode_0_1 = tmp2;
+
+	err = regmap_read(chip->regmap, IDT24x_REG_OUTMODE2_3, &tmp);
+	if (err) {
+		dev_err(&client->dev,
+			"%s: error reading IDT24x_REG_OUTMODE2_3: %i",
+			__func__, err);
+		return err;
+	}
+
+	tmp2 = idt24x_set_output_mode(
+		tmp, IDT24x_REG_OUTMODE2_MASK, OUTPUTMODE_LVDS);
+	tmp2 = idt24x_set_output_mode(
+		tmp2, IDT24x_REG_OUTMODE3_MASK, OUTPUTMODE_LVDS);
+	dev_dbg(&client->dev,
+		"%s: reg_out_mode_2_3 original: 0x%x. After setting OUT2/3 to LVDS if necessary: 0x%x",
+		__func__, tmp, tmp2);
+	chip->reg_out_mode_2_3 = tmp2;
+
+	err = regmap_read(chip->regmap, IDT24x_REG_Q_DIS, &chip->reg_qx_dis);
+	if (err) {
+		dev_err(&client->dev,
+			"%s: error reading IDT24x_REG_Q_DIS: %i",
+			__func__, err);
+		return err;
+	}
+	dev_dbg(&client->dev, "%s: reg_qx_dis: 0x%x",
+		__func__, chip->reg_qx_dis);
+
+	err = regmap_read(chip->regmap, IDT24x_REG_NS1_Q0, &chip->reg_ns1_q0);
+	if (err) {
+		dev_err(&client->dev,
+			"%s: error reading IDT24x_REG_NS1_Q0: %i",
+			__func__, err);
+		return err;
+	}
+	dev_dbg(&client->dev, "%s: reg_ns1_q0: 0x%x",
+		__func__, chip->reg_ns1_q0);
+
+	/*
+	 * Cache the per-output divider registers for Q1..Q3 (Q0 is
+	 * handled via the NS1 register read above).
+	 */
+	for (output = 1; output <= 3; output++) {
+		struct clk_register_offsets offsets;
+
+		err = idt24x_get_offsets(output, &offsets);
+		if (err) {
+			dev_err(&client->dev,
+				"%s: error calling idt24x_get_offsets: %i",
+				__func__, err);
+			return err;
+		}
+
+		err = regmap_read(chip->regmap, offsets.n_17_16_offset,
+				  &chip->reg_n_qx_17_16[output - 1]);
+		if (err) {
+			dev_err(&client->dev,
+				"%s: error reading n_17_16_offset for output %d (offset: 0x%x): %i",
+				__func__, output, offsets.n_17_16_offset, err);
+			return err;
+		}
+		dev_dbg(&client->dev,
+			"%s: reg_n_qx_17_16[Q%u]: 0x%x",
+			__func__, output, chip->reg_n_qx_17_16[output - 1]);
+
+		err = regmap_read(chip->regmap, offsets.nfrac_27_24_offset,
+				  &chip->reg_nfrac_qx_27_24[output - 1]);
+		if (err) {
+			dev_err(&client->dev,
+				"%s: error reading nfrac_27_24_offset for output %d (offset: 0x%x): %i",
+				__func__, output,
+				offsets.nfrac_27_24_offset, err);
+			return err;
+		}
+		dev_dbg(&client->dev,
+			"%s: reg_nfrac_qx_27_24[Q%u]: 0x%x",
+			__func__, output,
+			chip->reg_nfrac_qx_27_24[output - 1]);
+	}
+
+	dev_info(&client->dev,
+		 "%s: initial values read from chip successfully",
+		 __func__);
+
+	/* Also read DBL_DIS to determine whether the doubler is disabled. */
+	err = regmap_read(chip->regmap, IDT24x_REG_DBL_DIS, &tmp);
+	if (err) {
+		dev_err(&client->dev,
+			"%s: error reading IDT24x_REG_DBL_DIS: %i",
+			__func__, err);
+		return err;
+	}
+	chip->doubler_disabled = mask_and_shift(tmp, IDT24x_REG_DBL_DIS_MASK);
+	dev_dbg(&client->dev, "%s: doubler_disabled: %d",
+		__func__, chip->doubler_disabled);
+
+	return 0;
+}
+
+/**
+ * idt24x_set_rate - Sets the specified output clock to the specified rate.
+ * @hw: clk_hw struct that identifies the specific output clock.
+ * @rate: the rate (in Hz) for the specified clock.
+ * @parent_rate: rate of the parent; unused here, because the device
+ * recomputes all dividers itself.
+ *
+ * Stores the requested rate on the output, then reprograms the whole
+ * device via idt24x_set_frequency(), which considers every output's
+ * requested rate when choosing the VCO frequency and dividers.
+ *
+ * Note that container_of cannot be used to find the device structure
+ * (clk_idt24x_chip) from clk_hw, because clk_idt24x_chip has an array
+ * of idt24x_output structs; output->chip is used instead.
+ *
+ * Return: 0 on success.
+ */
+static int idt24x_set_rate(struct clk_hw *hw, unsigned long rate,
+			   unsigned long parent_rate)
+{
+	struct idt24x_output *output = to_idt24x_output(hw);
+	struct i2c_client *client = output->chip->i2c_client;
+	int ret;
+
+	if (rate < output->chip->min_freq || rate > output->chip->max_freq) {
+		dev_err(&client->dev,
+			"requested frequency (%luHz) is out of range\n", rate);
+		return -EINVAL;
+	}
+
+	/* Record the request for idt24x_set_frequency() ... */
+	output->requested = rate;
+	/* ... and mirror it into the debugfs-visible location. */
+	output->debug_freq = rate;
+
+	dev_info(&client->dev,
+		 "%s. calling idt24x_set_frequency for Q%u. rate: %lu",
+		 __func__, output->index, rate);
+	ret = idt24x_set_frequency(output->chip);
+	if (ret != 0)
+		dev_err(&client->dev, "error calling set_frequency: %d", ret);
+
+	return ret;
+}
+
+/**
+ * idt24x_round_rate - get valid rate that is closest to the requested rate
+ * @hw: clk_hw struct that identifies the specific output clock.
+ * @rate: the rate (in Hz) for the specified clock.
+ * @parent_rate: i/o param for the parent's rate; not adjusted here.
+ *
+ * The chip has fractional output dividers, so the requested rate is
+ * assumed to be achievable as-is.
+ *
+ * TODO: compute the closest rate the chip can actually synthesize
+ * within a low error threshold and return that instead.
+ *
+ * Return: adjusted rate
+ */
+static long idt24x_round_rate(struct clk_hw *hw, unsigned long rate,
+			      unsigned long *parent_rate)
+{
+	return rate;
+}
+
+/**
+ * idt24x_recalc_rate - return the frequency being provided by the clock.
+ * @hw: clk_hw struct that identifies the specific output clock.
+ * @parent_rate: rate of the parent; unused.
+ *
+ * The clock framework calls this at registration/startup to learn the
+ * clock's current rate (otherwise it would default to 0) and to decide
+ * when .set_rate actually changes the frequency. The driver reports the
+ * last requested rate rather than re-reading the hardware.
+ *
+ * Return: the frequency of the specified clock.
+ */
+static unsigned long idt24x_recalc_rate(struct clk_hw *hw,
+					unsigned long parent_rate)
+{
+	return to_idt24x_output(hw)->requested;
+}
+
+/*
+ * Note that .prepare and .unprepare appear to be used more in Gates.
+ * They do not appear to be necessary for this device.
+ * Instead, update the device when .set_rate is called.
+ */
+static const struct clk_ops idt24x_clk_ops = {
+	.recalc_rate = idt24x_recalc_rate,
+	.round_rate = idt24x_round_rate,
+	.set_rate = idt24x_set_rate,
+};
+
+/* No register is volatile: reads may always be served from the cache. */
+static bool idt24x_regmap_is_volatile(struct device *dev, unsigned int reg)
+{
+	return false;
+}
+
+/* The driver imposes no write restrictions on any register. */
+static bool idt24x_regmap_is_writeable(struct device *dev, unsigned int reg)
+{
+	return true;
+}
+
+/* 16-bit register addresses, 8-bit values, cached in an rbtree. */
+static const struct regmap_config idt24x_regmap_config = {
+	.reg_bits = 16,
+	.val_bits = 8,
+	.cache_type = REGCACHE_RBTREE,
+	.max_register = 0xffff,
+	.writeable_reg = idt24x_regmap_is_writeable,
+	.volatile_reg = idt24x_regmap_is_volatile,
+};
+
+/**
+ * idt24x_clk_notifier_cb - Clock rate change callback
+ * @nb: Pointer to notifier block
+ * @event: Notification reason
+ * @data: Pointer to notification data object
+ *
+ * Called when the input clock frequency changes. On POST_RATE_CHANGE
+ * the new input rate is recorded and the device is reprogrammed via
+ * idt24x_set_frequency().
+ *
+ * Return: NOTIFY_OK to acknowledge the change, NOTIFY_DONE if the
+ * notification is considered irrelevant.
+ */
+static int idt24x_clk_notifier_cb(struct notifier_block *nb,
+				  unsigned long event, void *data)
+{
+	struct clk_notifier_data *ndata = data;
+	struct clk_idt24x_chip *chip = to_clk_idt24x_from_nb(nb);
+	int err;
+
+	dev_info(&chip->i2c_client->dev,
+		 "%s: input frequency changed: %lu Hz. event: %lu",
+		 __func__, ndata->new_rate, event);
+
+	switch (event) {
+	case PRE_RATE_CHANGE:
+		dev_dbg(&chip->i2c_client->dev, "PRE_RATE_CHANGE\n");
+		return NOTIFY_OK;
+	case POST_RATE_CHANGE:
+		chip->input_clk_freq = ndata->new_rate;
+		/*
+		 * Can't call clock API clk_set_rate here; I believe
+		 * it will be ignored if the rate is the same as we
+		 * set previously. Need to call our internal function.
+		 */
+		dev_dbg(&chip->i2c_client->dev,
+			"POST_RATE_CHANGE. Calling idt24x_set_frequency\n");
+		err = idt24x_set_frequency(chip);
+		if (err)
+			dev_err(&chip->i2c_client->dev,
+				"error calling idt24x_set_frequency (%i)\n",
+				err);
+		return NOTIFY_OK;
+	case ABORT_RATE_CHANGE:
+		return NOTIFY_OK;
+	default:
+		return NOTIFY_DONE;
+	}
+}
+
+/* DT clock-provider lookup: map a phandle cell to one of the Qx outputs. */
+static struct clk_hw *of_clk_idt24x_get(
+	struct of_phandle_args *clkspec, void *_data)
+{
+	struct clk_idt24x_chip *chip = _data;
+	unsigned int idx = clkspec->args[0];
+
+	if (idx < ARRAY_SIZE(chip->clk))
+		return &chip->clk[idx].hw;
+
+	pr_err("%s: invalid index %u\n", __func__, idx);
+	return ERR_PTR(-EINVAL);
+}
+
+/**
+ * idt24x_probe - main entry point for ccf driver
+ * @client: pointer to i2c_client structure
+ * @id: pointer to i2c_device_id structure
+ *
+ * Main entry point function that gets called to initialize the driver.
+ *
+ * Return: 0 for success.
+ */
+static int idt24x_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct clk_idt24x_chip *chip;
+	/*
+	 * Fix: zero-initialize so members that are never assigned below
+	 * (e.g. parent_names) are not left as stack garbage.
+	 */
+	struct clk_init_data init = {};
+
+	int err = 0;
+	int x;
+	char buf[6];
+
+	dev_info(&client->dev, "%s", __func__);
+	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	init.ops = &idt24x_clk_ops;
+	init.flags = 0;
+	init.num_parents = 0;
+	chip->i2c_client = client;
+
+	chip->min_freq = IDT24x_MIN_FREQ;
+	chip->max_freq = IDT24x_MAX_FREQ;
+
+	/* Find the first available input: input-clk0..N, then input-xtal. */
+	for (x = 0; x < NUM_INPUTS + 1; x++) {
+		char name[12];
+
+		sprintf(name, x == NUM_INPUTS ? "input-xtal" : "input-clk%i",
+			x);
+		dev_dbg(&client->dev, "attempting to get %s", name);
+		chip->input_clk = devm_clk_get(&client->dev, name);
+		if (IS_ERR(chip->input_clk)) {
+			err = PTR_ERR(chip->input_clk);
+			/*
+			 * TODO: Handle EPROBE_DEFER error, which indicates
+			 * that the input_clk isn't available now but may be
+			 * later when the appropriate module is loaded.
+			 */
+		} else {
+			err = 0;
+			chip->input_clk_num = x;
+			break;
+		}
+	}
+
+	if (err) {
+		/* Fix: err is a signed int; was printed with %u. */
+		dev_err(&client->dev, "Unable to get input clock (%i).", err);
+		chip->input_clk = NULL;
+		return err;
+	}
+
+	chip->input_clk_freq = clk_get_rate(chip->input_clk);
+	dev_dbg(&client->dev, "Got input-freq from input-clk in device tree: %uHz",
+		chip->input_clk_freq);
+
+	chip->input_clk_nb.notifier_call = idt24x_clk_notifier_cb;
+	if (clk_notifier_register(chip->input_clk, &chip->input_clk_nb))
+		dev_warn(&client->dev,
+			 "Unable to register clock notifier for input_clk.");
+
+	dev_dbg(&client->dev, "%s: about to read settings: %zu",
+		__func__, ARRAY_SIZE(chip->settings));
+
+	/* The "settings" property (full register image) is optional. */
+	err = of_property_read_u8_array(
+		client->dev.of_node, "settings", chip->settings,
+		ARRAY_SIZE(chip->settings));
+	if (!err) {
+		dev_dbg(&client->dev, "settings property specified in DT");
+		chip->has_settings = true;
+	} else {
+		if (err == -EOVERFLOW) {
+			dev_alert(&client->dev,
+				  "EOVERFLOW error trying to read the settings. ARRAY_SIZE: %zu",
+				  ARRAY_SIZE(chip->settings));
+			return err;
+		}
+		dev_dbg(&client->dev,
+			"settings property not specified in DT (or there was an error that can be ignored: %i). The settings property is optional.",
+			err);
+	}
+
+	/*
+	 * Requested output frequencies cannot be specified in the DT.
+	 * Either a consumer needs to use the clock API to request the rate,
+	 * or use debugfs to set the rate from user space. Use clock-names in
+	 * DT to specify the output clock.
+	 */
+
+	chip->regmap = devm_regmap_init_i2c(client, &idt24x_regmap_config);
+	if (IS_ERR(chip->regmap)) {
+		dev_err(&client->dev, "failed to allocate register map\n");
+		return PTR_ERR(chip->regmap);
+	}
+
+	dev_dbg(&client->dev, "%s: call i2c_set_clientdata", __func__);
+	i2c_set_clientdata(client, chip);
+
+	if (chip->has_settings) {
+		/*
+		 * A raw settings array was specified in the DT. Write the
+		 * settings to the device immediately.
+		 */
+		err = i2cwritebulk(
+			chip->i2c_client, chip->regmap, 0, chip->settings,
+			ARRAY_SIZE(chip->settings));
+		if (err) {
+			dev_err(&client->dev,
+				"error writing all settings to chip (%i)\n",
+				err);
+			return err;
+		}
+		dev_dbg(&client->dev, "successfully wrote full settings array");
+	}
+
+	/*
+	 * Whether or not settings were written to the device, read all
+	 * current values from the hw.
+	 */
+	dev_dbg(&client->dev, "read from HW");
+	err = idt24x_read_from_hw(chip);
+	if (err) {
+		dev_err(&client->dev,
+			"failed calling idt24x_read_from_hw (%i)\n", err);
+		return err;
+	}
+
+	/* Create all 4 clocks */
+	for (x = 0; x < NUM_OUTPUTS; x++) {
+		init.name = kasprintf(
+			GFP_KERNEL, "%s.Q%i", client->dev.of_node->name, x);
+		/* Fix: kasprintf() was previously unchecked. */
+		if (!init.name)
+			return -ENOMEM;
+		chip->clk[x].chip = chip;
+		chip->clk[x].hw.init = &init;
+		chip->clk[x].index = x;
+		err = devm_clk_hw_register(&client->dev, &chip->clk[x].hw);
+		kfree(init.name); /* clock framework made a copy of the name */
+		if (err) {
+			dev_err(&client->dev, "clock registration failed\n");
+			return err;
+		}
+		dev_dbg(&client->dev, "successfully registered Q%i", x);
+	}
+	/* (A redundant post-loop err check was removed; errors return above.) */
+
+	err = of_clk_add_hw_provider(
+		client->dev.of_node, of_clk_idt24x_get, chip);
+	if (err) {
+		dev_err(&client->dev, "unable to add clk provider\n");
+		return err;
+	}
+
+	err = idt24x_expose_via_debugfs(client, chip);
+	if (err) {
+		dev_err(&client->dev,
+			"error calling idt24x_expose_via_debugfs: %i\n", err);
+		return err;
+	}
+
+	/* NOTE(review): buf[6] assumes NUM_INPUTS <= 10 ("CLKn") — confirm. */
+	if (chip->input_clk_num == NUM_INPUTS)
+		sprintf(buf, "XTAL");
+	else
+		sprintf(buf, "CLK%i", chip->input_clk_num);
+	dev_info(&client->dev, "probe success. input freq: %uHz (%s), settings string? %s\n",
+		 chip->input_clk_freq, buf,
+		 chip->has_settings ? "true" : "false");
+	return 0;
+}
+
+static int idt24x_remove(struct i2c_client *client)
+{
+	/*
+	 * Fix: use the clientdata set in probe. The previous
+	 * to_clk_idt24x_from_client(&client) took the address of the
+	 * local parameter, which cannot resolve to the chip structure.
+	 */
+	struct clk_idt24x_chip *chip = i2c_get_clientdata(client);
+
+	dev_info(&client->dev, "%s", __func__);
+	of_clk_del_provider(client->dev.of_node);
+	idt24x_cleanup_debugfs(chip);
+
+	/*
+	 * Fix: the check was inverted — unregister the notifier only
+	 * when an input clock was actually acquired.
+	 */
+	if (chip->input_clk)
+		clk_notifier_unregister(
+			chip->input_clk, &chip->input_clk_nb);
+	return 0;
+}
+
+/* I2C device IDs; idt24x is the only variant handled. */
+static const struct i2c_device_id idt24x_id[] = {
+	{ "idt8t49n24x", idt24x },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, idt24x_id);
+
+/*
+ * NOTE(review): the OF compatible is "idt,idt8t49n241" while the I2C id
+ * and driver name use "idt8t49n24x" — confirm against the DT binding
+ * document that "241" is not a typo.
+ */
+static const struct of_device_id idt24x_of_match[] = {
+	{ .compatible = "idt,idt8t49n241" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, idt24x_of_match);
+
+static struct i2c_driver idt24x_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table = idt24x_of_match,
+	},
+	.probe = idt24x_probe,
+	.remove = idt24x_remove,
+	.id_table = idt24x_id,
+};
diff --git a/drivers/clk/si5324.h b/drivers/clk/si5324.h
new file mode 100644
index 000000000000..b3826e7b2f84
--- /dev/null
+++ b/drivers/clk/si5324.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Si5324 clock generator platform data
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_SI5324_H__
+#define __LINUX_PLATFORM_DATA_SI5324_H__
+
/**
 * enum si5324_pll_src - Si5324 pll clock source
 *
 * @SI5324_PLL_SRC_XTAL:   Pll source clock is XTAL input
 * @SI5324_PLL_SRC_CLKIN1: Pll source clock is CLKIN1 input
 * @SI5324_PLL_SRC_CLKIN2: Pll source clock is CLKIN2 input
 *
 * Defines enums for clock sources.
 */
enum si5324_pll_src {
	SI5324_PLL_SRC_XTAL = 0,
	SI5324_PLL_SRC_CLKIN1 = 1,
	SI5324_PLL_SRC_CLKIN2 = 2,
};

/**
 * enum si5324_drive_strength - Si5324 clock output drive strength
 *
 * @SI5324_DRIVE_DEFAULT: Default, do not change eeprom config
 * @SI5324_DRIVE_2MA: 2mA clock output drive strength
 * @SI5324_DRIVE_4MA: 4mA clock output drive strength
 * @SI5324_DRIVE_6MA: 6mA clock output drive strength
 * @SI5324_DRIVE_8MA: 8mA clock output drive strength
 *
 * Defines enums for drive strength. The non-default values encode the
 * strength in mA directly.
 */
enum si5324_drive_strength {
	SI5324_DRIVE_DEFAULT = 0,
	SI5324_DRIVE_2MA = 2,
	SI5324_DRIVE_4MA = 4,
	SI5324_DRIVE_6MA = 6,
	SI5324_DRIVE_8MA = 8,
};

/**
 * struct si5324_clkout_config - Si5324 clock output configuration
 *
 * @drive: output drive strength
 * @rate:  clkout rate in Hz
 */
struct si5324_clkout_config {
	enum si5324_drive_strength drive;
	unsigned long rate;
};

/**
 * struct si5324_platform_data - Platform data for the Si5324 clock driver
 *
 * @pll_src: Pll source clock setting
 * @clkout:  Array of clkout configuration, one entry per output (CLKOUT1/2)
 */
struct si5324_platform_data {
	enum si5324_pll_src pll_src;
	struct si5324_clkout_config clkout[2];
};
+
+#endif
diff --git a/drivers/clk/si5324drv.c b/drivers/clk/si5324drv.c
new file mode 100644
index 000000000000..5c064a329e73
--- /dev/null
+++ b/drivers/clk/si5324drv.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Si5324 clock driver
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ *
+ * Author: Venkateshwar Rao G <vgannava@xilinx.com>
+ * Leon Woestenberg <leon@sidebranch.com>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include "si5324drv.h"
+
/**
 * si5324_rate_approx - Find closest rational approximation N2_LS/N3 fraction.
 *
 * @f: Holds the N2_LS/N3 fraction in 36.28 fixed point notation.
 * @md: Holds the maximum denominator (N3) value allowed.
 * @num: Store the numerator (N2_LS) found.
 * @denom: Store the denominator (N3) found.
 *
 * This function finds the closest rational approximation.
 * It allows only n/1 solution and as a part of the calculation
 * multiply fraction until no digits after the decimal point and
 * continued fraction and check denominator at each step.
 */
void si5324_rate_approx(u64 f, u64 md, u32 *num, u32 *denom)
{
	/* h[] / k[] hold successive continued-fraction convergents. */
	u64 a, h[3] = { 0, 1, 0 }, k[3] = { 1, 0, 0 };
	u64 x, d, m, n = 1;
	int i = 0;

	/* Denominator capped at 1: just truncate to the integer part. */
	if (md <= 1) {
		*denom = 1;
		*num = (u32)(f >> 28);
		return;
	}

	/* Reduce f/n by stripping common factors of two from the 28
	 * fractional bits, so the continued fraction starts from the
	 * smallest equivalent integer ratio d/n.
	 */
	n <<= 28;
	for (i = 0; i < 28; i++) {
		if ((f & 0x1) == 0) {
			n >>= 1;
			f >>= 1;
		} else {
			break;
		}
	}
	d = f;

	/* Standard continued-fraction expansion of d/n, stopping early if
	 * the next convergent's denominator would exceed md.
	 */
	for (i = 0; i < 64; i++) {
		a = n ? (div64_u64(d, n)) : 0;
		if (i && !a)
			break;
		x = d;
		d = n;
		div64_u64_rem(x, n, &m);
		n = m;
		x = a;
		if (k[1] * a + k[0] >= md) {
			/* Clamp the partial quotient so k stays within md;
			 * keep the clamped term only if it is "close enough"
			 * (at least half of a), otherwise stop at the
			 * previous convergent.
			 */
			x = div64_u64((md - k[0]), k[1]);
			if (x * 2 >= a || k[1] >= md)
				i = 65;	/* accept this term, then terminate */
			else
				break;
		}
		h[2] = x * h[1] + h[0];
		h[0] = h[1];
		h[1] = h[2];
		k[2] = x * k[1] + k[0];
		k[0] = k[1];
		k[1] = k[2];
	}

	*denom = (u32)k[1];
	*num = (u32)h[1];
}
+
+/**
+ * si5324_find_n2ls - Search through the possible settings for the N2_LS.
+ *
+ * @settings: Holds the settings up till now.
+ *
+ * This function finds the best setting for N2_LS and N3n with the values
+ * for N1_HS, NCn_LS, and N2_HS.
+ *
+ * Return: 1 when the best possible result has been found, 0 on failure.
+ */
+static int si5324_find_n2ls(struct si5324_settingst *settings)
+{
+ u32 result = 0;
+ u64 f3_actual;
+ u64 fosc_actual;
+ u64 fout_actual;
+ u64 delta_fout;
+ u64 n2_ls_div_n3, mult_res;
+ u32 mult;
+
+ n2_ls_div_n3 = div64_u64(div64_u64(div64_u64(settings->fosc,
+ (settings->fin >> SI5324_FIN_FOUT_SHIFT)),
+ (u64)settings->n2_hs), (u64)2);
+
+ si5324_rate_approx(n2_ls_div_n3, settings->n31_max, &settings->n2_ls,
+ &settings->n31);
+ settings->n2_ls *= 2;
+
+ if (settings->n2_ls < settings->n2_ls_min) {
+ mult = div64_u64(settings->n2_ls_min, settings->n2_ls);
+ div64_u64_rem(settings->n2_ls_min, settings->n2_ls, &mult_res);
+ mult = mult_res ? mult + 1 : mult;
+ settings->n2_ls *= mult;
+ settings->n31 *= mult;
+ }
+
+ if (settings->n31 < settings->n31_min) {
+ mult = div64_u64(settings->n31_min, settings->n31);
+ div64_u64_rem(settings->n31_min, settings->n31, &mult_res);
+ mult = mult_res ? mult + 1 : mult;
+ settings->n2_ls *= mult;
+ settings->n31 *= mult;
+ }
+ pr_debug("Trying N2_LS = %d N3 = %d.\n", settings->n2_ls,
+ settings->n31);
+
+ if (settings->n2_ls < settings->n2_ls_min ||
+ settings->n2_ls > settings->n2_ls_max) {
+ pr_info("N2_LS out of range.\n");
+ } else if ((settings->n31 < settings->n31_min) ||
+ (settings->n31 > settings->n31_max)) {
+ pr_info("N3 out of range.\n");
+ } else {
+ f3_actual = div64_u64(settings->fin, settings->n31);
+ fosc_actual = f3_actual * settings->n2_hs * settings->n2_ls;
+ fout_actual = div64_u64(fosc_actual,
+ (settings->n1_hs * settings->nc1_ls));
+ delta_fout = fout_actual - settings->fout;
+
+ if ((f3_actual < ((u64)SI5324_F3_MIN) <<
+ SI5324_FIN_FOUT_SHIFT) ||
+ (f3_actual > ((u64)SI5324_F3_MAX) <<
+ SI5324_FIN_FOUT_SHIFT)) {
+ pr_debug("F3 frequency out of range.\n");
+ } else if ((fosc_actual < ((u64)SI5324_FOSC_MIN) <<
+ SI5324_FIN_FOUT_SHIFT) ||
+ (fosc_actual > ((u64)SI5324_FOSC_MAX) <<
+ SI5324_FIN_FOUT_SHIFT)) {
+ pr_debug("Fosc frequency out of range.\n");
+ } else if ((fout_actual < ((u64)SI5324_FOUT_MIN) <<
+ SI5324_FIN_FOUT_SHIFT) ||
+ (fout_actual > ((u64)SI5324_FOUT_MAX) <<
+ SI5324_FIN_FOUT_SHIFT)) {
+ pr_debug("Fout frequency out of range.\n");
+ } else {
+ pr_debug("Found solution: fout = %dHz delta = %dHz.\n",
+ (u32)(fout_actual >> SI5324_FIN_FOUT_SHIFT),
+ (u32)(delta_fout >> SI5324_FIN_FOUT_SHIFT));
+ pr_debug("fosc = %dkHz f3 = %dHz.\n",
+ (u32)((fosc_actual >> SI5324_FIN_FOUT_SHIFT) /
+ 1000),
+ (u32)(f3_actual >> SI5324_FIN_FOUT_SHIFT));
+
+ if (((u64)abs(delta_fout)) <
+ settings->best_delta_fout) {
+ settings->best_n1_hs = settings->n1_hs;
+ settings->best_nc1_ls = settings->nc1_ls;
+ settings->best_n2_hs = settings->n2_hs;
+ settings->best_n2_ls = settings->n2_ls;
+ settings->best_n3 = settings->n31;
+ settings->best_fout = fout_actual;
+ settings->best_delta_fout = abs(delta_fout);
+ if (delta_fout == 0)
+ result = 1;
+ }
+ }
+ }
+ return result;
+}
+
+/**
+ * si5324_find_n2 - Find a valid setting for N2_HS and N2_LS.
+ *
+ * @settings: Holds the settings up till now.
+ *
+ * This function finds a valid settings for N2_HS and N2_LS. Iterates over
+ * all possibilities of N2_HS and then performs a binary search over the
+ * N2_LS values.
+ *
+ * Return: 1 when the best possible result has been found.
+ */
+static int si5324_find_n2(struct si5324_settingst *settings)
+{
+ u32 result = 0;
+
+ for (settings->n2_hs = SI5324_N2_HS_MAX; settings->n2_hs >=
+ SI5324_N2_HS_MIN; settings->n2_hs--) {
+ pr_debug("Trying N2_HS = %d.\n", settings->n2_hs);
+ settings->n2_ls_min = (u32)(div64_u64(settings->fosc,
+ ((u64)(SI5324_F3_MAX * settings->n2_hs)
+ << SI5324_FIN_FOUT_SHIFT)));
+
+ if (settings->n2_ls_min < SI5324_N2_LS_MIN)
+ settings->n2_ls_min = SI5324_N2_LS_MIN;
+
+ settings->n2_ls_max = (u32)(div64_u64(settings->fosc,
+ ((u64)(SI5324_F3_MIN *
+ settings->n2_hs) <<
+ SI5324_FIN_FOUT_SHIFT)));
+ if (settings->n2_ls_max > SI5324_N2_LS_MAX)
+ settings->n2_ls_max = SI5324_N2_LS_MAX;
+
+ result = si5324_find_n2ls(settings);
+ if (result)
+ break;
+ }
+ return result;
+}
+
+/**
+ * si5324_calc_ncls_limits - Calculates the valid range for NCn_LS.
+ *
+ * @settings: Holds the input and output frequencies and the setting
+ * for N1_HS.
+ *
+ * This function calculates the valid range for NCn_LS with the value
+ * for the output frequency and N1_HS already set in settings.
+ *
+ * Return: -1 when there are no valid settings, 0 otherwise.
+ */
+int si5324_calc_ncls_limits(struct si5324_settingst *settings)
+{
+ settings->nc1_ls_min = div64_u64(settings->n1_hs_min,
+ settings->n1_hs);
+
+ if (settings->nc1_ls_min < SI5324_NC_LS_MIN)
+ settings->nc1_ls_min = SI5324_NC_LS_MIN;
+ if (settings->nc1_ls_min > 1 && (settings->nc1_ls_min & 0x1) == 1)
+ settings->nc1_ls_min++;
+ settings->nc1_ls_max = div64_u64(settings->n1_hs_max, settings->n1_hs);
+
+ if (settings->nc1_ls_max > SI5324_NC_LS_MAX)
+ settings->nc1_ls_max = SI5324_NC_LS_MAX;
+
+ if ((settings->nc1_ls_max & 0x1) == 1)
+ settings->nc1_ls_max--;
+ if ((settings->nc1_ls_max * settings->n1_hs < settings->n1_hs_min) ||
+ (settings->nc1_ls_min * settings->n1_hs > settings->n1_hs_max))
+ return -1;
+
+ return 0;
+}
+
+/**
+ * si5324_find_ncls - Find a valid setting for NCn_LS
+ *
+ * @settings: Holds the input and output frequencies, the setting for
+ * N1_HS, and the limits for NCn_LS.
+ *
+ * This function find a valid setting for NCn_LS that can deliver the correct
+ * output frequency. Assumes that the valid range is relatively small
+ * so a full search can be done (should be true for video clock frequencies).
+ *
+ * Return: 1 when the best possible result has been found.
+ */
+static int si5324_find_ncls(struct si5324_settingst *settings)
+{
+ u64 fosc_1;
+ u32 result;
+
+ fosc_1 = settings->fout * settings->n1_hs;
+ for (settings->nc1_ls = settings->nc1_ls_min;
+ settings->nc1_ls <= settings->nc1_ls_max;) {
+ settings->fosc = fosc_1 * settings->nc1_ls;
+ pr_debug("Trying NCn_LS = %d: fosc = %dkHz.\n",
+ settings->nc1_ls,
+ (u32)(div64_u64((settings->fosc >>
+ SI5324_FIN_FOUT_SHIFT), 1000)));
+
+ result = si5324_find_n2(settings);
+ if (result)
+ break;
+ if (settings->nc1_ls == 1)
+ settings->nc1_ls++;
+ else
+ settings->nc1_ls += 2;
+ }
+ return result;
+}
+
+/**
+ * si5324_calcfreqsettings - Calculate the frequency settings
+ *
+ * @clkinfreq: Frequency of the input clock.
+ * @clkoutfreq: Desired output clock frequency.
+ * @clkactual: Actual clock frequency.
+ * @n1_hs: Set to the value for the N1_HS register.
+ * @ncn_ls: Set to the value for the NCn_LS register.
+ * @n2_hs: Set to the value for the N2_HS register.
+ * @n2_ls: Set to the value for the N2_LS register.
+ * @n3n: Set to the value for the N3n register.
+ * @bwsel: Set to the value for the BW_SEL register.
+ *
+ * This funciton calculates the frequency settings for the desired output
+ * frequency.
+ *
+ * Return: SI5324_SUCCESS for success, SI5324_ERR_FREQ when the
+ * requested frequency cannot be generated.
+ */
+int si5324_calcfreqsettings(u32 clkinfreq, u32 clkoutfreq, u32 *clkactual,
+ u8 *n1_hs, u32 *ncn_ls, u8 *n2_hs, u32 *n2_ls,
+ u32 *n3n, u8 *bwsel)
+{
+ struct si5324_settingst settings;
+ int result;
+
+ settings.fin = (u64)clkinfreq << SI5324_FIN_FOUT_SHIFT;
+ settings.fout = (u64)clkoutfreq << SI5324_FIN_FOUT_SHIFT;
+ settings.best_delta_fout = settings.fout;
+
+ settings.n1_hs_min = (int)(div64_u64(SI5324_FOSC_MIN, clkoutfreq));
+ if (settings.n1_hs_min < SI5324_N1_HS_MIN * SI5324_NC_LS_MIN)
+ settings.n1_hs_min = SI5324_N1_HS_MIN * SI5324_NC_LS_MIN;
+
+ settings.n1_hs_max = (int)(div64_u64(SI5324_FOSC_MAX, clkoutfreq));
+ if (settings.n1_hs_max > SI5324_N1_HS_MAX * SI5324_NC_LS_MAX)
+ settings.n1_hs_max = SI5324_N1_HS_MAX * SI5324_NC_LS_MAX;
+
+ settings.n31_min = div64_u64(clkinfreq, SI5324_F3_MAX);
+ if (settings.n31_min < SI5324_N3_MIN)
+ settings.n31_min = SI5324_N3_MIN;
+
+ settings.n31_max = div64_u64(clkinfreq, SI5324_F3_MIN);
+ if (settings.n31_max > SI5324_N3_MAX)
+ settings.n31_max = SI5324_N3_MAX;
+
+ /* Find a valid oscillator frequency with the highest setting of N1_HS
+ * possible (reduces power)
+ */
+ for (settings.n1_hs = SI5324_N1_HS_MAX;
+ settings.n1_hs >= SI5324_N1_HS_MIN; settings.n1_hs--) {
+ pr_debug("Trying N1_HS = %d.\n", settings.n1_hs);
+
+ result = si5324_calc_ncls_limits(&settings);
+ if (result) {
+ pr_debug("No valid settings\n");
+ continue;
+ }
+ result = si5324_find_ncls(&settings);
+ if (result)
+ break;
+ }
+
+ pr_debug("Si5324: settings.best_delta_fout = %llu\n",
+ (unsigned long long)settings.best_delta_fout);
+ pr_debug("Si5324: settings.fout = %llu\n",
+ (unsigned long long)settings.fout);
+
+ if (settings.best_delta_fout == settings.fout) {
+ pr_debug("Si5324: No valid settings found.");
+ return SI5324_ERR_FREQ;
+ }
+ pr_debug("Si5324: Found solution: fout = %dHz.\n",
+ (u32)(settings.best_fout >> 28));
+
+ /* Post processing: convert temporary values to actual registers */
+ *n1_hs = (u8)settings.best_n1_hs - 4;
+ *ncn_ls = settings.best_nc1_ls - 1;
+ *n2_hs = (u8)settings.best_n2_hs - 4;
+ *n2_ls = settings.best_n2_ls - 1;
+ *n3n = settings.best_n3 - 1;
+ /*
+ * How must the bandwidth selection be determined?
+ * Not all settings will be valid.
+ * refclk 2, 0xA2, BWSEL_REG=1010 (?)
+ * free running 2, 0x42, BWSEL_REG=0100 (?)
+ */
+ *bwsel = 6;
+
+ if (clkactual)
+ *clkactual = (settings.best_fout >> SI5324_FIN_FOUT_SHIFT);
+
+ return SI5324_SUCCESS;
+}
diff --git a/drivers/clk/si5324drv.h b/drivers/clk/si5324drv.h
new file mode 100644
index 000000000000..28ea3050d5fb
--- /dev/null
+++ b/drivers/clk/si5324drv.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Si5324 clock driver
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ */
+
+#ifndef SI5324DRV_H_
+#define SI5324DRV_H_
+
+#include <linux/types.h>
+
+/******************************************************************************
+ * User settable defines that depend on the specific board design.
+ * The defaults are for the Xilinx KC705 board.
+ *****************************************************************************/
+
+#define SI5324_XTAL_FREQ 114285000UL
+
+/******************************************************************************
+ * Defines independent on the specific board design. Should not be changed.
+ *****************************************************************************/
+
+#define SI5324_SUCCESS 0 /*< Operation was successful */
+#define SI5324_ERR_IIC -1 /*< IIC error occurred */
+#define SI5324_ERR_FREQ -2 /*< Could not calculate frequency setting */
+#define SI5324_ERR_PARM -3 /*< Invalid parameter */
+
+#define SI5324_CLKSRC_CLK1 1 /*< Use clock input 1 */
+#define SI5324_CLKSRC_CLK2 2 /*< Use clock input 2 */
+#define SI5324_CLKSRC_XTAL 3 /*< Use crystal (free running mode) */
+
+#define SI5324_FOSC_MIN 4850000000UL /*< Min oscillator frequency */
+#define SI5324_FOSC_MAX 5670000000UL /*< Max oscillator frequency */
+#define SI5324_F3_MIN 10000 /*< Min phase detector frequency */
+#define SI5324_F3_MAX 2000000 /*< Max phase detector frequency */
+#define SI5324_FIN_MIN 2000 /*< Min input frequency */
+#define SI5324_FIN_MAX 710000000UL /*< Max input frequency */
+#define SI5324_FOUT_MIN 2000 /*< Min output frequency */
+#define SI5324_FOUT_MAX 945000000UL /*< Max output frequency */
+
+#define SI5324_N1_HS_MIN 6
+#define SI5324_N1_HS_MAX 11
+#define SI5324_NC_LS_MIN 1
+#define SI5324_NC_LS_MAX 0x100000
+#define SI5324_N2_HS_MIN 4
+#define SI5324_N2_HS_MAX 11
+#define SI5324_N2_LS_MIN 2 /* even values only */
+#define SI5324_N2_LS_MAX 0x100000
+#define SI5324_N3_MIN 1
+#define SI5324_N3_MAX 0x080000
+#define SI5324_FIN_FOUT_SHIFT 28
+
/*
 * struct si5324_settingst - scratch state for the frequency-plan search.
 *
 * The si5324_find_* helpers iterate over the divider fields in place;
 * the best_* members record the closest solution found so far.
 * fin/fout/fosc are held in 36.28 fixed point (see SI5324_FIN_FOUT_SHIFT).
 */
struct si5324_settingst {
	/* high-speed output divider */
	u32 n1_hs_min;
	u32 n1_hs_max;
	u32 n1_hs;

	/* low-speed output divider for clkout1 */
	u32 nc1_ls_min;
	u32 nc1_ls_max;
	u32 nc1_ls;

	/* low-speed output divider for clkout2 */
	u32 nc2_ls_min;
	u32 nc2_ls_max;
	u32 nc2_ls;

	/* high-speed feedback divider (PLL multiplier) */
	u32 n2_hs;
	/* low-speed feedback divider (PLL multiplier) */
	u32 n2_ls_min;
	u32 n2_ls_max;
	u32 n2_ls;

	/* input divider for clk1 */
	u32 n31_min;
	u32 n31_max;
	u32 n31;

	u64 fin;		/* input frequency, 36.28 fixed point */
	u64 fout;		/* requested output frequency, 36.28 */
	u64 fosc;		/* candidate oscillator frequency, 36.28 */
	u64 best_delta_fout;	/* |fout error| of best solution, 36.28 */
	u64 best_fout;		/* best achievable fout, 36.28 */
	u32 best_n1_hs;
	u32 best_nc1_ls;
	u32 best_n2_hs;
	u32 best_n2_ls;
	u32 best_n3;
};
+
+int si5324_calcfreqsettings(u32 clkinfreq, u32 clkoutfreq, u32 *clkactual,
+ u8 *n1_hs, u32 *ncn_ls, u8 *n2_hs,
+ u32 *n2_ls, u32 *n3n, u8 *bwsel);
+void si5324_rate_approx(u64 f, u64 md, u32 *num, u32 *denom);
+int si5324_calc_ncls_limits(struct si5324_settingst *settings);
+
+#endif /* SI5324DRV_H_ */
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index ffbb9008c1c9..f7cde869c7f7 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -37,6 +37,7 @@ static void __iomem *zynq_clkc_base;
#define SLCR_CAN_MIOCLK_CTRL (zynq_clkc_base + 0x60)
#define SLCR_DBG_CLK_CTRL (zynq_clkc_base + 0x64)
#define SLCR_PCAP_CLK_CTRL (zynq_clkc_base + 0x68)
+#define SLCR_TOPSW_CLK_CTRL (zynq_clkc_base + 0x6c)
#define SLCR_FPGA0_CLK_CTRL (zynq_clkc_base + 0x70)
#define SLCR_621_TRUE (zynq_clkc_base + 0xc4)
#define SLCR_SWDT_CLK_SEL (zynq_clkc_base + 0x204)
@@ -99,6 +100,48 @@ static const char *const gem1_emio_input_names[] __initconst = {
static const char *const swdt_ext_clk_input_names[] __initconst = {
"swdt_ext_clk"};
#ifdef CONFIG_SUSPEND
/* IO PLL parent saved across suspend so it can be restored on resume. */
static struct clk *iopll_save_parent;

/* Disable bit in the SLCR top-switch clock control register. */
#define TOPSW_CLK_CTRL_DIS_MASK	BIT(0)

/*
 * zynq_clk_suspend_early - reparent the IO PLL to ps_clk before suspend.
 *
 * A reparent failure is only logged, not propagated: the function always
 * returns 0 so suspend proceeds either way.
 */
int zynq_clk_suspend_early(void)
{
	int ret;

	iopll_save_parent = clk_get_parent(clks[iopll]);

	ret = clk_set_parent(clks[iopll], ps_clk);
	if (ret)
		pr_info("%s: reparent iopll failed %d\n", __func__, ret);

	return 0;
}

/* Restore the IO PLL parent saved by zynq_clk_suspend_early(). */
void zynq_clk_resume_late(void)
{
	clk_set_parent(clks[iopll], iopll_save_parent);
}

/* Clear the disable bit: ungate the central interconnect (top switch) clock. */
void zynq_clk_topswitch_enable(void)
{
	u32 reg;

	reg = readl(SLCR_TOPSW_CLK_CTRL);
	reg &= ~TOPSW_CLK_CTRL_DIS_MASK;
	writel(reg, SLCR_TOPSW_CLK_CTRL);
}

/* Set the disable bit: gate the central interconnect (top switch) clock. */
void zynq_clk_topswitch_disable(void)
{
	u32 reg;

	reg = readl(SLCR_TOPSW_CLK_CTRL);
	reg |= TOPSW_CLK_CTRL_DIS_MASK;
	writel(reg, SLCR_TOPSW_CLK_CTRL);
}
#endif
+
static void __init zynq_clk_register_fclk(enum zynq_clk fclk,
const char *clk_name, void __iomem *fclk_ctrl_reg,
const char **parents, int enable)
diff --git a/drivers/clk/zynqmp/clk-zynqmp.h b/drivers/clk/zynqmp/clk-zynqmp.h
index fec9a15c8786..143d73d6badc 100644
--- a/drivers/clk/zynqmp/clk-zynqmp.h
+++ b/drivers/clk/zynqmp/clk-zynqmp.h
@@ -25,11 +25,13 @@ enum topology_type {
* @type: Type of topology
* @flag: Topology flags
* @type_flag: Topology type specific flag
+ * @custom_type_flag: Topology type specific custom flag
*/
struct clock_topology {
u32 type;
u32 flag;
u32 type_flag;
+ u8 custom_type_flag;
};
struct clk_hw *zynqmp_clk_register_pll(const char *name, u32 clk_id,
diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c
index b66c3a62233a..51b830225860 100644
--- a/drivers/clk/zynqmp/clkc.c
+++ b/drivers/clk/zynqmp/clkc.c
@@ -84,6 +84,7 @@ struct name_resp {
struct topology_resp {
#define CLK_TOPOLOGY_TYPE GENMASK(3, 0)
+#define CLK_TOPOLOGY_CUSTOM_TYPE_FLAGS GENMASK(7, 4)
#define CLK_TOPOLOGY_FLAGS GENMASK(23, 8)
#define CLK_TOPOLOGY_TYPE_FLAGS GENMASK(31, 24)
u32 topology[CLK_GET_TOPOLOGY_RESP_WORDS];
@@ -396,6 +397,9 @@ static int __zynqmp_clock_get_topology(struct clock_topology *topology,
topology[*nnodes].type_flag =
FIELD_GET(CLK_TOPOLOGY_TYPE_FLAGS,
response->topology[i]);
+ topology[*nnodes].custom_type_flag =
+ FIELD_GET(CLK_TOPOLOGY_CUSTOM_TYPE_FLAGS,
+ response->topology[i]);
(*nnodes)++;
}
@@ -666,6 +670,12 @@ static void zynqmp_get_clock_info(void)
continue;
clock[i].valid = FIELD_GET(CLK_ATTR_VALID, attr.attr[0]);
+
+ /* skip query for invalid clock */
+ ret = zynqmp_is_valid_clock(i);
+ if (ret != CLK_ATTR_VALID)
+ continue;
+
clock[i].type = FIELD_GET(CLK_ATTR_TYPE, attr.attr[0]) ?
CLK_TYPE_EXTERNAL : CLK_TYPE_OUTPUT;
diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c
index 9bc4f9409aea..a687dcca9996 100644
--- a/drivers/clk/zynqmp/divider.c
+++ b/drivers/clk/zynqmp/divider.c
@@ -25,7 +25,8 @@
#define to_zynqmp_clk_divider(_hw) \
container_of(_hw, struct zynqmp_clk_divider, hw)
-#define CLK_FRAC BIT(13) /* has a fractional parent */
+#define CLK_FRAC BIT(13) /* has a fractional parent */
+#define CUSTOM_FLAG_CLK_FRAC BIT(0) /* has a fractional parent in custom type flag */
/**
* struct zynqmp_clk_divider - adjustable divider clock
@@ -34,10 +35,11 @@
* @is_frac: The divider is a fractional divider
* @clk_id: Id of clock
* @div_type: divisor type (TYPE_DIV1 or TYPE_DIV2)
+ * @max_div: Maximum divisor value allowed
*/
struct zynqmp_clk_divider {
struct clk_hw hw;
- u8 flags;
+ u16 flags;
bool is_frac;
u32 clk_id;
u32 div_type;
@@ -318,7 +320,8 @@ struct clk_hw *zynqmp_clk_register_divider(const char *name,
init.num_parents = 1;
/* struct clk_divider assignments */
- div->is_frac = !!(nodes->flag & CLK_FRAC);
+ div->is_frac = !!((nodes->flag & CLK_FRAC) |
+ (nodes->custom_type_flag & CUSTOM_FLAG_CLK_FRAC));
div->flags = nodes->type_flag;
div->hw.init = &init;
div->clk_id = clk_id;
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 2c887e4d005a..c4a90d5f860b 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -741,6 +741,27 @@ config CRYPTO_DEV_ROCKCHIP
This driver interfaces with the hardware crypto accelerator.
Supporting cbc/ecb chainmode, and aes/des/des3_ede cipher mode.
+config CRYPTO_DEV_ZYNQMP_SHA3
+ tristate "Support for Xilinx ZynqMP SHA3 hw accelerator"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ select CRYPTO_HASH
+ help
+ Xilinx processors have SHA384 engine used for calculation
+ of hash. This driver interfaces with SHA3 hw accelerator.
+ Select this if you want to use the ZynqMP module for
+ Keccak-SHA384 algorithms.
+
+config CRYPTO_DEV_XILINX_RSA
+ tristate "Support for Xilinx ZynqMP RSA hw accelerator"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ select CRYPTO_AES
+ select CRYPTO_BLKCIPHER
+ help
+ Xilinx processors have RSA hw accelerator used for signature
+ generation and verification. This driver interfaces with RSA
+ hw accelerator. Select this if you want to use the ZynqMP module
+ for RSA algorithms.
+
config CRYPTO_DEV_ZYNQMP_AES
tristate "Support for Xilinx ZynqMP AES hw accelerator"
depends on ZYNQMP_FIRMWARE || COMPILE_TEST
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 944ed7226e37..478464356ff6 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -47,6 +47,6 @@ obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
-obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_AES) += xilinx/
+obj-y += xilinx/
obj-y += hisilicon/
obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
diff --git a/drivers/crypto/xilinx/Makefile b/drivers/crypto/xilinx/Makefile
index 534e32daf76a..21c67f9a210e 100644
--- a/drivers/crypto/xilinx/Makefile
+++ b/drivers/crypto/xilinx/Makefile
@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_AES) += zynqmp-aes-gcm.o
+obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_SHA3) += zynqmp-sha.o
+obj-$(CONFIG_CRYPTO_DEV_XILINX_RSA) += zynqmp-rsa.o
diff --git a/drivers/crypto/xilinx/zynqmp-rsa.c b/drivers/crypto/xilinx/zynqmp-rsa.c
new file mode 100644
index 000000000000..bdfbaece000b
--- /dev/null
+++ b/drivers/crypto/xilinx/zynqmp-rsa.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 Xilinx, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/crypto.h>
+#include <linux/spinlock.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <crypto/scatterwalk.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
#define ZYNQMP_RSA_QUEUE_LENGTH	1
#define ZYNQMP_RSA_MAX_KEY_SIZE	1024	/* bytes; size of the key buffer below */
#define ZYNQMP_RSA_BLOCKSIZE	64

struct zynqmp_rsa_dev;

/* Per-tfm context: key material plus the bound device. */
struct zynqmp_rsa_op {
	struct zynqmp_rsa_dev *dd;	/* device this context is bound to */
	void *src;
	void *dst;
	int len;
	u8 key[ZYNQMP_RSA_MAX_KEY_SIZE];
	u8 *iv;		/* NOTE(review): never set or read in this file */
	u32 keylen;	/* valid bytes in key[] */
};

/* One entry per probed platform device. */
struct zynqmp_rsa_dev {
	struct list_head list;
	struct device *dev;
	/* the lock protects queue and dev list*/
	spinlock_t lock;
	struct crypto_queue queue;
};

/* Driver-global registry of probed devices. */
struct zynqmp_rsa_drv {
	struct list_head dev_list;
	/* the lock protects queue and dev list*/
	spinlock_t lock;
};

static struct zynqmp_rsa_drv zynqmp_rsa = {
	.dev_list = LIST_HEAD_INIT(zynqmp_rsa.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(zynqmp_rsa.lock),
};

/* Firmware (EEMI) ops; set once in probe before the alg is registered. */
static const struct zynqmp_eemi_ops *eemi_ops;
+
+static struct zynqmp_rsa_dev *zynqmp_rsa_find_dev(struct zynqmp_rsa_op *ctx)
+{
+ struct zynqmp_rsa_dev *rsa_dd = NULL;
+ struct zynqmp_rsa_dev *tmp;
+
+ spin_lock_bh(&zynqmp_rsa.lock);
+ if (!ctx->dd) {
+ list_for_each_entry(tmp, &zynqmp_rsa.dev_list, list) {
+ rsa_dd = tmp;
+ break;
+ }
+ ctx->dd = rsa_dd;
+ } else {
+ rsa_dd = ctx->dd;
+ }
+ spin_unlock_bh(&zynqmp_rsa.lock);
+
+ return rsa_dd;
+}
+
+static int zynqmp_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct zynqmp_rsa_op *op = crypto_tfm_ctx(tfm);
+
+ op->keylen = len;
+ memcpy(op->key, key, len);
+ return 0;
+}
+
/*
 * zynqmp_rsa_xcrypt - run one RSA operation through the PMU firmware.
 *
 * Gathers the scatterlist input into one DMA-coherent buffer laid out as
 * [data (nbytes) | key (op->keylen)], hands the buffer's DMA address to the
 * firmware, then scatters the (in-place) result back to @dst.
 *
 * @flags selects the operation passed to the firmware: callers use 1 for
 * encrypt and 0 for decrypt (see the wrappers below) — presumably the
 * firmware op code; confirm against the EEMI spec.
 *
 * NOTE(review): the return value of eemi_ops->rsa() is ignored, so a
 * firmware failure is silently reported as success. The deprecated
 * blkcipher walk API is order-sensitive; code left untouched.
 */
static int zynqmp_rsa_xcrypt(struct blkcipher_desc *desc,
			     struct scatterlist *dst, struct scatterlist *src,
			     unsigned int nbytes, unsigned int flags)
{
	struct zynqmp_rsa_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct zynqmp_rsa_dev *dd = zynqmp_rsa_find_dev(op);
	int err, datasize, src_data = 0, dst_data = 0;
	struct blkcipher_walk walk;
	char *kbuf;
	size_t dma_size;
	dma_addr_t dma_addr;

	if (!eemi_ops->rsa)
		return -ENOTSUPP;

	/* One buffer for both payload and trailing key. */
	dma_size = nbytes + op->keylen;
	kbuf = dma_alloc_coherent(dd->dev, dma_size, &dma_addr, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	/* First walk: gather the scattered input into kbuf. */
	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((datasize = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		memcpy(kbuf + src_data, op->src, datasize);
		src_data = src_data + datasize;
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	/* Key is appended after the data, as the firmware expects. */
	memcpy(kbuf + nbytes, op->key, op->keylen);
	eemi_ops->rsa(dma_addr, nbytes, flags);

	/* Second walk: scatter the in-place result out to dst. */
	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((datasize = walk.nbytes)) {
		memcpy(walk.dst.virt.addr, kbuf + dst_data, datasize);
		dst_data = dst_data + datasize;
		err = blkcipher_walk_done(desc, &walk, 0);
	}
	dma_free_coherent(dd->dev, dma_size, kbuf, dma_addr);
	return err;
}
+
/* Decrypt wrapper: flags = 0 selects the firmware's decrypt operation. */
static int
zynqmp_rsa_decrypt(struct blkcipher_desc *desc,
		   struct scatterlist *dst, struct scatterlist *src,
		   unsigned int nbytes)
{
	return zynqmp_rsa_xcrypt(desc, dst, src, nbytes, 0);
}

/* Encrypt wrapper: flags = 1 selects the firmware's encrypt operation. */
static int
zynqmp_rsa_encrypt(struct blkcipher_desc *desc,
		   struct scatterlist *dst, struct scatterlist *src,
		   unsigned int nbytes)
{
	return zynqmp_rsa_xcrypt(desc, dst, src, nbytes, 1);
}
+
/*
 * Algorithm registration. Uses the (deprecated) blkcipher interface.
 * NOTE(review): ivsize = 1 with an unused op->iv looks like a placeholder
 * — RSA has no IV; confirm whether 0 was intended.
 */
static struct crypto_alg zynqmp_alg = {
	.cra_name		=	"xilinx-zynqmp-rsa",
	.cra_driver_name	=	"zynqmp-rsa",
	.cra_priority		=	400,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
	.cra_blocksize		=	ZYNQMP_RSA_BLOCKSIZE,
	.cra_ctxsize		=	sizeof(struct zynqmp_rsa_op),
	.cra_alignmask		=	15,
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_u			=	{
	.blkcipher	=	{
			.min_keysize	=	0,
			.max_keysize	=	ZYNQMP_RSA_MAX_KEY_SIZE,
			.setkey		=	zynqmp_setkey_blk,
			.encrypt	=	zynqmp_rsa_encrypt,
			.decrypt	=	zynqmp_rsa_decrypt,
			.ivsize		=	1,
	}
	}
};

/* Device-tree match table. */
static const struct of_device_id zynqmp_rsa_dt_ids[] = {
	{ .compatible = "xlnx,zynqmp-rsa" },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, zynqmp_rsa_dt_ids);
+
+static int zynqmp_rsa_probe(struct platform_device *pdev)
+{
+ struct zynqmp_rsa_dev *rsa_dd;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
+
+ rsa_dd = devm_kzalloc(&pdev->dev, sizeof(*rsa_dd), GFP_KERNEL);
+ if (!rsa_dd)
+ return -ENOMEM;
+
+ rsa_dd->dev = dev;
+ platform_set_drvdata(pdev, rsa_dd);
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret < 0)
+ dev_err(dev, "no usable DMA configuration");
+
+ INIT_LIST_HEAD(&rsa_dd->list);
+ spin_lock_init(&rsa_dd->lock);
+ crypto_init_queue(&rsa_dd->queue, ZYNQMP_RSA_QUEUE_LENGTH);
+ spin_lock(&zynqmp_rsa.lock);
+ list_add_tail(&rsa_dd->list, &zynqmp_rsa.dev_list);
+ spin_unlock(&zynqmp_rsa.lock);
+
+ ret = crypto_register_alg(&zynqmp_alg);
+ if (ret)
+ goto err_algs;
+
+ return 0;
+
+err_algs:
+ spin_lock(&zynqmp_rsa.lock);
+ list_del(&rsa_dd->list);
+ spin_unlock(&zynqmp_rsa.lock);
+ dev_err(dev, "initialization failed.\n");
+ return ret;
+}
+
/*
 * zynqmp_rsa_remove - platform remove: unregister the algorithm.
 * NOTE(review): the device is not removed from zynqmp_rsa.dev_list here;
 * harmless while there is at most one device, but worth confirming.
 */
static int zynqmp_rsa_remove(struct platform_device *pdev)
{
	crypto_unregister_alg(&zynqmp_alg);
	return 0;
}

/* Platform driver glue; matched via the DT table above. */
static struct platform_driver xilinx_rsa_driver = {
	.probe = zynqmp_rsa_probe,
	.remove = zynqmp_rsa_remove,
	.driver = {
		.name = "zynqmp_rsa",
		.of_match_table = of_match_ptr(zynqmp_rsa_dt_ids),
	},
};

module_platform_driver(xilinx_rsa_driver);

MODULE_DESCRIPTION("ZynqMP RSA hw acceleration support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nava kishore Manne <navam@xilinx.com>");
diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c
new file mode 100644
index 000000000000..be66a77f3af2
--- /dev/null
+++ b/drivers/crypto/xilinx/zynqmp-sha.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 Xilinx, Inc.
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define ZYNQMP_SHA3_INIT 1
+#define ZYNQMP_SHA3_UPDATE 2
+#define ZYNQMP_SHA3_FINAL 4
+
+#define ZYNQMP_SHA_QUEUE_LENGTH 1
+
+/* EEMI firmware ops; resolved once in probe via zynqmp_pm_get_eemi_ops(). */
+static const struct zynqmp_eemi_ops *eemi_ops;
+struct zynqmp_sha_dev;
+
+/*
+ * .statesize = sizeof(struct zynqmp_sha_reqctx) must be <= PAGE_SIZE / 8 as
+ * tested by the ahash_prepare_alg() function.
+ */
+struct zynqmp_sha_reqctx {
+	struct zynqmp_sha_dev *dd;	/* device servicing this request */
+	unsigned long flags;
+};
+
+/* Per-tfm context: caches the device binding chosen lazily at init time. */
+struct zynqmp_sha_ctx {
+	struct zynqmp_sha_dev *dd;
+	unsigned long flags;
+};
+
+/* One probed SHA3 accelerator instance. */
+struct zynqmp_sha_dev {
+	struct list_head list;	/* link on zynqmp_sha.dev_list */
+	struct device *dev;
+	/* the lock protects queue and dev list*/
+	spinlock_t lock;
+	int err;
+
+	unsigned long flags;
+	struct crypto_queue queue;
+	struct ahash_request *req;
+};
+
+/* Driver-wide registry of probed devices. */
+struct zynqmp_sha_drv {
+	struct list_head dev_list;
+	/* the lock protects queue and dev list*/
+	spinlock_t lock;
+};
+
+static struct zynqmp_sha_drv zynqmp_sha = {
+	.dev_list = LIST_HEAD_INIT(zynqmp_sha.dev_list),
+	.lock = __SPIN_LOCK_UNLOCKED(zynqmp_sha.lock),
+};
+
+/*
+ * Start a SHA3-384 operation: bind the tfm to a device (first one on the
+ * list if not already bound) and issue the firmware INIT call.
+ *
+ * Returns 0 on success, -ENOTSUPP if the firmware lacks sha_hash, -ENODEV
+ * if no accelerator was probed, or the firmware call's error code.
+ */
+static int zynqmp_sha_init(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct zynqmp_sha_ctx *tctx = crypto_ahash_ctx(tfm);
+	struct zynqmp_sha_reqctx *ctx = ahash_request_ctx(req);
+	struct zynqmp_sha_dev *dd = NULL;
+	struct zynqmp_sha_dev *tmp;
+
+	if (!eemi_ops->sha_hash)
+		return -ENOTSUPP;
+
+	spin_lock_bh(&zynqmp_sha.lock);
+	if (!tctx->dd) {
+		list_for_each_entry(tmp, &zynqmp_sha.dev_list, list) {
+			dd = tmp;
+			break;
+		}
+		tctx->dd = dd;
+	} else {
+		dd = tctx->dd;
+	}
+	spin_unlock_bh(&zynqmp_sha.lock);
+
+	/* Empty dev_list leaves dd NULL; dd->dev below would oops. */
+	if (!dd)
+		return -ENODEV;
+
+	ctx->dd = dd;
+	dev_dbg(dd->dev, "init: digest size: %d\n",
+		crypto_ahash_digestsize(tfm));
+
+	return eemi_ops->sha_hash(0, 0, ZYNQMP_SHA3_INIT);
+}
+
+/*
+ * Feed req->nbytes bytes from req->src to the firmware hash engine.
+ *
+ * The scattered input is linearised into a DMA-coherent bounce buffer whose
+ * bus address is handed to the firmware.  A zero-length update is a no-op.
+ */
+static int zynqmp_sha_update(struct ahash_request *req)
+{
+	struct zynqmp_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+	struct zynqmp_sha_dev *dd = tctx->dd;
+	char *kbuf;
+	size_t dma_size = req->nbytes;
+	dma_addr_t dma_addr;
+	int ret;
+
+	if (!req->nbytes)
+		return 0;
+
+	if (!eemi_ops->sha_hash)
+		return -ENOTSUPP;
+
+	kbuf = dma_alloc_coherent(dd->dev, dma_size, &dma_addr, GFP_KERNEL);
+	if (!kbuf)
+		return -ENOMEM;
+
+	scatterwalk_map_and_copy(kbuf, req->src, 0, req->nbytes, 0);
+	/*
+	 * NOTE(review): flushing a dma_alloc_coherent() buffer looks
+	 * redundant -- presumably required by the PMU firmware interface;
+	 * confirm before removing.
+	 */
+	__flush_cache_user_range((unsigned long)kbuf,
+				 (unsigned long)kbuf + dma_size);
+	ret = eemi_ops->sha_hash(dma_addr, req->nbytes, ZYNQMP_SHA3_UPDATE);
+	dma_free_coherent(dd->dev, dma_size, kbuf, dma_addr);
+
+	return ret;
+}
+
+/*
+ * Finish the hash: ask the firmware to write the SHA3-384 digest into a
+ * coherent buffer and copy it to req->result.
+ *
+ * Fixes: the digest is copied only when the firmware call succeeded, and
+ * the magic "48" is spelled SHA384_DIGEST_SIZE (which it already equals).
+ */
+static int zynqmp_sha_final(struct ahash_request *req)
+{
+	struct zynqmp_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+	struct zynqmp_sha_dev *dd = tctx->dd;
+	char *kbuf;
+	size_t dma_size = SHA384_DIGEST_SIZE;
+	dma_addr_t dma_addr;
+	int ret;
+
+	if (!eemi_ops->sha_hash)
+		return -ENOTSUPP;
+
+	kbuf = dma_alloc_coherent(dd->dev, dma_size, &dma_addr, GFP_KERNEL);
+	if (!kbuf)
+		return -ENOMEM;
+
+	ret = eemi_ops->sha_hash(dma_addr, dma_size, ZYNQMP_SHA3_FINAL);
+	if (!ret)
+		memcpy(req->result, kbuf, SHA384_DIGEST_SIZE);
+	dma_free_coherent(dd->dev, dma_size, kbuf, dma_addr);
+
+	return ret;
+}
+
+/*
+ * finup = update + final.  Propagate failures instead of discarding them
+ * and unconditionally returning 0 as the original did.
+ */
+static int zynqmp_sha_finup(struct ahash_request *req)
+{
+	int ret;
+
+	ret = zynqmp_sha_update(req);
+	if (ret)
+		return ret;
+
+	return zynqmp_sha_final(req);
+}
+
+/*
+ * One-shot digest = init + update + final.  Propagate failures instead of
+ * discarding them and unconditionally returning 0 as the original did.
+ */
+static int zynqmp_sha_digest(struct ahash_request *req)
+{
+	int ret;
+
+	ret = zynqmp_sha_init(req);
+	if (ret)
+		return ret;
+
+	ret = zynqmp_sha_update(req);
+	if (ret)
+		return ret;
+
+	return zynqmp_sha_final(req);
+}
+
+/*
+ * Export the request state.  Only the reqctx (device binding + flags) is
+ * saved -- the hash state itself lives in the firmware, not on the host.
+ */
+static int zynqmp_sha_export(struct ahash_request *req, void *out)
+{
+	const struct zynqmp_sha_reqctx *ctx = ahash_request_ctx(req);
+
+	memcpy(out, ctx, sizeof(*ctx));
+	return 0;
+}
+
+/* Restore a previously exported reqctx (see zynqmp_sha_export). */
+static int zynqmp_sha_import(struct ahash_request *req, const void *in)
+{
+	struct zynqmp_sha_reqctx *ctx = ahash_request_ctx(req);
+
+	memcpy(ctx, in, sizeof(*ctx));
+	return 0;
+}
+
+/* tfm constructor: reserve per-request context space for our reqctx. */
+static int zynqmp_sha_cra_init(struct crypto_tfm *tfm)
+{
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct zynqmp_sha_reqctx));
+
+	return 0;
+}
+
+static struct ahash_alg sha3_alg = {
+	.init = zynqmp_sha_init,
+	.update = zynqmp_sha_update,
+	.final = zynqmp_sha_final,
+	.finup = zynqmp_sha_finup,
+	.digest = zynqmp_sha_digest,
+	.export = zynqmp_sha_export,
+	.import = zynqmp_sha_import,
+	.halg = {
+	.digestsize = SHA384_DIGEST_SIZE,
+	/*
+	 * export/import move a struct zynqmp_sha_reqctx, so statesize must
+	 * describe that type (the file's header comment says the same) --
+	 * not an unrelated struct sha256_state.
+	 */
+	.statesize = sizeof(struct zynqmp_sha_reqctx),
+	.base = {
+		.cra_name = "xilinx-keccak-384",
+		.cra_driver_name = "zynqmp-keccak-384",
+		.cra_priority = 300,
+		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_blocksize = SHA384_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct zynqmp_sha_ctx),
+		.cra_alignmask = 0,
+		.cra_module = THIS_MODULE,
+		.cra_init = zynqmp_sha_cra_init,
+		}
+	}
+};
+
+/* OF match table: binds this driver to the "xlnx,zynqmp-keccak-384" node. */
+static const struct of_device_id zynqmp_sha_dt_ids[] = {
+	{ .compatible = "xlnx,zynqmp-keccak-384" },
+	{ /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, zynqmp_sha_dt_ids);
+
+/*
+ * Probe: look up the EEMI firmware ops, allocate the per-device state,
+ * publish it on the driver-wide device list and register the ahash alg.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int zynqmp_sha_probe(struct platform_device *pdev)
+{
+	struct zynqmp_sha_dev *sha_dd;
+	struct device *dev = &pdev->dev;
+	int err;
+
+	eemi_ops = zynqmp_pm_get_eemi_ops();
+	if (IS_ERR(eemi_ops))
+		return PTR_ERR(eemi_ops);
+
+	sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
+	if (!sha_dd)
+		return -ENOMEM;
+
+	sha_dd->dev = dev;
+	platform_set_drvdata(pdev, sha_dd);
+	INIT_LIST_HEAD(&sha_dd->list);
+	spin_lock_init(&sha_dd->lock);
+	crypto_init_queue(&sha_dd->queue, ZYNQMP_SHA_QUEUE_LENGTH);
+	/* Make the device visible to tfms before registering the alg. */
+	spin_lock(&zynqmp_sha.lock);
+	list_add_tail(&sha_dd->list, &zynqmp_sha.dev_list);
+	spin_unlock(&zynqmp_sha.lock);
+
+	/* Best-effort: a DMA-mask failure is only logged, not fatal. */
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (err < 0)
+		dev_err(dev, "no usable DMA configuration");
+
+	err = crypto_register_ahash(&sha3_alg);
+	if (err)
+		goto err_algs;
+
+	return 0;
+
+err_algs:
+	/* Undo the list publication on registration failure. */
+	spin_lock(&zynqmp_sha.lock);
+	list_del(&sha_dd->list);
+	spin_unlock(&zynqmp_sha.lock);
+	dev_err(dev, "initialization failed.\n");
+
+	return err;
+}
+
+/*
+ * Remove: unhook this device from the driver-wide list and unregister the
+ * alg.  The local was spuriously declared "static" (function-lifetime
+ * storage for a per-call value); it is an ordinary automatic variable.
+ */
+static int zynqmp_sha_remove(struct platform_device *pdev)
+{
+	struct zynqmp_sha_dev *sha_dd = platform_get_drvdata(pdev);
+
+	if (!sha_dd)
+		return -ENODEV;
+
+	spin_lock(&zynqmp_sha.lock);
+	list_del(&sha_dd->list);
+	spin_unlock(&zynqmp_sha.lock);
+
+	crypto_unregister_ahash(&sha3_alg);
+
+	return 0;
+}
+
+/* Platform driver glue; matched via zynqmp_sha_dt_ids. */
+static struct platform_driver zynqmp_sha_driver = {
+	.probe = zynqmp_sha_probe,
+	.remove = zynqmp_sha_remove,
+	.driver = {
+		.name = "zynqmp-keccak-384",
+		.of_match_table = of_match_ptr(zynqmp_sha_dt_ids),
+	},
+};
+
+module_platform_driver(zynqmp_sha_driver);
+
+MODULE_DESCRIPTION("ZynqMP SHA3 hw acceleration support.");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nava kishore Manne <navam@xilinx.com>");
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 023db6883d05..98080d0b2d2a 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -709,6 +709,18 @@ config XILINX_ZYNQMP_DMA
help
Enable support for Xilinx ZynqMP DMA controller.
+config XILINX_PS_PCIE_DMA
+ tristate "Xilinx PS PCIe DMA support"
+ depends on (PCI && X86_64 || ARM64)
+ select DMA_ENGINE
+ help
+ Enable support for the Xilinx PS PCIe DMA engine present
+ in recent Xilinx ZynqMP chipsets.
+
+ Say Y here if you have such a chipset.
+
+ If unsure, say N.
+
config ZX_DMA
tristate "ZTE ZX DMA support"
depends on ARCH_ZX || COMPILE_TEST
@@ -739,6 +751,8 @@ source "drivers/dma/ti/Kconfig"
source "drivers/dma/fsl-dpaa2-qdma/Kconfig"
+source "drivers/dma/xilinx/Kconfig"
+
# clients
comment "DMA Clients"
depends on DMA_ENGINE
@@ -765,4 +779,18 @@ config DMATEST
config DMA_ENGINE_RAID
bool
+config XILINX_DMATEST
+ tristate "DMA Test client for AXI DMA"
+ depends on XILINX_DMA
+ help
+ Simple DMA test client. Say N unless you're debugging a
+ DMA Device driver.
+
+config XILINX_VDMATEST
+ tristate "DMA Test client for VDMA"
+ depends on XILINX_DMA
+ help
+ Simple xilinx VDMA test client. Say N unless you're debugging a
+ DMA Device driver.
+
endif
diff --git a/drivers/dma/xilinx/Kconfig b/drivers/dma/xilinx/Kconfig
new file mode 100644
index 000000000000..fa79a5e92880
--- /dev/null
+++ b/drivers/dma/xilinx/Kconfig
@@ -0,0 +1,47 @@
+#
+# XILINX DMA Engines configuration
+#
+
+menuconfig XILINX_DMA_ENGINES
+ bool "Xilinx DMA Engines"
+ help
+ Enable support for the Xilinx DMA controllers. It supports three DMA
+ engines: Axi Central DMA (memory to memory transfer), Axi DMA (memory and
+ device transfer), and Axi VDMA (memory and video device transfer).
+
+if XILINX_DMA_ENGINES
+
+config XILINX_DPDMA
+ tristate "Xilinx DPDMA Engine"
+ select DMA_ENGINE
+ help
+ Enable support for Xilinx DisplayPort DMA.
+
+config XILINX_DPDMA_DEBUG_FS
+ bool "Xilinx DPDMA debugfs"
+ depends on DEBUG_FS && XILINX_DPDMA
+ help
+ Enable the debugfs code for DPDMA driver. The debugfs code
+ enables debugging or testing related features. It exposes some
+ low level controls to the user space to help testing automation,
+ as well as can enable additional diagnostic or statistical
+ information.
+
+config XILINX_PS_PCIE_DMA_TEST
+ tristate "Xilinx PS PCIe DMA test client"
+ depends on XILINX_PS_PCIE_DMA
+ help
+ Enable support for the test client of Xilinx PS PCIe DMA engine
+ in recent Xilinx ZynqMP chipsets.
+
+ Say Y here if you have such a chipset.
+
+ If unsure, say N.
+
+endif # XILINX_DMA_ENGINES
+
+config XILINX_FRMBUF
+ tristate "Xilinx Framebuffer"
+ select DMA_ENGINE
+ help
+ Enable support for Xilinx Framebuffer DMA.
diff --git a/drivers/dma/xilinx/Makefile b/drivers/dma/xilinx/Makefile
index e921de575b55..ab3a5ae6e1e3 100644
--- a/drivers/dma/xilinx/Makefile
+++ b/drivers/dma/xilinx/Makefile
@@ -1,3 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_XILINX_DMATEST) += axidmatest.o
+obj-$(CONFIG_XILINX_VDMATEST) += vdmatest.o
+obj-$(CONFIG_XILINX_DPDMA) += xilinx_dpdma.o
obj-$(CONFIG_XILINX_DMA) += xilinx_dma.o
obj-$(CONFIG_XILINX_ZYNQMP_DMA) += zynqmp_dma.o
+xilinx_ps_pcie_dma-objs := xilinx_ps_pcie_main.o xilinx_ps_pcie_platform.o
+obj-$(CONFIG_XILINX_PS_PCIE_DMA) += xilinx_ps_pcie_dma.o
+obj-$(CONFIG_XILINX_PS_PCIE_DMA_TEST) += xilinx_ps_pcie_dma_client.o
+obj-$(CONFIG_XILINX_FRMBUF) += xilinx_frmbuf.o
diff --git a/drivers/dma/xilinx/axidmatest.c b/drivers/dma/xilinx/axidmatest.c
new file mode 100644
index 000000000000..63ef98510712
--- /dev/null
+++ b/drivers/dma/xilinx/axidmatest.c
@@ -0,0 +1,697 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * XILINX AXI DMA Engine test module
+ *
+ * Copyright (C) 2010 Xilinx, Inc. All rights reserved.
+ *
+ * Based on Atmel DMA Test Client
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/sched/task.h>
+#include <linux/dma/xilinx_dma.h>
+
+static unsigned int test_buf_size = 16384;
+module_param(test_buf_size, uint, 0444);
+MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
+
+/* 0 means run forever; the test loop checks "iterations && total_tests >= iterations". */
+static unsigned int iterations = 5;
+module_param(iterations, uint, 0444);
+MODULE_PARM_DESC(iterations,
+		 "Iterations before stopping test (default: 5, 0 = infinite)");
+
+/*
+ * Initialization patterns. All bytes in the source buffer has bit 7
+ * set, all bytes in the destination buffer has bit 7 cleared.
+ *
+ * Bit 6 is set for all bytes which are to be copied by the DMA
+ * engine. Bit 5 is set for all bytes which are to be overwritten by
+ * the DMA engine.
+ *
+ * The remaining bits are the inverse of a counter which increments by
+ * one for each byte address.
+ */
+#define PATTERN_SRC 0x80
+#define PATTERN_DST 0x00
+#define PATTERN_COPY 0x40
+#define PATTERN_OVERWRITE 0x20
+#define PATTERN_COUNT_MASK 0x1f
+
+struct dmatest_slave_thread {
+ struct list_head node;
+ struct task_struct *task;
+ struct dma_chan *tx_chan;
+ struct dma_chan *rx_chan;
+ u8 **srcs;
+ u8 **dsts;
+ enum dma_transaction_type type;
+ bool done;
+};
+
+struct dmatest_chan {
+ struct list_head node;
+ struct dma_chan *chan;
+ struct list_head threads;
+};
+
+/*
+ * These are protected by dma_list_mutex since they're only used by
+ * the DMA filter function callback
+ */
+static DECLARE_WAIT_QUEUE_HEAD(thread_wait);
+static LIST_HEAD(dmatest_channels);
+static unsigned int nr_channels;
+
+/* Scale @val events over @runtime microseconds to an events-per-second rate. */
+static unsigned long long dmatest_persec(s64 runtime, unsigned int val)
+{
+	unsigned long long rate = 1000000;
+
+	if (runtime <= 0)
+		return 0;
+
+	/* Shrink runtime to 32 bits so do_div() can take it as a divisor. */
+	for (; runtime > UINT_MAX; runtime >>= 1)
+		rate <<= 1;
+
+	rate *= val;
+	do_div(rate, runtime);
+	return rate;
+}
+
+/* Convert @len bytes moved in @runtime microseconds into KB/s. */
+static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
+{
+	unsigned long long kilobytes = len >> 10;
+
+	return dmatest_persec(runtime, kilobytes);
+}
+
+/* True while at least one test thread on either channel is still running. */
+static bool is_threaded_test_run(struct dmatest_chan *tx_dtc,
+				 struct dmatest_chan *rx_dtc)
+{
+	struct dmatest_slave_thread *t;
+
+	list_for_each_entry(t, &tx_dtc->threads, node)
+		if (!t->done)
+			return true;
+
+	list_for_each_entry(t, &rx_dtc->threads, node)
+		if (!t->done)
+			return true;
+
+	return false;
+}
+
+/* Draw one random unsigned long from the kernel RNG. */
+static unsigned long dmatest_random(void)
+{
+	unsigned long val;
+
+	get_random_bytes(&val, sizeof(val));
+	return val;
+}
+
+/*
+ * Fill every source buffer: PATTERN_SRC everywhere, with PATTERN_COPY also
+ * set on the [start, start + len) window the DMA will transfer.  The low
+ * bits carry an inverted per-byte counter used for mismatch diagnostics.
+ */
+static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len)
+{
+	unsigned int i;
+	u8 *buf;
+
+	for (; (buf = *bufs); bufs++) {
+		for (i = 0; i < start; i++)
+			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
+		for ( ; i < start + len; i++)
+			buf[i] = PATTERN_SRC | PATTERN_COPY
+				| (~i & PATTERN_COUNT_MASK);
+		for ( ; i < test_buf_size; i++)
+			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
+		/* dead "buf++" removed: buf is reloaded from *bufs each pass,
+		 * and the sibling dmatest_init_dsts() has no such statement.
+		 */
+	}
+}
+
+/*
+ * Fill every destination buffer with PATTERN_DST, additionally marking the
+ * [start, start + len) window the DMA is expected to overwrite with
+ * PATTERN_OVERWRITE.
+ */
+static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len)
+{
+	u8 *buf;
+
+	while ((buf = *bufs++)) {
+		unsigned int i = 0;
+
+		for (; i < start; i++)
+			buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
+		for (; i < start + len; i++)
+			buf[i] = PATTERN_DST | PATTERN_OVERWRITE
+				| (~i & PATTERN_COUNT_MASK);
+		for (; i < test_buf_size; i++)
+			buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
+	}
+}
+
+/* Classify one byte mismatch from its pattern bits and log a description. */
+static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
+			     unsigned int counter, bool is_srcbuf)
+{
+	u8 diff = actual ^ pattern;
+	u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
+	const char *thread_name = current->comm;
+
+	if (is_srcbuf)
+		pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
+			thread_name, index, expected, actual);
+	else if ((pattern & PATTERN_COPY) &&
+		 (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
+		pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
+			thread_name, index, expected, actual);
+	else if (diff & PATTERN_SRC)
+		pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
+			thread_name, index, expected, actual);
+	else
+		pr_warn("%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
+			thread_name, index, expected, actual);
+}
+
+/*
+ * Verify bufs[start..end) against the expected pattern/counter.  The first
+ * 32 mismatches are reported in detail; the remainder are only counted.
+ * Returns the total mismatch count across all buffers.
+ */
+static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
+				   unsigned int end, unsigned int counter,
+				   u8 pattern, bool is_srcbuf)
+{
+	unsigned int i;
+	unsigned int error_count = 0;
+	u8 actual;
+	u8 expected;
+	u8 *buf;
+	unsigned int counter_orig = counter;
+
+	for (; (buf = *bufs); bufs++) {
+		counter = counter_orig;
+		for (i = start; i < end; i++) {
+			actual = buf[i];
+			expected = pattern | (~counter & PATTERN_COUNT_MASK);
+			if (actual != expected) {
+				if (error_count < 32)
+					dmatest_mismatch(actual, pattern, i,
+							 counter, is_srcbuf);
+				error_count++;
+			}
+			counter++;
+		}
+	}
+
+	if (error_count > 32)
+		pr_warn("%s: %u errors suppressed\n",
+			current->comm, error_count - 32);
+
+	return error_count;
+}
+
+/* DMA completion callbacks: just wake the test thread waiting on the completion. */
+static void dmatest_slave_tx_callback(void *completion)
+{
+	complete(completion);
+}
+
+static void dmatest_slave_rx_callback(void *completion)
+{
+	complete(completion);
+}
+
+/* Function for slave transfers
+ * Each thread requires 2 channels, one for transmit, and one for receive
+ */
+static int dmatest_slave_func(void *data)
+{
+	struct dmatest_slave_thread *thread = data;
+	struct dma_chan *tx_chan;
+	struct dma_chan *rx_chan;
+	const char *thread_name;
+	unsigned int src_off, dst_off, len;
+	unsigned int error_count;
+	unsigned int failed_tests = 0;
+	unsigned int total_tests = 0;
+	dma_cookie_t tx_cookie;
+	dma_cookie_t rx_cookie;
+	enum dma_status status;
+	enum dma_ctrl_flags flags;
+	int ret;
+	int src_cnt;
+	int dst_cnt;
+	int bd_cnt = 11;	/* scatter-gather descriptors per transfer */
+	int i;
+
+	ktime_t ktime, start, diff;
+	ktime_t filltime = 0;
+	ktime_t comparetime = 0;
+	s64 runtime = 0;
+	unsigned long long total_len = 0;
+	thread_name = current->comm;
+	ret = -ENOMEM;
+
+
+	/* Ensure that all previous reads are complete */
+	smp_rmb();
+	tx_chan = thread->tx_chan;
+	rx_chan = thread->rx_chan;
+	dst_cnt = bd_cnt;
+	src_cnt = bd_cnt;
+
+	/* NULL-terminated arrays of test buffers, one per descriptor. */
+	thread->srcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
+	if (!thread->srcs)
+		goto err_srcs;
+	for (i = 0; i < src_cnt; i++) {
+		thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL);
+		if (!thread->srcs[i])
+			goto err_srcbuf;
+	}
+	thread->srcs[i] = NULL;
+
+	thread->dsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
+	if (!thread->dsts)
+		goto err_dsts;
+	for (i = 0; i < dst_cnt; i++) {
+		thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL);
+		if (!thread->dsts[i])
+			goto err_dstbuf;
+	}
+	thread->dsts[i] = NULL;
+
+	set_user_nice(current, 10);
+
+	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+
+	/*
+	 * Main loop: each iteration fills patterned buffers, runs one
+	 * back-to-back TX/RX scatter-gather transfer and verifies the data.
+	 * Fill and compare time is accounted separately from DMA runtime.
+	 */
+	ktime = ktime_get();
+	while (!kthread_should_stop() &&
+	       !(iterations && total_tests >= iterations)) {
+		struct dma_device *tx_dev = tx_chan->device;
+		struct dma_device *rx_dev = rx_chan->device;
+		struct dma_async_tx_descriptor *txd = NULL;
+		struct dma_async_tx_descriptor *rxd = NULL;
+		dma_addr_t dma_srcs[src_cnt];
+		dma_addr_t dma_dsts[dst_cnt];
+		struct completion rx_cmp;
+		struct completion tx_cmp;
+		unsigned long rx_tmo =
+				msecs_to_jiffies(300000); /* RX takes longer */
+		unsigned long tx_tmo = msecs_to_jiffies(30000);
+		u8 align = 0;
+		struct scatterlist tx_sg[bd_cnt];
+		struct scatterlist rx_sg[bd_cnt];
+
+		total_tests++;
+
+		/* honor larger alignment restrictions */
+		align = tx_dev->copy_align;
+		if (rx_dev->copy_align > align)
+			align = rx_dev->copy_align;
+
+		if (1 << align > test_buf_size) {
+			pr_err("%u-byte buffer too small for %d-byte alignment\n",
+			       test_buf_size, 1 << align);
+			break;
+		}
+
+		/* Random, alignment-rounded length and offsets for this run. */
+		len = dmatest_random() % test_buf_size + 1;
+		len = (len >> align) << align;
+		if (!len)
+			len = 1 << align;
+		total_len += len;
+		src_off = dmatest_random() % (test_buf_size - len + 1);
+		dst_off = dmatest_random() % (test_buf_size - len + 1);
+
+		src_off = (src_off >> align) << align;
+		dst_off = (dst_off >> align) << align;
+
+		start = ktime_get();
+		dmatest_init_srcs(thread->srcs, src_off, len);
+		dmatest_init_dsts(thread->dsts, dst_off, len);
+		diff = ktime_sub(ktime_get(), start);
+		filltime = ktime_add(filltime, diff);
+
+		for (i = 0; i < src_cnt; i++) {
+			u8 *buf = thread->srcs[i] + src_off;
+
+			dma_srcs[i] = dma_map_single(tx_dev->dev, buf, len,
+						     DMA_MEM_TO_DEV);
+		}
+
+		/* Destinations are mapped whole so the untouched regions can
+		 * be verified afterwards.
+		 */
+		for (i = 0; i < dst_cnt; i++) {
+			dma_dsts[i] = dma_map_single(rx_dev->dev,
+						     thread->dsts[i],
+						     test_buf_size,
+						     DMA_BIDIRECTIONAL);
+		}
+
+		sg_init_table(tx_sg, bd_cnt);
+		sg_init_table(rx_sg, bd_cnt);
+
+		for (i = 0; i < bd_cnt; i++) {
+			sg_dma_address(&tx_sg[i]) = dma_srcs[i];
+			sg_dma_address(&rx_sg[i]) = dma_dsts[i] + dst_off;
+
+			sg_dma_len(&tx_sg[i]) = len;
+			sg_dma_len(&rx_sg[i]) = len;
+		}
+
+		rxd = rx_dev->device_prep_slave_sg(rx_chan, rx_sg, bd_cnt,
+				DMA_DEV_TO_MEM, flags, NULL);
+
+		txd = tx_dev->device_prep_slave_sg(tx_chan, tx_sg, bd_cnt,
+				DMA_MEM_TO_DEV, flags, NULL);
+
+		if (!rxd || !txd) {
+			for (i = 0; i < src_cnt; i++)
+				dma_unmap_single(tx_dev->dev, dma_srcs[i], len,
+						 DMA_MEM_TO_DEV);
+			for (i = 0; i < dst_cnt; i++)
+				dma_unmap_single(rx_dev->dev, dma_dsts[i],
+						 test_buf_size,
+						 DMA_BIDIRECTIONAL);
+			pr_warn("%s: #%u: prep error with src_off=0x%x ",
+				thread_name, total_tests - 1, src_off);
+			pr_warn("dst_off=0x%x len=0x%x\n",
+				dst_off, len);
+			msleep(100);
+			failed_tests++;
+			continue;
+		}
+
+		init_completion(&rx_cmp);
+		rxd->callback = dmatest_slave_rx_callback;
+		rxd->callback_param = &rx_cmp;
+		rx_cookie = rxd->tx_submit(rxd);
+
+		init_completion(&tx_cmp);
+		txd->callback = dmatest_slave_tx_callback;
+		txd->callback_param = &tx_cmp;
+		tx_cookie = txd->tx_submit(txd);
+
+		if (dma_submit_error(rx_cookie) ||
+		    dma_submit_error(tx_cookie)) {
+			pr_warn("%s: #%u: submit error %d/%d with src_off=0x%x ",
+				thread_name, total_tests - 1,
+				rx_cookie, tx_cookie, src_off);
+			pr_warn("dst_off=0x%x len=0x%x\n",
+				dst_off, len);
+			msleep(100);
+			failed_tests++;
+			continue;
+		}
+		dma_async_issue_pending(rx_chan);
+		dma_async_issue_pending(tx_chan);
+
+		tx_tmo = wait_for_completion_timeout(&tx_cmp, tx_tmo);
+
+		status = dma_async_is_tx_complete(tx_chan, tx_cookie,
+						  NULL, NULL);
+
+		/*
+		 * NOTE(review): the "continue" paths below skip the
+		 * dma_unmap_single() calls for this iteration's mappings --
+		 * looks like a leak on timeout/error; confirm.
+		 */
+		if (tx_tmo == 0) {
+			pr_warn("%s: #%u: tx test timed out\n",
+				thread_name, total_tests - 1);
+			failed_tests++;
+			continue;
+		} else if (status != DMA_COMPLETE) {
+			pr_warn("%s: #%u: tx got completion callback, ",
+				thread_name, total_tests - 1);
+			pr_warn("but status is \'%s\'\n",
+				status == DMA_ERROR ? "error" :
+				"in progress");
+			failed_tests++;
+			continue;
+		}
+
+		rx_tmo = wait_for_completion_timeout(&rx_cmp, rx_tmo);
+		status = dma_async_is_tx_complete(rx_chan, rx_cookie,
+						  NULL, NULL);
+
+		if (rx_tmo == 0) {
+			pr_warn("%s: #%u: rx test timed out\n",
+				thread_name, total_tests - 1);
+			failed_tests++;
+			continue;
+		} else if (status != DMA_COMPLETE) {
+			pr_warn("%s: #%u: rx got completion callback, ",
+				thread_name, total_tests - 1);
+			pr_warn("but status is \'%s\'\n",
+				status == DMA_ERROR ? "error" :
+				"in progress");
+			failed_tests++;
+			continue;
+		}
+
+		/* Unmap by myself */
+		for (i = 0; i < dst_cnt; i++)
+			dma_unmap_single(rx_dev->dev, dma_dsts[i],
+					 test_buf_size, DMA_BIDIRECTIONAL);
+
+		error_count = 0;
+		start = ktime_get();
+		pr_debug("%s: verifying source buffer...\n", thread_name);
+		error_count += dmatest_verify(thread->srcs, 0, src_off,
+				0, PATTERN_SRC, true);
+		error_count += dmatest_verify(thread->srcs, src_off,
+				src_off + len, src_off,
+				PATTERN_SRC | PATTERN_COPY, true);
+		error_count += dmatest_verify(thread->srcs, src_off + len,
+				test_buf_size, src_off + len,
+				PATTERN_SRC, true);
+
+		pr_debug("%s: verifying dest buffer...\n",
+			 thread->task->comm);
+		error_count += dmatest_verify(thread->dsts, 0, dst_off,
+				0, PATTERN_DST, false);
+		error_count += dmatest_verify(thread->dsts, dst_off,
+				dst_off + len, src_off,
+				PATTERN_SRC | PATTERN_COPY, false);
+		error_count += dmatest_verify(thread->dsts, dst_off + len,
+				test_buf_size, dst_off + len,
+				PATTERN_DST, false);
+		diff = ktime_sub(ktime_get(), start);
+		comparetime = ktime_add(comparetime, diff);
+
+		if (error_count) {
+			pr_warn("%s: #%u: %u errors with ",
+				thread_name, total_tests - 1, error_count);
+			pr_warn("src_off=0x%x dst_off=0x%x len=0x%x\n",
+				src_off, dst_off, len);
+			failed_tests++;
+		} else {
+			pr_debug("%s: #%u: No errors with ",
+				 thread_name, total_tests - 1);
+			pr_debug("src_off=0x%x dst_off=0x%x len=0x%x\n",
+				 src_off, dst_off, len);
+		}
+	}
+
+	/* Report pure DMA runtime: subtract fill and compare overhead. */
+	ktime = ktime_sub(ktime_get(), ktime);
+	ktime = ktime_sub(ktime, comparetime);
+	ktime = ktime_sub(ktime, filltime);
+	runtime = ktime_to_us(ktime);
+
+	ret = 0;
+	/* goto-cleanup: labels below also serve the allocation error paths. */
+	for (i = 0; thread->dsts[i]; i++)
+		kfree(thread->dsts[i]);
+err_dstbuf:
+	kfree(thread->dsts);
+err_dsts:
+	for (i = 0; thread->srcs[i]; i++)
+		kfree(thread->srcs[i]);
+err_srcbuf:
+	kfree(thread->srcs);
+err_srcs:
+	pr_notice("%s: terminating after %u tests, %u failures %llu iops %llu KB/s (status %d)\n",
+		  thread_name, total_tests, failed_tests,
+		  dmatest_persec(runtime, total_tests),
+		  dmatest_KBs(runtime, total_len), ret);
+
+	thread->done = true;
+	wake_up(&thread_wait);
+
+	return ret;
+}
+
+/* Stop and reap every test thread attached to @dtc, then free the wrapper. */
+static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
+{
+	struct dmatest_slave_thread *thread;
+	struct dmatest_slave_thread *_thread;
+	int ret;
+
+	list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
+		ret = kthread_stop(thread->task);
+		pr_debug("dmatest: thread %s exited with status %d\n",
+			 thread->task->comm, ret);
+		list_del(&thread->node);
+		/* drop the reference taken in dmatest_add_slave_threads() */
+		put_task_struct(thread->task);
+		kfree(thread);
+	}
+	kfree(dtc);
+}
+
+/*
+ * Spawn one test thread driving the @tx_dtc/@rx_dtc channel pair.
+ *
+ * Returns 1 (one thread, two channels) on success or a negative errno.
+ * The original only warned on kzalloc() failure and then dereferenced the
+ * NULL pointer; bail out with -ENOMEM instead.
+ */
+static int dmatest_add_slave_threads(struct dmatest_chan *tx_dtc,
+				     struct dmatest_chan *rx_dtc)
+{
+	struct dmatest_slave_thread *thread;
+	struct dma_chan *tx_chan = tx_dtc->chan;
+	struct dma_chan *rx_chan = rx_dtc->chan;
+	int ret;
+
+	thread = kzalloc(sizeof(struct dmatest_slave_thread), GFP_KERNEL);
+	if (!thread) {
+		pr_warn("dmatest: No memory for slave thread %s-%s\n",
+			dma_chan_name(tx_chan), dma_chan_name(rx_chan));
+		return -ENOMEM;
+	}
+
+	thread->tx_chan = tx_chan;
+	thread->rx_chan = rx_chan;
+	thread->type = (enum dma_transaction_type)DMA_SLAVE;
+
+	/* Ensure that all previous writes are complete */
+	smp_wmb();
+	thread->task = kthread_run(dmatest_slave_func, thread, "%s-%s",
+				   dma_chan_name(tx_chan),
+				   dma_chan_name(rx_chan));
+	ret = PTR_ERR(thread->task);
+	if (IS_ERR(thread->task)) {
+		pr_warn("dmatest: Failed to run thread %s-%s\n",
+			dma_chan_name(tx_chan), dma_chan_name(rx_chan));
+		kfree(thread);
+		return ret;
+	}
+
+	/* srcbuf and dstbuf are allocated by the thread itself */
+	get_task_struct(thread->task);
+	list_add_tail(&thread->node, &tx_dtc->threads);
+
+	/* Added one thread with 2 channels */
+	return 1;
+}
+
+/*
+ * Wrap the TX/RX channel pair, start the test thread and register both
+ * wrappers on the global channel list.  When iterations is non-zero,
+ * block until the threads finish.
+ *
+ * Fixes: tx_dtc was leaked when the rx_dtc allocation failed, and the
+ * return value of dmatest_add_slave_threads() was ignored.
+ */
+static int dmatest_add_slave_channels(struct dma_chan *tx_chan,
+				      struct dma_chan *rx_chan)
+{
+	struct dmatest_chan *tx_dtc;
+	struct dmatest_chan *rx_dtc;
+	unsigned int thread_count = 0;
+	int err;
+
+	tx_dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
+	if (!tx_dtc) {
+		pr_warn("dmatest: No memory for tx %s\n",
+			dma_chan_name(tx_chan));
+		return -ENOMEM;
+	}
+
+	rx_dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
+	if (!rx_dtc) {
+		pr_warn("dmatest: No memory for rx %s\n",
+			dma_chan_name(rx_chan));
+		kfree(tx_dtc);
+		return -ENOMEM;
+	}
+
+	tx_dtc->chan = tx_chan;
+	rx_dtc->chan = rx_chan;
+	INIT_LIST_HEAD(&tx_dtc->threads);
+	INIT_LIST_HEAD(&rx_dtc->threads);
+
+	err = dmatest_add_slave_threads(tx_dtc, rx_dtc);
+	if (err < 0) {
+		kfree(rx_dtc);
+		kfree(tx_dtc);
+		return err;
+	}
+	thread_count += 1;
+
+	pr_info("dmatest: Started %u threads using %s %s\n",
+		thread_count, dma_chan_name(tx_chan), dma_chan_name(rx_chan));
+
+	list_add_tail(&tx_dtc->node, &dmatest_channels);
+	list_add_tail(&rx_dtc->node, &dmatest_channels);
+	nr_channels += 2;
+
+	if (iterations)
+		wait_event(thread_wait, !is_threaded_test_run(tx_dtc, rx_dtc));
+
+	return 0;
+}
+
+/*
+ * Bind to the DT test node: grab the "axidma0" (TX) and "axidma1" (RX)
+ * channels and start the threaded transfer test.  -EPROBE_DEFER from the
+ * channel lookup is passed through silently so the core can retry later.
+ */
+static int xilinx_axidmatest_probe(struct platform_device *pdev)
+{
+	struct dma_chan *chan, *rx_chan;
+	int err;
+
+	chan = dma_request_chan(&pdev->dev, "axidma0");
+	if (IS_ERR(chan)) {
+		err = PTR_ERR(chan);
+		if (err != -EPROBE_DEFER)
+			pr_err("xilinx_dmatest: No Tx channel\n");
+		return err;
+	}
+
+	rx_chan = dma_request_chan(&pdev->dev, "axidma1");
+	if (IS_ERR(rx_chan)) {
+		err = PTR_ERR(rx_chan);
+		if (err != -EPROBE_DEFER)
+			pr_err("xilinx_dmatest: No Rx channel\n");
+		goto free_tx;
+	}
+
+	err = dmatest_add_slave_channels(chan, rx_chan);
+	if (err) {
+		pr_err("xilinx_dmatest: Unable to add channels\n");
+		goto free_rx;
+	}
+
+	return 0;
+
+free_rx:
+	dma_release_channel(rx_chan);
+free_tx:
+	dma_release_channel(chan);
+
+	return err;
+}
+
+/*
+ * Unbind: tear down every registered test channel -- stop its threads,
+ * terminate outstanding transfers and release the dmaengine channel.
+ */
+static int xilinx_axidmatest_remove(struct platform_device *pdev)
+{
+	struct dmatest_chan *dtc, *_dtc;
+	struct dma_chan *chan;
+
+	list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
+		list_del(&dtc->node);
+		chan = dtc->chan;
+		dmatest_cleanup_channel(dtc);
+		pr_info("xilinx_dmatest: dropped channel %s\n",
+			dma_chan_name(chan));
+		dmaengine_terminate_all(chan);
+		dma_release_channel(chan);
+	}
+	return 0;
+}
+
+/* OF match table for the axidma test DT node. */
+static const struct of_device_id xilinx_axidmatest_of_ids[] = {
+	{ .compatible = "xlnx,axi-dma-test-1.00.a",},
+	{}
+};
+
+/* Platform driver glue; matched via xilinx_axidmatest_of_ids. */
+static struct platform_driver xilinx_axidmatest_driver = {
+	.driver = {
+		.name = "xilinx_axidmatest",
+		.of_match_table = xilinx_axidmatest_of_ids,
+	},
+	.probe = xilinx_axidmatest_probe,
+	.remove = xilinx_axidmatest_remove,
+};
+
+/*
+ * Registered via late_initcall so the DMA providers the test client needs
+ * have had a chance to probe first.  Added the conventional trailing
+ * semicolon that module_exit() was missing.
+ */
+static int __init axidma_init(void)
+{
+	return platform_driver_register(&xilinx_axidmatest_driver);
+}
+late_initcall(axidma_init);
+
+static void __exit axidma_exit(void)
+{
+	platform_driver_unregister(&xilinx_axidmatest_driver);
+}
+module_exit(axidma_exit);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx AXI DMA Test Client");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/xilinx/vdmatest.c b/drivers/dma/xilinx/vdmatest.c
new file mode 100644
index 000000000000..58d9e6e329e2
--- /dev/null
+++ b/drivers/dma/xilinx/vdmatest.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * XILINX VDMA Engine test client driver
+ *
+ * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
+ *
+ * Based on Atmel DMA Test Client
+ *
+ * Description:
+ * This is a simple Xilinx VDMA test client for AXI VDMA driver.
+ * This test assumes both the channels of VDMA are enabled in the
+ * hardware design and configured in back-to-back connection. Test
+ * starts by pumping the data onto one channel (MM2S) and then
+ * compares the data that is received on the other channel (S2MM).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/dma/xilinx_dma.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/sched/task.h>
+#include <linux/wait.h>
+
+/* Size of each test buffer; recomputed as hsize * vsize by the test thread. */
+static unsigned int test_buf_size = 64;
+module_param(test_buf_size, uint, 0444);
+MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
+
+/* Number of test iterations; 0 runs until the thread is stopped. */
+static unsigned int iterations = 1;
+module_param(iterations, uint, 0444);
+MODULE_PARM_DESC(iterations,
+		"Iterations before stopping test (default: 1, 0 means infinite)");
+
+/* Bytes per line of a frame (programmed into xt.sgl[0].size). */
+static unsigned int hsize = 64;
+module_param(hsize, uint, 0444);
+MODULE_PARM_DESC(hsize, "Horizontal size in bytes");
+
+/* Number of lines per frame (programmed into xt.numf); the original
+ * description wrongly said "in bytes".
+ */
+static unsigned int vsize = 32;
+module_param(vsize, uint, 0444);
+MODULE_PARM_DESC(vsize, "Vertical size (number of lines per frame)");
+
+/*
+ * Initialization patterns. All bytes in the source buffer has bit 7
+ * set, all bytes in the destination buffer has bit 7 cleared.
+ *
+ * Bit 6 is set for all bytes which are to be copied by the DMA
+ * engine. Bit 5 is set for all bytes which are to be overwritten by
+ * the DMA engine.
+ *
+ * The remaining bits are the inverse of a counter which increments by
+ * one for each byte address.
+ */
+#define PATTERN_SRC 0x80
+#define PATTERN_DST 0x00
+#define PATTERN_COPY 0x40
+#define PATTERN_OVERWRITE 0x20
+#define PATTERN_COUNT_MASK 0x1f
+
+/* Maximum number of frame buffers */
+#define MAX_NUM_FRAMES 32
+
+/**
+ * struct xilinx_vdmatest_slave_thread - VDMA test thread
+ * @node: Thread node
+ * @task: Task structure pointer
+ * @tx_chan: Tx channel pointer
+ * @rx_chan: Rx Channel pointer
+ * @srcs: Source buffer
+ * @dsts: Destination buffer
+ * @type: DMA transaction type
+ * @done: true once the thread has finished all its test iterations
+ */
+struct xilinx_vdmatest_slave_thread {
+ struct list_head node;
+ struct task_struct *task;
+ struct dma_chan *tx_chan;
+ struct dma_chan *rx_chan;
+ u8 **srcs;
+ u8 **dsts;
+ enum dma_transaction_type type;
+ bool done;
+};
+
+/**
+ * struct xilinx_vdmatest_chan - VDMA Test channel
+ * @node: Channel node
+ * @chan: DMA channel pointer
+ * @threads: List of VDMA test threads
+ */
+struct xilinx_vdmatest_chan {
+ struct list_head node;
+ struct dma_chan *chan;
+ struct list_head threads;
+};
+
+/* Global variables */
+static DECLARE_WAIT_QUEUE_HEAD(thread_wait);
+static LIST_HEAD(xilinx_vdmatest_channels);
+static unsigned int nr_channels;
+static unsigned int frm_cnt;
+static dma_addr_t dma_srcs[MAX_NUM_FRAMES];
+static dma_addr_t dma_dsts[MAX_NUM_FRAMES];
+static struct dma_interleaved_template xt;
+
+/* Return true while any test thread on either channel is still running. */
+static bool is_threaded_test_run(struct xilinx_vdmatest_chan *tx_dtc,
+	struct xilinx_vdmatest_chan *rx_dtc)
+{
+	struct xilinx_vdmatest_slave_thread *t;
+
+	list_for_each_entry(t, &tx_dtc->threads, node)
+		if (!t->done)
+			return true;
+
+	list_for_each_entry(t, &rx_dtc->threads, node)
+		if (!t->done)
+			return true;
+
+	return false;
+}
+
+/*
+ * Fill each source frame in the NULL-terminated @bufs list with the SRC
+ * pattern; bytes in [start, start + len) additionally carry PATTERN_COPY
+ * so the verifier can distinguish copied bytes from untouched ones.
+ */
+static void xilinx_vdmatest_init_srcs(u8 **bufs, unsigned int start,
+	unsigned int len)
+{
+	unsigned int i;
+	u8 *buf;
+
+	for (; (buf = *bufs); bufs++) {
+		for (i = 0; i < start; i++)
+			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
+		for (; i < start + len; i++)
+			buf[i] = PATTERN_SRC | PATTERN_COPY
+				| (~i & PATTERN_COUNT_MASK);
+		for (; i < test_buf_size; i++)
+			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
+		/*
+		 * The original code incremented the local 'buf' here, which
+		 * had no effect (and differed from init_dsts); removed as
+		 * dead code.
+		 */
+	}
+}
+
+/*
+ * Fill each destination frame in the NULL-terminated @bufs list with the
+ * DST pattern; bytes in [start, start + len) additionally carry
+ * PATTERN_OVERWRITE to mark the region the DMA engine should overwrite.
+ */
+static void xilinx_vdmatest_init_dsts(u8 **bufs, unsigned int start,
+	unsigned int len)
+{
+	u8 *buf;
+	unsigned int idx;
+
+	for (; (buf = *bufs) != NULL; bufs++) {
+		for (idx = 0; idx < start; idx++)
+			buf[idx] = PATTERN_DST | (~idx & PATTERN_COUNT_MASK);
+		for (; idx < start + len; idx++)
+			buf[idx] = PATTERN_DST | PATTERN_OVERWRITE
+					| (~idx & PATTERN_COUNT_MASK);
+		for (; idx < test_buf_size; idx++)
+			buf[idx] = PATTERN_DST | (~idx & PATTERN_COUNT_MASK);
+	}
+}
+
+/*
+ * Report a single byte mismatch, classifying it from the pattern bits:
+ * source clobbered, destination not copied, destination wrongly copied,
+ * or a plain counter mismatch.
+ */
+static void xilinx_vdmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
+	unsigned int counter, bool is_srcbuf)
+{
+	u8 diff = actual ^ pattern;
+	u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
+	const char *name = current->comm;
+
+	if (is_srcbuf) {
+		pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
+			name, index, expected, actual);
+	} else if ((pattern & PATTERN_COPY) &&
+		   (diff & (PATTERN_COPY | PATTERN_OVERWRITE))) {
+		pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
+			name, index, expected, actual);
+	} else if (diff & PATTERN_SRC) {
+		pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
+			name, index, expected, actual);
+	} else {
+		pr_warn("%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
+			name, index, expected, actual);
+	}
+}
+
+/*
+ * Compare bytes [start, end) of every buffer in the NULL-terminated @bufs
+ * list against the expected pattern.  @counter seeds the per-byte count
+ * and restarts for each buffer.  Only the first 32 mismatches are printed
+ * in detail; the remainder is summarised.  Returns the total mismatch
+ * count across all buffers.
+ */
+static unsigned int xilinx_vdmatest_verify(u8 **bufs, unsigned int start,
+ unsigned int end, unsigned int counter, u8 pattern,
+ bool is_srcbuf)
+{
+ unsigned int i, error_count = 0;
+ u8 actual, expected, *buf;
+ unsigned int counter_orig = counter;
+
+ for (; (buf = *bufs); bufs++) {
+ counter = counter_orig; /* each frame restarts the counter */
+ for (i = start; i < end; i++) {
+ actual = buf[i];
+ expected = pattern | (~counter & PATTERN_COUNT_MASK);
+ if (actual != expected) {
+ /* detailed reports capped at 32 */
+ if (error_count < 32)
+ xilinx_vdmatest_mismatch(actual,
+ pattern, i,
+ counter, is_srcbuf);
+ error_count++;
+ }
+ counter++;
+ }
+ }
+
+ if (error_count > 32)
+ pr_warn("%s: %u errors suppressed\n",
+ current->comm, error_count - 32);
+
+ return error_count;
+}
+
+/* MM2S (Tx) completion callback: wake the waiting test thread. */
+static void xilinx_vdmatest_slave_tx_callback(void *completion)
+{
+ pr_debug("Got tx callback\n");
+ complete(completion);
+}
+
+/* S2MM (Rx) completion callback: wake the waiting test thread. */
+static void xilinx_vdmatest_slave_rx_callback(void *completion)
+{
+ pr_debug("Got rx callback\n");
+ complete(completion);
+}
+
+/*
+ * Main test thread: pumps frames out on the MM2S (tx) channel and verifies
+ * what the back-to-back S2MM (rx) channel received against the source
+ * pattern.  Each thread requires 2 channels, one for transmit and one for
+ * receive.  Returns 0 after the configured iterations, -ENOMEM on
+ * allocation failure.
+ */
+static int xilinx_vdmatest_slave_func(void *data)
+{
+ struct xilinx_vdmatest_slave_thread *thread = data;
+ struct dma_chan *tx_chan, *rx_chan;
+ const char *thread_name;
+ unsigned int len, error_count;
+ unsigned int failed_tests = 0, total_tests = 0;
+ dma_cookie_t tx_cookie = 0, rx_cookie = 0;
+ enum dma_status status;
+ enum dma_ctrl_flags flags;
+ int ret = -ENOMEM, i;
+ struct xilinx_vdma_config config;
+
+ thread_name = current->comm;
+
+ /* Limit testing scope here */
+ /*
+  * NOTE(review): this rewrites the module-global test_buf_size, which
+  * every thread reads — racy if more than one test thread ever runs.
+  */
+ test_buf_size = hsize * vsize;
+
+ /* This barrier ensures 'thread' is initialized and
+ * we get valid DMA channels
+ */
+ smp_rmb();
+ tx_chan = thread->tx_chan;
+ rx_chan = thread->rx_chan;
+
+ /* NULL-terminated frame pointer arrays (hence frm_cnt + 1). */
+ thread->srcs = kcalloc(frm_cnt+1, sizeof(u8 *), GFP_KERNEL);
+ if (!thread->srcs)
+ goto err_srcs;
+ for (i = 0; i < frm_cnt; i++) {
+ thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL);
+ if (!thread->srcs[i])
+ goto err_srcbuf;
+ }
+
+ thread->dsts = kcalloc(frm_cnt+1, sizeof(u8 *), GFP_KERNEL);
+ if (!thread->dsts)
+ goto err_dsts;
+ for (i = 0; i < frm_cnt; i++) {
+ thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL);
+ if (!thread->dsts[i])
+ goto err_dstbuf;
+ }
+
+ set_user_nice(current, 10);
+
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+
+ while (!kthread_should_stop()
+ && !(iterations && total_tests >= iterations)) {
+ struct dma_device *tx_dev = tx_chan->device;
+ struct dma_device *rx_dev = rx_chan->device;
+ struct dma_async_tx_descriptor *txd = NULL;
+ struct dma_async_tx_descriptor *rxd = NULL;
+ struct completion rx_cmp, tx_cmp;
+ unsigned long rx_tmo =
+ msecs_to_jiffies(30000); /* RX takes longer */
+ unsigned long tx_tmo = msecs_to_jiffies(30000);
+ u8 align = 0;
+
+ total_tests++;
+
+ /* honor larger alignment restrictions */
+ align = tx_dev->copy_align;
+ if (rx_dev->copy_align > align)
+ align = rx_dev->copy_align;
+
+ if (1 << align > test_buf_size) {
+ pr_err("%u-byte buffer too small for %d-byte alignment\n",
+ test_buf_size, 1 << align);
+ break;
+ }
+
+ len = test_buf_size;
+ xilinx_vdmatest_init_srcs(thread->srcs, 0, len);
+ xilinx_vdmatest_init_dsts(thread->dsts, 0, len);
+
+ /* Zero out configuration */
+ memset(&config, 0, sizeof(struct xilinx_vdma_config));
+
+ /* Set up hardware configuration information */
+ config.frm_cnt_en = 1;
+ config.coalesc = frm_cnt * 10;
+ config.park = 1;
+ xilinx_vdma_channel_set_config(tx_chan, &config);
+
+ xilinx_vdma_channel_set_config(rx_chan, &config);
+
+ /* Map and queue one interleaved descriptor per destination frame. */
+ for (i = 0; i < frm_cnt; i++) {
+ dma_dsts[i] = dma_map_single(rx_dev->dev,
+ thread->dsts[i],
+ test_buf_size,
+ DMA_DEV_TO_MEM);
+
+ if (dma_mapping_error(rx_dev->dev, dma_dsts[i])) {
+ failed_tests++;
+ continue;
+ }
+ xt.dst_start = dma_dsts[i];
+ xt.dir = DMA_DEV_TO_MEM;
+ xt.numf = vsize;
+ xt.sgl[0].size = hsize;
+ xt.sgl[0].icg = 0;
+ xt.frame_size = 1;
+ rxd = rx_dev->device_prep_interleaved_dma(rx_chan,
+ &xt, flags);
+ /*
+  * NOTE(review): rxd may be NULL here; it is dereferenced
+  * before the !rxd check further down — potential NULL
+  * pointer dereference on prep failure.
+  */
+ rx_cookie = rxd->tx_submit(rxd);
+ }
+
+ /* Same for each source frame on the transmit side. */
+ for (i = 0; i < frm_cnt; i++) {
+ u8 *buf = thread->srcs[i];
+
+ dma_srcs[i] = dma_map_single(tx_dev->dev, buf, len,
+ DMA_MEM_TO_DEV);
+
+ if (dma_mapping_error(tx_dev->dev, dma_srcs[i])) {
+ failed_tests++;
+ continue;
+ }
+ xt.src_start = dma_srcs[i];
+ xt.dir = DMA_MEM_TO_DEV;
+ xt.numf = vsize;
+ xt.sgl[0].size = hsize;
+ xt.sgl[0].icg = 0;
+ xt.frame_size = 1;
+ txd = tx_dev->device_prep_interleaved_dma(tx_chan,
+ &xt, flags);
+ /* NOTE(review): same NULL-dereference hazard as rxd above. */
+ tx_cookie = txd->tx_submit(txd);
+ }
+
+ /* Only catches a prep failure on the *last* frame — see notes above. */
+ if (!rxd || !txd) {
+ for (i = 0; i < frm_cnt; i++)
+ dma_unmap_single(tx_dev->dev, dma_srcs[i], len,
+ DMA_MEM_TO_DEV);
+ for (i = 0; i < frm_cnt; i++)
+ dma_unmap_single(rx_dev->dev, dma_dsts[i],
+ test_buf_size,
+ DMA_DEV_TO_MEM);
+ pr_warn("%s: #%u: prep error with len=0x%x ",
+ thread_name, total_tests - 1, len);
+ msleep(100);
+ failed_tests++;
+ continue;
+ }
+
+ init_completion(&rx_cmp);
+ rxd->callback = xilinx_vdmatest_slave_rx_callback;
+ rxd->callback_param = &rx_cmp;
+
+ init_completion(&tx_cmp);
+ txd->callback = xilinx_vdmatest_slave_tx_callback;
+ txd->callback_param = &tx_cmp;
+
+ if (dma_submit_error(rx_cookie) ||
+ dma_submit_error(tx_cookie)) {
+ pr_warn("%s: #%u: submit error %d/%d with len=0x%x ",
+ thread_name, total_tests - 1,
+ rx_cookie, tx_cookie, len);
+ msleep(100);
+ failed_tests++;
+ continue;
+ }
+ dma_async_issue_pending(tx_chan);
+ dma_async_issue_pending(rx_chan);
+
+ tx_tmo = wait_for_completion_timeout(&tx_cmp, tx_tmo);
+
+ status = dma_async_is_tx_complete(tx_chan, tx_cookie,
+ NULL, NULL);
+
+ if (tx_tmo == 0) {
+ pr_warn("%s: #%u: tx test timed out\n",
+ thread_name, total_tests - 1);
+ failed_tests++;
+ continue;
+ } else if (status != DMA_COMPLETE) {
+ pr_warn(
+ "%s: #%u: tx got completion callback, ",
+ thread_name, total_tests - 1);
+ pr_warn("but status is \'%s\'\n",
+ status == DMA_ERROR ? "error" :
+ "in progress");
+ failed_tests++;
+ continue;
+ }
+
+ rx_tmo = wait_for_completion_timeout(&rx_cmp, rx_tmo);
+ status = dma_async_is_tx_complete(rx_chan, rx_cookie,
+ NULL, NULL);
+
+ if (rx_tmo == 0) {
+ pr_warn("%s: #%u: rx test timed out\n",
+ thread_name, total_tests - 1);
+ failed_tests++;
+ continue;
+ } else if (status != DMA_COMPLETE) {
+ pr_warn(
+ "%s: #%u: rx got completion callback, ",
+ thread_name, total_tests - 1);
+ pr_warn("but status is \'%s\'\n",
+ status == DMA_ERROR ? "error" :
+ "in progress");
+ failed_tests++;
+ continue;
+ }
+
+ /* Unmap by myself */
+ /*
+  * NOTE(review): only the destination mappings are unmapped on the
+  * success path; dma_srcs[] mappings from this iteration are never
+  * unmapped — DMA-mapping leak.
+  */
+ for (i = 0; i < frm_cnt; i++)
+ dma_unmap_single(rx_dev->dev, dma_dsts[i],
+ test_buf_size, DMA_DEV_TO_MEM);
+
+ error_count = 0;
+
+ pr_debug("%s: verifying source buffer...\n", thread_name);
+ error_count += xilinx_vdmatest_verify(thread->srcs, 0, 0,
+ 0, PATTERN_SRC, true);
+ error_count += xilinx_vdmatest_verify(thread->srcs, 0,
+ len, 0, PATTERN_SRC | PATTERN_COPY, true);
+ error_count += xilinx_vdmatest_verify(thread->srcs, len,
+ test_buf_size, len, PATTERN_SRC, true);
+
+ pr_debug("%s: verifying dest buffer...\n",
+ thread->task->comm);
+ error_count += xilinx_vdmatest_verify(thread->dsts, 0, 0,
+ 0, PATTERN_DST, false);
+ error_count += xilinx_vdmatest_verify(thread->dsts, 0,
+ len, 0, PATTERN_SRC | PATTERN_COPY, false);
+ error_count += xilinx_vdmatest_verify(thread->dsts, len,
+ test_buf_size, len, PATTERN_DST, false);
+
+ if (error_count) {
+ pr_warn("%s: #%u: %u errors with len=0x%x\n",
+ thread_name, total_tests - 1, error_count, len);
+ failed_tests++;
+ } else {
+ pr_debug("%s: #%u: No errors with len=0x%x\n",
+ thread_name, total_tests - 1, len);
+ }
+ }
+
+ ret = 0;
+ /* Success path: free all frames, then fall through the labels below. */
+ for (i = 0; thread->dsts[i]; i++)
+ kfree(thread->dsts[i]);
+err_dstbuf:
+ /*
+  * NOTE(review): jumping here on a partial allocation failure frees the
+  * pointer array but leaks any frame buffers allocated before the
+  * failing one (likewise err_srcbuf below for source frames).
+  */
+ kfree(thread->dsts);
+err_dsts:
+ for (i = 0; thread->srcs[i]; i++)
+ kfree(thread->srcs[i]);
+err_srcbuf:
+ kfree(thread->srcs);
+err_srcs:
+ pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
+ thread_name, total_tests, failed_tests, ret);
+
+ thread->done = true;
+ wake_up(&thread_wait);
+
+ return ret;
+}
+
+/* Stop and reap every test thread attached to @dtc, then free @dtc. */
+static void xilinx_vdmatest_cleanup_channel(struct xilinx_vdmatest_chan *dtc)
+{
+	struct xilinx_vdmatest_slave_thread *t, *tmp;
+
+	list_for_each_entry_safe(t, tmp, &dtc->threads, node) {
+		int status = kthread_stop(t->task);
+
+		pr_info("xilinx_vdmatest: thread %s exited with status %d\n",
+			t->task->comm, status);
+		list_del(&t->node);
+		put_task_struct(t->task);
+		kfree(t);
+	}
+	kfree(dtc);
+}
+
+/*
+ * Create one test thread driving the tx/rx channel pair.  Returns 1 (one
+ * thread added) on success, or a negative errno on failure.
+ */
+static int
+xilinx_vdmatest_add_slave_threads(struct xilinx_vdmatest_chan *tx_dtc,
+	struct xilinx_vdmatest_chan *rx_dtc)
+{
+	struct xilinx_vdmatest_slave_thread *thread;
+	struct dma_chan *tx_chan = tx_dtc->chan;
+	struct dma_chan *rx_chan = rx_dtc->chan;
+
+	thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+	if (!thread) {
+		pr_warn("xilinx_vdmatest: No memory for slave thread %s-%s\n",
+			dma_chan_name(tx_chan), dma_chan_name(rx_chan));
+		/*
+		 * Fixed: the original fell through here and dereferenced
+		 * the NULL 'thread' pointer.
+		 */
+		return -ENOMEM;
+	}
+
+	thread->tx_chan = tx_chan;
+	thread->rx_chan = rx_chan;
+	thread->type = (enum dma_transaction_type)DMA_SLAVE;
+
+	/* This barrier ensures the DMA channels in the 'thread'
+	 * are initialized
+	 */
+	smp_wmb();
+	thread->task = kthread_run(xilinx_vdmatest_slave_func, thread, "%s-%s",
+		dma_chan_name(tx_chan), dma_chan_name(rx_chan));
+	if (IS_ERR(thread->task)) {
+		pr_warn("xilinx_vdmatest: Failed to run thread %s-%s\n",
+			dma_chan_name(tx_chan), dma_chan_name(rx_chan));
+		kfree(thread);
+		return PTR_ERR(thread->task);
+	}
+
+	/* Hold a task reference so cleanup can inspect it after exit. */
+	get_task_struct(thread->task);
+	list_add_tail(&thread->node, &tx_dtc->threads);
+
+	/* Added one thread with 2 channels */
+	return 1;
+}
+
+/*
+ * Register a tx/rx channel pair, start a test thread on it, and (when a
+ * finite iteration count is configured) wait for the test to complete.
+ * Returns 0 on success or a negative errno.
+ */
+static int xilinx_vdmatest_add_slave_channels(struct dma_chan *tx_chan,
+	struct dma_chan *rx_chan)
+{
+	struct xilinx_vdmatest_chan *tx_dtc, *rx_dtc;
+	unsigned int thread_count = 0;
+	int ret;
+
+	tx_dtc = kmalloc(sizeof(*tx_dtc), GFP_KERNEL);
+	if (!tx_dtc)
+		return -ENOMEM;
+
+	rx_dtc = kmalloc(sizeof(*rx_dtc), GFP_KERNEL);
+	if (!rx_dtc) {
+		kfree(tx_dtc);	/* fixed: the original leaked tx_dtc here */
+		return -ENOMEM;
+	}
+
+	tx_dtc->chan = tx_chan;
+	rx_dtc->chan = rx_chan;
+	INIT_LIST_HEAD(&tx_dtc->threads);
+	INIT_LIST_HEAD(&rx_dtc->threads);
+
+	/* fixed: thread-creation failures were silently ignored */
+	ret = xilinx_vdmatest_add_slave_threads(tx_dtc, rx_dtc);
+	if (ret < 0) {
+		kfree(rx_dtc);
+		kfree(tx_dtc);
+		return ret;
+	}
+	thread_count += 1;
+
+	pr_info("xilinx_vdmatest: Started %u threads using %s %s\n",
+		thread_count, dma_chan_name(tx_chan), dma_chan_name(rx_chan));
+
+	list_add_tail(&tx_dtc->node, &xilinx_vdmatest_channels);
+	list_add_tail(&rx_dtc->node, &xilinx_vdmatest_channels);
+	nr_channels += 2;
+
+	if (iterations)
+		wait_event(thread_wait, !is_threaded_test_run(tx_dtc, rx_dtc));
+
+	return 0;
+}
+
+/*
+ * Probe: read the frame-store count from the device tree, request the two
+ * back-to-back VDMA channels, and kick off the test.
+ */
+static int xilinx_vdmatest_probe(struct platform_device *pdev)
+{
+	struct dma_chan *chan, *rx_chan;
+	int err;
+
+	err = of_property_read_u32(pdev->dev.of_node,
+				   "xlnx,num-fstores", &frm_cnt);
+	if (err < 0) {
+		pr_err("xilinx_vdmatest: missing xlnx,num-fstores property\n");
+		return err;
+	}
+
+	/*
+	 * Fixed: frm_cnt indexes the fixed-size dma_srcs[]/dma_dsts[]
+	 * arrays, so an unchecked DT value could overflow them.
+	 */
+	if (!frm_cnt || frm_cnt > MAX_NUM_FRAMES) {
+		pr_err("xilinx_vdmatest: invalid xlnx,num-fstores %u\n",
+		       frm_cnt);
+		return -EINVAL;
+	}
+
+	/*
+	 * Fixed: dma_request_slave_channel() returns NULL on failure, not
+	 * an ERR_PTR, so the original IS_ERR() checks could never fire and
+	 * a missing channel caused a NULL dereference later.
+	 */
+	chan = dma_request_slave_channel(&pdev->dev, "vdma0");
+	if (!chan) {
+		pr_err("xilinx_vdmatest: No Tx channel\n");
+		return -EPROBE_DEFER;
+	}
+
+	rx_chan = dma_request_slave_channel(&pdev->dev, "vdma1");
+	if (!rx_chan) {
+		err = -EPROBE_DEFER;
+		pr_err("xilinx_vdmatest: No Rx channel\n");
+		goto free_tx;
+	}
+
+	err = xilinx_vdmatest_add_slave_channels(chan, rx_chan);
+	if (err) {
+		pr_err("xilinx_vdmatest: Unable to add channels\n");
+		goto free_rx;
+	}
+	return 0;
+
+free_rx:
+	dma_release_channel(rx_chan);
+free_tx:
+	dma_release_channel(chan);
+
+	return err;
+}
+
+/* Tear down every registered test channel on driver removal. */
+static int xilinx_vdmatest_remove(struct platform_device *pdev)
+{
+	struct xilinx_vdmatest_chan *dtc, *tmp;
+
+	list_for_each_entry_safe(dtc, tmp, &xilinx_vdmatest_channels, node) {
+		struct dma_chan *dchan = dtc->chan;
+
+		list_del(&dtc->node);
+		xilinx_vdmatest_cleanup_channel(dtc);
+		pr_info("xilinx_vdmatest: dropped channel %s\n",
+			dma_chan_name(dchan));
+		dmaengine_terminate_async(dchan);
+		dma_release_channel(dchan);
+	}
+	return 0;
+}
+
+/* OF match table: binds this test client to its device-tree test node. */
+static const struct of_device_id xilinx_vdmatest_of_ids[] = {
+ { .compatible = "xlnx,axi-vdma-test-1.00.a",},
+ {}
+};
+
+/* Platform driver; registered by module_platform_driver() below. */
+static struct platform_driver xilinx_vdmatest_driver = {
+	.driver = {
+		.name = "xilinx_vdmatest",
+		/*
+		 * Removed redundant ".owner = THIS_MODULE":
+		 * platform_driver_register() (via module_platform_driver)
+		 * sets .owner itself.
+		 */
+		.of_match_table = xilinx_vdmatest_of_ids,
+	},
+	.probe = xilinx_vdmatest_probe,
+	.remove = xilinx_vdmatest_remove,
+};
+
+module_platform_driver(xilinx_vdmatest_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx AXI VDMA Test Client");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
new file mode 100644
index 000000000000..cf17c20085e6
--- /dev/null
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -0,0 +1,2324 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx ZynqMP DPDMA Engine driver
+ *
+ * Copyright (C) 2015 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
+#include "../dmaengine.h"
+
+/* DPDMA registers */
+#define XILINX_DPDMA_ERR_CTRL 0x0
+#define XILINX_DPDMA_ISR 0x4
+#define XILINX_DPDMA_IMR 0x8
+#define XILINX_DPDMA_IEN 0xc
+#define XILINX_DPDMA_IDS 0x10
+#define XILINX_DPDMA_INTR_DESC_DONE_MASK (0x3f << 0)
+#define XILINX_DPDMA_INTR_DESC_DONE_SHIFT 0
+#define XILINX_DPDMA_INTR_NO_OSTAND_MASK (0x3f << 6)
+#define XILINX_DPDMA_INTR_NO_OSTAND_SHIFT 6
+#define XILINX_DPDMA_INTR_AXI_ERR_MASK (0x3f << 12)
+#define XILINX_DPDMA_INTR_AXI_ERR_SHIFT 12
+#define XILINX_DPDMA_INTR_DESC_ERR_MASK (0x3f << 18)
+/*
+ * NOTE(review): the mask above covers bits 18..23 but the shift below is
+ * 16 — one of the two looks wrong; confirm against the DPDMA register
+ * documentation.
+ */
+#define XILINX_DPDMA_INTR_DESC_ERR_SHIFT 16
+#define XILINX_DPDMA_INTR_WR_CMD_FIFO_FULL BIT(24)
+#define XILINX_DPDMA_INTR_WR_DATA_FIFO_FULL BIT(25)
+#define XILINX_DPDMA_INTR_AXI_4K_CROSS BIT(26)
+#define XILINX_DPDMA_INTR_VSYNC BIT(27)
+#define XILINX_DPDMA_INTR_CHAN_ERR_MASK 0x41000
+#define XILINX_DPDMA_INTR_CHAN_ERR 0xfff000
+#define XILINX_DPDMA_INTR_GLOBAL_ERR 0x7000000
+#define XILINX_DPDMA_INTR_ERR_ALL 0x7fff000
+#define XILINX_DPDMA_INTR_CHAN_MASK 0x41041
+#define XILINX_DPDMA_INTR_GLOBAL_MASK 0xf000000
+#define XILINX_DPDMA_INTR_ALL 0xfffffff
+#define XILINX_DPDMA_EISR 0x14
+#define XILINX_DPDMA_EIMR 0x18
+#define XILINX_DPDMA_EIEN 0x1c
+#define XILINX_DPDMA_EIDS 0x20
+#define XILINX_DPDMA_EINTR_INV_APB BIT(0)
+#define XILINX_DPDMA_EINTR_RD_AXI_ERR_MASK (0x3f << 1)
+#define XILINX_DPDMA_EINTR_RD_AXI_ERR_SHIFT 1
+#define XILINX_DPDMA_EINTR_PRE_ERR_MASK (0x3f << 7)
+#define XILINX_DPDMA_EINTR_PRE_ERR_SHIFT 7
+#define XILINX_DPDMA_EINTR_CRC_ERR_MASK (0x3f << 13)
+#define XILINX_DPDMA_EINTR_CRC_ERR_SHIFT 13
+#define XILINX_DPDMA_EINTR_WR_AXI_ERR_MASK (0x3f << 19)
+#define XILINX_DPDMA_EINTR_WR_AXI_ERR_SHIFT 19
+#define XILINX_DPDMA_EINTR_DESC_DONE_ERR_MASK (0x3f << 25)
+#define XILINX_DPDMA_EINTR_DESC_DONE_ERR_SHIFT 25
+/*
+ * NOTE(review): BIT(32) does not fit a 32-bit register (and is undefined
+ * for a 32-bit type); confirm the intended EISR bit position.
+ */
+#define XILINX_DPDMA_EINTR_RD_CMD_FIFO_FULL BIT(32)
+#define XILINX_DPDMA_EINTR_CHAN_ERR_MASK 0x2082082
+#define XILINX_DPDMA_EINTR_CHAN_ERR 0x7ffffffe
+#define XILINX_DPDMA_EINTR_GLOBAL_ERR 0x80000001
+#define XILINX_DPDMA_EINTR_ALL 0xffffffff
+#define XILINX_DPDMA_CNTL 0x100
+#define XILINX_DPDMA_GBL 0x104
+#define XILINX_DPDMA_GBL_TRIG_SHIFT 0
+#define XILINX_DPDMA_GBL_RETRIG_SHIFT 6
+#define XILINX_DPDMA_ALC0_CNTL 0x108
+#define XILINX_DPDMA_ALC0_STATUS 0x10c
+#define XILINX_DPDMA_ALC0_MAX 0x110
+#define XILINX_DPDMA_ALC0_MIN 0x114
+#define XILINX_DPDMA_ALC0_ACC 0x118
+#define XILINX_DPDMA_ALC0_ACC_TRAN 0x11c
+#define XILINX_DPDMA_ALC1_CNTL 0x120
+#define XILINX_DPDMA_ALC1_STATUS 0x124
+#define XILINX_DPDMA_ALC1_MAX 0x128
+#define XILINX_DPDMA_ALC1_MIN 0x12c
+#define XILINX_DPDMA_ALC1_ACC 0x130
+#define XILINX_DPDMA_ALC1_ACC_TRAN 0x134
+
+/* Channel register */
+#define XILINX_DPDMA_CH_BASE 0x200
+#define XILINX_DPDMA_CH_OFFSET 0x100
+#define XILINX_DPDMA_CH_DESC_START_ADDRE 0x0
+#define XILINX_DPDMA_CH_DESC_START_ADDR 0x4
+#define XILINX_DPDMA_CH_DESC_NEXT_ADDRE 0x8
+#define XILINX_DPDMA_CH_DESC_NEXT_ADDR 0xc
+#define XILINX_DPDMA_CH_PYLD_CUR_ADDRE 0x10
+#define XILINX_DPDMA_CH_PYLD_CUR_ADDR 0x14
+#define XILINX_DPDMA_CH_CNTL 0x18
+#define XILINX_DPDMA_CH_CNTL_ENABLE BIT(0)
+#define XILINX_DPDMA_CH_CNTL_PAUSE BIT(1)
+#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_SHIFT 2
+#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_SHIFT 6
+#define XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_SHIFT 10
+#define XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS 11
+#define XILINX_DPDMA_CH_STATUS 0x1c
+#define XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK (0xf << 21)
+#define XILINX_DPDMA_CH_STATUS_OTRAN_CNT_SHIFT 21
+#define XILINX_DPDMA_CH_VDO 0x20
+#define XILINX_DPDMA_CH_PYLD_SZ 0x24
+#define XILINX_DPDMA_CH_DESC_ID 0x28
+
+/* DPDMA descriptor fields */
+#define XILINX_DPDMA_DESC_CONTROL_PREEMBLE (0xa5)
+#define XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR BIT(8)
+#define XILINX_DPDMA_DESC_CONTROL_DESC_UPDATE BIT(9)
+#define XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE BIT(10)
+#define XILINX_DPDMA_DESC_CONTROL_FRAG_MODE BIT(18)
+#define XILINX_DPDMA_DESC_CONTROL_LAST BIT(19)
+#define XILINX_DPDMA_DESC_CONTROL_ENABLE_CRC BIT(20)
+#define XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME BIT(21)
+#define XILINX_DPDMA_DESC_ID_MASK (0xffff << 0)
+#define XILINX_DPDMA_DESC_ID_SHIFT (0)
+#define XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK (0x3ffff << 0)
+#define XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_SHIFT (0)
+#define XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK (0x3fff << 18)
+#define XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_SHIFT (18)
+#define XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK (0xfff)
+#define XILINX_DPDMA_DESC_ADDR_EXT_ADDR_SHIFT (16)
+
+#define XILINX_DPDMA_ALIGN_BYTES 256
+#define XILINX_DPDMA_LINESIZE_ALIGN_BITS 128
+
+#define XILINX_DPDMA_NUM_CHAN 6
+#define XILINX_DPDMA_PAGE_MASK ((1 << 12) - 1)
+#define XILINX_DPDMA_PAGE_SHIFT 12
+
+/**
+ * struct xilinx_dpdma_hw_desc - DPDMA hardware descriptor
+ * @control: control configuration field
+ * @desc_id: descriptor ID
+ * @xfer_size: transfer size
+ * @hsize_stride: horizontal size and stride
+ * @timestamp_lsb: LSB of time stamp
+ * @timestamp_msb: MSB of time stamp
+ * @addr_ext: upper 16 bit of 48 bit address (next_desc and src_addr)
+ * @next_desc: next descriptor 32 bit address
+ * @src_addr: payload source address (lower 32 bit of 1st 4KB page)
+ * @addr_ext_23: upper 16 bit of 48 bit address (src_addr2 and src_addr3)
+ * @addr_ext_45: upper 16 bit of 48 bit address (src_addr4 and src_addr5)
+ * @src_addr2: payload source address (lower 32 bit of 2nd 4KB page)
+ * @src_addr3: payload source address (lower 32 bit of 3rd 4KB page)
+ * @src_addr4: payload source address (lower 32 bit of 4th 4KB page)
+ * @src_addr5: payload source address (lower 32 bit of 5th 4KB page)
+ * @crc: descriptor CRC
+ */
+struct xilinx_dpdma_hw_desc {
+ u32 control;
+ u32 desc_id;
+ u32 xfer_size;
+ u32 hsize_stride;
+ u32 timestamp_lsb;
+ u32 timestamp_msb;
+ u32 addr_ext;
+ u32 next_desc;
+ u32 src_addr;
+ u32 addr_ext_23;
+ u32 addr_ext_45;
+ u32 src_addr2;
+ u32 src_addr3;
+ u32 src_addr4;
+ u32 src_addr5;
+ u32 crc;
+} __aligned(XILINX_DPDMA_ALIGN_BYTES);
+
+/**
+ * struct xilinx_dpdma_sw_desc - DPDMA software descriptor
+ * @hw: DPDMA hardware descriptor
+ * @node: list node for software descriptors
+ * @phys: physical address of the software descriptor
+ */
+struct xilinx_dpdma_sw_desc {
+ struct xilinx_dpdma_hw_desc hw;
+ struct list_head node;
+ dma_addr_t phys;
+};
+
+/**
+ * enum xilinx_dpdma_tx_desc_status - DPDMA tx descriptor status
+ * @PREPARED: descriptor is prepared for transaction
+ * @ACTIVE: transaction is (being) done successfully
+ * @ERRORED: descriptor generates some errors
+ */
+enum xilinx_dpdma_tx_desc_status {
+ PREPARED,
+ ACTIVE,
+ ERRORED
+};
+
+/**
+ * struct xilinx_dpdma_tx_desc - DPDMA transaction descriptor
+ * @async_tx: DMA async transaction descriptor
+ * @descriptors: list of software descriptors
+ * @node: list node for transaction descriptors
+ * @status: tx descriptor status
+ * @done_cnt: number of complete notification to deliver
+ */
+struct xilinx_dpdma_tx_desc {
+ struct dma_async_tx_descriptor async_tx;
+ struct list_head descriptors;
+ struct list_head node;
+ enum xilinx_dpdma_tx_desc_status status;
+ unsigned int done_cnt;
+};
+
+/**
+ * enum xilinx_dpdma_chan_id - DPDMA channel ID
+ * @VIDEO0: video 1st channel
+ * @VIDEO1: video 2nd channel for multi plane yuv formats
+ * @VIDEO2: video 3rd channel for multi plane yuv formats
+ * @GRAPHICS: graphics channel
+ * @AUDIO0: 1st audio channel
+ * @AUDIO1: 2nd audio channel
+ */
+enum xilinx_dpdma_chan_id {
+ VIDEO0,
+ VIDEO1,
+ VIDEO2,
+ GRAPHICS,
+ AUDIO0,
+ AUDIO1
+};
+
+/**
+ * enum xilinx_dpdma_chan_status - DPDMA channel status
+ * @IDLE: idle state
+ * @STREAMING: actively streaming state
+ */
+enum xilinx_dpdma_chan_status {
+ IDLE,
+ STREAMING
+};
+
+/*
+ * DPDMA descriptor placement
+ * --------------------------
+ * DPDMA descritpor life time is described with following placements:
+ *
+ * allocated_desc -> submitted_desc -> pending_desc -> active_desc -> done_list
+ *
+ * Transition is triggered as following:
+ *
+ * -> allocated_desc : a descriptor allocation
+ * allocated_desc -> submitted_desc: a descriptor submission
+ * submitted_desc -> pending_desc: request to issue pending a descriptor
+ * pending_desc -> active_desc: VSYNC intr when a desc is scheduled to DPDMA
+ * active_desc -> done_list: VSYNC intr when DPDMA switches to a new desc
+ */
+
+/**
+ * struct xilinx_dpdma_chan - DPDMA channel
+ * @common: generic dma channel structure
+ * @reg: register base address
+ * @id: channel ID
+ * @wait_to_stop: queue to wait for outstanding transacitons before stopping
+ * @status: channel status
+ * @first_frame: flag for the first frame of stream
+ * @video_group: flag if multi-channel operation is needed for video channels
+ * @lock: lock to access struct xilinx_dpdma_chan
+ * @desc_pool: descriptor allocation pool
+ * @done_task: done IRQ bottom half handler
+ * @err_task: error IRQ bottom half handler
+ * @allocated_desc: allocated descriptor
+ * @submitted_desc: submitted descriptor
+ * @pending_desc: pending descriptor to be scheduled in next period
+ * @active_desc: descriptor that the DPDMA channel is active on
+ * @done_list: done descriptor list
+ * @xdev: DPDMA device
+ */
+struct xilinx_dpdma_chan {
+ struct dma_chan common;
+ void __iomem *reg;
+ enum xilinx_dpdma_chan_id id;
+
+ wait_queue_head_t wait_to_stop;
+ enum xilinx_dpdma_chan_status status;
+ bool first_frame;
+ bool video_group;
+
+ spinlock_t lock; /* lock to access struct xilinx_dpdma_chan */
+ struct dma_pool *desc_pool;
+ struct tasklet_struct done_task;
+ struct tasklet_struct err_task;
+
+ struct xilinx_dpdma_tx_desc *allocated_desc;
+ struct xilinx_dpdma_tx_desc *submitted_desc;
+ struct xilinx_dpdma_tx_desc *pending_desc;
+ struct xilinx_dpdma_tx_desc *active_desc;
+ struct list_head done_list;
+
+ struct xilinx_dpdma_device *xdev;
+};
+
+/**
+ * struct xilinx_dpdma_device - DPDMA device
+ * @common: generic dma device structure
+ * @reg: register base address
+ * @dev: generic device structure
+ * @axi_clk: axi clock
+ * @chan: DPDMA channels
+ * @ext_addr: flag for 64 bit system (48 bit addressing)
+ * @desc_addr: descriptor addressing callback (32 bit vs 64 bit)
+ */
+struct xilinx_dpdma_device {
+ struct dma_device common;
+ void __iomem *reg;
+ struct device *dev;
+
+ struct clk *axi_clk;
+ struct xilinx_dpdma_chan *chan[XILINX_DPDMA_NUM_CHAN];
+
+ bool ext_addr;
+ void (*desc_addr)(struct xilinx_dpdma_sw_desc *sw_desc,
+ struct xilinx_dpdma_sw_desc *prev,
+ dma_addr_t dma_addr[], unsigned int num_src_addr);
+};
+
+#ifdef CONFIG_XILINX_DPDMA_DEBUG_FS
+#define XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE 32
+#define XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR "65535"
+#define IN_RANGE(x, min, max) ({ \
+ typeof(x) _x = (x); \
+ _x >= (min) && _x <= (max); })
+
+/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
+enum xilinx_dpdma_testcases {
+ DPDMA_TC_INTR_DONE,
+ DPDMA_TC_NONE
+};
+
+struct xilinx_dpdma_debugfs {
+ enum xilinx_dpdma_testcases testcase;
+ u16 xilinx_dpdma_intr_done_count;
+ enum xilinx_dpdma_chan_id chan_id;
+};
+
+static struct xilinx_dpdma_debugfs dpdma_debugfs;
+struct xilinx_dpdma_debugfs_request {
+ const char *req;
+ enum xilinx_dpdma_testcases tc;
+ ssize_t (*read_handler)(char **kern_buff);
+ ssize_t (*write_handler)(char **cmd);
+};
+
+static void xilinx_dpdma_debugfs_intr_done_count_incr(int chan_id)
+{
+ if (chan_id == dpdma_debugfs.chan_id)
+ dpdma_debugfs.xilinx_dpdma_intr_done_count++;
+}
+
+/*
+ * Parse a numeric debugfs argument.  Returns the parsed value, or -1 when
+ * the argument is missing or not a valid number.
+ */
+static s64 xilinx_dpdma_debugfs_argument_value(char *arg)
+{
+	s64 value;
+
+	if (!arg || kstrtos64(arg, 0, &value))
+		return -1;
+
+	return value;
+}
+
+/*
+ * Handle a "DESCRIPTOR_DONE_INTR start <chan>" debugfs request: arm the
+ * descriptor-done interrupt counter for the given channel.  Returns 0 on
+ * success, -EINVAL on a malformed request.
+ */
+static ssize_t
+xilinx_dpdma_debugfs_desc_done_intr_write(char **dpdma_test_arg)
+{
+	char *arg;
+	char *arg_chan_id;
+	s64 id;
+
+	arg = strsep(dpdma_test_arg, " ");
+	/*
+	 * Fixed: strsep() returns NULL when the command has no further
+	 * tokens; the original passed NULL straight to strncasecmp().
+	 */
+	if (!arg || strncasecmp(arg, "start", 5) != 0)
+		return -EINVAL;
+
+	arg_chan_id = strsep(dpdma_test_arg, " ");
+	/* Handles a NULL token itself (returns -1). */
+	id = xilinx_dpdma_debugfs_argument_value(arg_chan_id);
+
+	if (id < 0 || !IN_RANGE(id, VIDEO0, AUDIO1))
+		return -EINVAL;
+
+	dpdma_debugfs.testcase = DPDMA_TC_INTR_DONE;
+	dpdma_debugfs.xilinx_dpdma_intr_done_count = 0;
+	dpdma_debugfs.chan_id = id;
+
+	return 0;
+}
+
+/*
+ * Report the accumulated descriptor-done interrupt count into the
+ * caller-provided buffer and end the testcase.  Returns 0.
+ */
+static ssize_t xilinx_dpdma_debugfs_desc_done_intr_read(char **kern_buff)
+{
+	size_t out_str_len;
+
+	dpdma_debugfs.testcase = DPDMA_TC_NONE;
+
+	/*
+	 * Fixed: snprintf() writes at most size-1 characters, so sizing the
+	 * output with strlen("65535") alone truncated five-digit counts to
+	 * four characters; reserve room for the terminating NUL.
+	 */
+	out_str_len = strlen(XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR) + 1;
+	out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE,
+			    out_str_len);
+	snprintf(*kern_buff, out_str_len, "%d",
+		 dpdma_debugfs.xilinx_dpdma_intr_done_count);
+
+	return 0;
+}
+
+/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
+/* NOTE(review): only used in this file; could be static const - confirm */
+struct xilinx_dpdma_debugfs_request dpdma_debugfs_reqs[] = {
+ {"DESCRIPTOR_DONE_INTR", DPDMA_TC_INTR_DONE,
+ xilinx_dpdma_debugfs_desc_done_intr_read,
+ xilinx_dpdma_debugfs_desc_done_intr_write},
+};
+
+/*
+ * debugfs "testcase" write: copy the user command, match its first token
+ * against dpdma_debugfs_reqs[], and dispatch the rest of the command to
+ * the matching write handler.
+ *
+ * Return: @size on success, -EBUSY if a testcase is already armed,
+ * -EINVAL on an unknown/malformed command, or a copy/alloc error code.
+ */
+static ssize_t xilinx_dpdma_debugfs_write(struct file *f, const char __user
+					  *buf, size_t size, loff_t *pos)
+{
+	char *kern_buff, *kern_buff_start;
+	char *dpdma_test_req;
+	int ret;
+	int i;
+
+	/* size_t is unsigned, so only zero needs rejecting */
+	if (*pos != 0 || size == 0)
+		return -EINVAL;
+
+	/* Supporting single instance of test as of now*/
+	if (dpdma_debugfs.testcase != DPDMA_TC_NONE)
+		return -EBUSY;
+
+	/*
+	 * +1 keeps the buffer NUL-terminated even when the user string
+	 * fills all @size bytes: strncpy_from_user() does not terminate
+	 * the destination in that case.
+	 */
+	kern_buff = kzalloc(size + 1, GFP_KERNEL);
+	if (!kern_buff)
+		return -ENOMEM;
+	/* strsep() advances kern_buff below; remember the start to free */
+	kern_buff_start = kern_buff;
+
+	ret = strncpy_from_user(kern_buff, buf, size);
+	if (ret < 0) {
+		kfree(kern_buff_start);
+		return ret;
+	}
+
+	/* Read the testcase name from a user request */
+	dpdma_test_req = strsep(&kern_buff, " ");
+
+	for (i = 0; i < ARRAY_SIZE(dpdma_debugfs_reqs); i++) {
+		if (!strcasecmp(dpdma_test_req, dpdma_debugfs_reqs[i].req)) {
+			if (!dpdma_debugfs_reqs[i].write_handler(&kern_buff)) {
+				kfree(kern_buff_start);
+				return size;
+			}
+			break;
+		}
+	}
+	kfree(kern_buff_start);
+	return -EINVAL;
+}
+
+/*
+ * debugfs "testcase" read: report the armed testcase's result (and disarm
+ * it via its read handler), or "No testcase executed" when none is armed.
+ *
+ * Return: number of bytes copied, 0 at EOF (non-zero *@pos), or a
+ * negative error code.
+ */
+static ssize_t xilinx_dpdma_debugfs_read(struct file *f, char __user *buf,
+					 size_t size, loff_t *pos)
+{
+	char *kern_buff = NULL;
+	size_t kern_buff_len, out_str_len;
+	enum xilinx_dpdma_testcases tc;
+	int ret;
+
+	/* size_t is unsigned, so only zero needs rejecting */
+	if (size == 0)
+		return -EINVAL;
+
+	/* Single-shot read: any non-zero offset means EOF */
+	if (*pos != 0)
+		return 0;
+
+	kern_buff = kzalloc(XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE, GFP_KERNEL);
+	if (!kern_buff) {
+		dpdma_debugfs.testcase = DPDMA_TC_NONE;
+		return -ENOMEM;
+	}
+
+	tc = dpdma_debugfs.testcase;
+	if (tc == DPDMA_TC_NONE) {
+		/* +1 for the NUL so snprintf() doesn't drop the last char */
+		out_str_len = strlen("No testcase executed") + 1;
+		out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE,
+				    out_str_len);
+		snprintf(kern_buff, out_str_len, "%s", "No testcase executed");
+	} else {
+		ret = dpdma_debugfs_reqs[tc].read_handler(&kern_buff);
+		if (ret) {
+			kfree(kern_buff);
+			return ret;
+		}
+	}
+
+	kern_buff_len = strlen(kern_buff);
+	size = min(size, kern_buff_len);
+
+	/*
+	 * copy_to_user() returns the number of bytes NOT copied; that value
+	 * must not be handed back as a (positive) byte count - map any
+	 * partial copy to -EFAULT instead.
+	 */
+	if (copy_to_user(buf, kern_buff, size)) {
+		kfree(kern_buff);
+		return -EFAULT;
+	}
+
+	kfree(kern_buff);
+	/* Move past the end so the next read returns 0 (EOF) */
+	*pos = size + 1;
+	return size;
+}
+
+/* debugfs "testcase" file: write arms a test, read reports and disarms it */
+static const struct file_operations fops_xilinx_dpdma_dbgfs = {
+ .owner = THIS_MODULE,
+ .read = xilinx_dpdma_debugfs_read,
+ .write = xilinx_dpdma_debugfs_write,
+};
+
+/**
+ * xilinx_dpdma_debugfs_init - Create the debugfs "dpdma/testcase" entry
+ * @dev: device, used only for error logging
+ *
+ * Disarm any testcase and create /sys/kernel/debug/dpdma/testcase backed
+ * by fops_xilinx_dpdma_dbgfs.
+ *
+ * Return: 0 on success, -ENODEV if a debugfs entry could not be created.
+ *
+ * NOTE(review): on recent kernels debugfs_create_dir()/file() return an
+ * ERR_PTR rather than NULL on failure, so these NULL checks may never
+ * trigger there - confirm against the target kernel version.
+ */
+static int xilinx_dpdma_debugfs_init(struct device *dev)
+{
+ int err;
+ struct dentry *xilinx_dpdma_debugfs_dir, *xilinx_dpdma_debugfs_file;
+
+ dpdma_debugfs.testcase = DPDMA_TC_NONE;
+
+ xilinx_dpdma_debugfs_dir = debugfs_create_dir("dpdma", NULL);
+ if (!xilinx_dpdma_debugfs_dir) {
+ dev_err(dev, "debugfs_create_dir failed\n");
+ return -ENODEV;
+ }
+
+ xilinx_dpdma_debugfs_file =
+ debugfs_create_file("testcase", 0444,
+ xilinx_dpdma_debugfs_dir, NULL,
+ &fops_xilinx_dpdma_dbgfs);
+ if (!xilinx_dpdma_debugfs_file) {
+ dev_err(dev, "debugfs_create_file testcase failed\n");
+ err = -ENODEV;
+ goto err_dbgfs;
+ }
+ return 0;
+
+err_dbgfs:
+ /* Removes the directory and any children created so far */
+ debugfs_remove_recursive(xilinx_dpdma_debugfs_dir);
+ xilinx_dpdma_debugfs_dir = NULL;
+ return err;
+}
+
+#else
+/* Stub when CONFIG_XILINX_DPDMA_DEBUG_FS is off: no debugfs entries */
+static int xilinx_dpdma_debugfs_init(struct device *dev)
+{
+ return 0;
+}
+
+/* Stub: DONE-interrupt bookkeeping compiles away without debugfs */
+static void xilinx_dpdma_debugfs_intr_done_count_incr(int chan_id)
+{
+}
+#endif /* CONFIG_XILINX_DPDMA_DEBUG_FS */
+
+/* Convert an embedded dmaengine async_tx back to the driver's tx wrapper */
+#define to_dpdma_tx_desc(tx) \
+ container_of(tx, struct xilinx_dpdma_tx_desc, async_tx)
+
+/* Convert an embedded struct dma_chan back to the driver's channel */
+#define to_xilinx_chan(chan) \
+ container_of(chan, struct xilinx_dpdma_chan, common)
+
+/* IO operations */
+
+/* Read the 32-bit DPDMA register at @base + @offset */
+static inline u32 dpdma_read(void __iomem *base, u32 offset)
+{
+ return ioread32(base + offset);
+}
+
+/* Write @val to the 32-bit DPDMA register at @base + @offset */
+static inline void dpdma_write(void __iomem *base, u32 offset, u32 val)
+{
+ iowrite32(val, base + offset);
+}
+
+/* Read-modify-write: clear the @clr bits of the register at @offset */
+static inline void dpdma_clr(void __iomem *base, u32 offset, u32 clr)
+{
+	u32 val = dpdma_read(base, offset);
+
+	dpdma_write(base, offset, val & ~clr);
+}
+
+/* Read-modify-write: set the @set bits of the register at @offset */
+static inline void dpdma_set(void __iomem *base, u32 offset, u32 set)
+{
+	u32 val = dpdma_read(base, offset);
+
+	dpdma_write(base, offset, val | set);
+}
+
+/* Xilinx DPDMA descriptor operations */
+
+/**
+ * xilinx_dpdma_sw_desc_next_32 - Set 32 bit address of a next sw descriptor
+ * @sw_desc: current software descriptor
+ * @next: next descriptor
+ *
+ * Update the current sw descriptor @sw_desc with 32 bit address of the next
+ * descriptor @next.
+ */
+static inline void
+xilinx_dpdma_sw_desc_next_32(struct xilinx_dpdma_sw_desc *sw_desc,
+ struct xilinx_dpdma_sw_desc *next)
+{
+ /* 32-bit addressing: the whole bus address fits in next_desc */
+ sw_desc->hw.next_desc = next->phys;
+}
+
+/**
+ * xilinx_dpdma_sw_desc_addr_32 - Update the sw descriptor with 32 bit address
+ * @sw_desc: software descriptor
+ * @prev: previous descriptor
+ * @dma_addr: array of dma addresses
+ * @num_src_addr: number of addresses in @dma_addr
+ *
+ * Store @dma_addr[0] as the descriptor's source address, chain @prev to
+ * @sw_desc, and put any remaining fragment addresses into the consecutive
+ * src_addr2.. fields of the hardware descriptor.
+ */
+static void xilinx_dpdma_sw_desc_addr_32(struct xilinx_dpdma_sw_desc *sw_desc,
+					 struct xilinx_dpdma_sw_desc *prev,
+					 dma_addr_t dma_addr[],
+					 unsigned int num_src_addr)
+{
+	struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
+	u32 *frag = &hw_desc->src_addr2;
+	unsigned int i;
+
+	hw_desc->src_addr = dma_addr[0];
+
+	if (prev)
+		xilinx_dpdma_sw_desc_next_32(prev, sw_desc);
+
+	/* Fragment i lands in src_addr(i + 1), i.e. frag[i - 1] */
+	for (i = 1; i < num_src_addr; i++)
+		frag[i - 1] = (u32)dma_addr[i];
+}
+
+/**
+ * xilinx_dpdma_sw_desc_next_64 - Set 64 bit address of a next sw descriptor
+ * @sw_desc: current software descriptor
+ * @next: next descriptor
+ *
+ * Update the current sw descriptor @sw_desc with 64 bit address of the next
+ * descriptor @next.
+ */
+static inline void
+xilinx_dpdma_sw_desc_next_64(struct xilinx_dpdma_sw_desc *sw_desc,
+ struct xilinx_dpdma_sw_desc *next)
+{
+ /* Low 32 bits in next_desc; masked high bits ORed into addr_ext */
+ sw_desc->hw.next_desc = lower_32_bits(next->phys);
+ sw_desc->hw.addr_ext |= upper_32_bits(next->phys) &
+ XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK;
+}
+
+/**
+ * xilinx_dpdma_sw_desc_addr_64 - Update the sw descriptor with 64 bit address
+ * @sw_desc: software descriptor
+ * @prev: previous descriptor
+ * @dma_addr: array of dma addresses
+ * @num_src_addr: number of addresses in @dma_addr
+ *
+ * Update the descriptor @sw_desc with 64 bit addresses: the low 32 bits of
+ * each address go to the src_addrN field, the masked high bits go to the
+ * matching half-word of the addr_ext fields.
+ */
+static void xilinx_dpdma_sw_desc_addr_64(struct xilinx_dpdma_sw_desc *sw_desc,
+					 struct xilinx_dpdma_sw_desc *prev,
+					 dma_addr_t dma_addr[],
+					 unsigned int num_src_addr)
+{
+	struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
+	unsigned int i;
+	u32 src_addr_extn;
+
+	hw_desc->src_addr = lower_32_bits(dma_addr[0]);
+	src_addr_extn = upper_32_bits(dma_addr[0]) &
+			XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK;
+	hw_desc->addr_ext |= (src_addr_extn <<
+			      XILINX_DPDMA_DESC_ADDR_EXT_ADDR_SHIFT);
+
+	if (prev)
+		xilinx_dpdma_sw_desc_next_64(prev, sw_desc);
+
+	for (i = 1; i < num_src_addr; i++) {
+		u32 *addr = &hw_desc->src_addr2;
+		u32 *addr_ext = &hw_desc->addr_ext_23;
+		u64 frag_addr;
+
+		frag_addr = dma_addr[i];
+		/*
+		 * Fragment i belongs in src_addr(i + 1) = addr[i - 1].
+		 * Indexing with plain 'i' skipped src_addr2 and wrote
+		 * fragment 1 into fragment 2's slot; the 32-bit variant
+		 * already indexes with i - 1.
+		 */
+		addr[i - 1] = (u32)frag_addr;
+
+		frag_addr >>= 32;
+		frag_addr &= XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK;
+		frag_addr <<= XILINX_DPDMA_DESC_ADDR_EXT_ADDR_SHIFT *
+			      ((i - 1) % 2);
+		/* OR, not assign: two fragments share each addr_ext word */
+		addr_ext[(i - 1) / 2] |= frag_addr;
+	}
+}
+
+/* Xilinx DPDMA channel descriptor operations */
+
+/**
+ * xilinx_dpdma_chan_alloc_sw_desc - Allocate a software descriptor
+ * @chan: DPDMA channel
+ *
+ * Grab a zeroed software descriptor from the channel's DMA pool and record
+ * its bus address inside it for later chaining and freeing.
+ *
+ * Return: a software descriptor or NULL.
+ */
+static struct xilinx_dpdma_sw_desc *
+xilinx_dpdma_chan_alloc_sw_desc(struct xilinx_dpdma_chan *chan)
+{
+	dma_addr_t phys;
+	struct xilinx_dpdma_sw_desc *sw_desc;
+
+	sw_desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
+	if (sw_desc)
+		sw_desc->phys = phys;
+
+	return sw_desc;
+}
+
+/**
+ * xilinx_dpdma_chan_free_sw_desc - Free a software descriptor
+ * @chan: DPDMA channel
+ * @sw_desc: software descriptor to free
+ *
+ * Free a software descriptor from the channel's descriptor pool.
+ */
+static void
+xilinx_dpdma_chan_free_sw_desc(struct xilinx_dpdma_chan *chan,
+ struct xilinx_dpdma_sw_desc *sw_desc)
+{
+ /* phys is the pool bus address recorded at allocation time */
+ dma_pool_free(chan->desc_pool, sw_desc, sw_desc->phys);
+}
+
+/**
+ * xilinx_dpdma_chan_dump_tx_desc - Dump a tx descriptor
+ * @chan: DPDMA channel
+ * @tx_desc: tx descriptor to dump
+ *
+ * Dump contents of a tx descriptor
+ */
+static void xilinx_dpdma_chan_dump_tx_desc(struct xilinx_dpdma_chan *chan,
+ struct xilinx_dpdma_tx_desc *tx_desc)
+{
+ struct xilinx_dpdma_sw_desc *sw_desc;
+ struct device *dev = chan->xdev->dev;
+ unsigned int i = 0;
+
+ dev_dbg(dev, "------- TX descriptor dump start -------\n");
+ dev_dbg(dev, "------- channel ID = %d -------\n", chan->id);
+
+ /* Walk every hardware descriptor chained in this transaction */
+ list_for_each_entry(sw_desc, &tx_desc->descriptors, node) {
+ struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
+
+ dev_dbg(dev, "------- HW descriptor %d -------\n", i++);
+ dev_dbg(dev, "descriptor phys: %pad\n", &sw_desc->phys);
+ dev_dbg(dev, "control: 0x%08x\n", hw_desc->control);
+ dev_dbg(dev, "desc_id: 0x%08x\n", hw_desc->desc_id);
+ dev_dbg(dev, "xfer_size: 0x%08x\n", hw_desc->xfer_size);
+ dev_dbg(dev, "hsize_stride: 0x%08x\n", hw_desc->hsize_stride);
+ dev_dbg(dev, "timestamp_lsb: 0x%08x\n", hw_desc->timestamp_lsb);
+ dev_dbg(dev, "timestamp_msb: 0x%08x\n", hw_desc->timestamp_msb);
+ dev_dbg(dev, "addr_ext: 0x%08x\n", hw_desc->addr_ext);
+ dev_dbg(dev, "next_desc: 0x%08x\n", hw_desc->next_desc);
+ dev_dbg(dev, "src_addr: 0x%08x\n", hw_desc->src_addr);
+ dev_dbg(dev, "addr_ext_23: 0x%08x\n", hw_desc->addr_ext_23);
+ dev_dbg(dev, "addr_ext_45: 0x%08x\n", hw_desc->addr_ext_45);
+ dev_dbg(dev, "src_addr2: 0x%08x\n", hw_desc->src_addr2);
+ dev_dbg(dev, "src_addr3: 0x%08x\n", hw_desc->src_addr3);
+ dev_dbg(dev, "src_addr4: 0x%08x\n", hw_desc->src_addr4);
+ dev_dbg(dev, "src_addr5: 0x%08x\n", hw_desc->src_addr5);
+ dev_dbg(dev, "crc: 0x%08x\n", hw_desc->crc);
+ }
+
+ dev_dbg(dev, "------- TX descriptor dump end -------\n");
+}
+
+/**
+ * xilinx_dpdma_chan_alloc_tx_desc - Allocate a transaction descriptor
+ * @chan: DPDMA channel
+ *
+ * Allocate a zeroed tx descriptor in the PREPARED state with an empty
+ * software descriptor list.
+ *
+ * Return: a tx descriptor or NULL.
+ */
+static struct xilinx_dpdma_tx_desc *
+xilinx_dpdma_chan_alloc_tx_desc(struct xilinx_dpdma_chan *chan)
+{
+	struct xilinx_dpdma_tx_desc *desc;
+
+	desc = kzalloc(sizeof(*desc), GFP_ATOMIC);
+	if (!desc)
+		return NULL;
+
+	INIT_LIST_HEAD(&desc->descriptors);
+	desc->status = PREPARED;
+
+	return desc;
+}
+
+/**
+ * xilinx_dpdma_chan_free_tx_desc - Free a transaction descriptor
+ * @chan: DPDMA channel
+ * @tx_desc: tx descriptor
+ *
+ * Release every software descriptor owned by @tx_desc back to the
+ * channel's pool, then free @tx_desc itself. A NULL @tx_desc is a no-op.
+ */
+static void
+xilinx_dpdma_chan_free_tx_desc(struct xilinx_dpdma_chan *chan,
+			       struct xilinx_dpdma_tx_desc *tx_desc)
+{
+	if (!tx_desc)
+		return;
+
+	while (!list_empty(&tx_desc->descriptors)) {
+		struct xilinx_dpdma_sw_desc *sw_desc;
+
+		sw_desc = list_first_entry(&tx_desc->descriptors,
+					   struct xilinx_dpdma_sw_desc, node);
+		list_del(&sw_desc->node);
+		xilinx_dpdma_chan_free_sw_desc(chan, sw_desc);
+	}
+
+	kfree(tx_desc);
+}
+
+/**
+ * xilinx_dpdma_chan_submit_tx_desc - Submit a transaction descriptor
+ * @chan: DPDMA channel
+ * @tx_desc: tx descriptor
+ *
+ * Submit the tx descriptor @tx_desc to the channel @chan.
+ *
+ * Return: a cookie assigned to the tx descriptor
+ */
+static dma_cookie_t
+xilinx_dpdma_chan_submit_tx_desc(struct xilinx_dpdma_chan *chan,
+ struct xilinx_dpdma_tx_desc *tx_desc)
+{
+ struct xilinx_dpdma_sw_desc *sw_desc;
+ dma_cookie_t cookie;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ /* A descriptor is already queued: hand back its cookie unchanged */
+ if (chan->submitted_desc) {
+ cookie = chan->submitted_desc->async_tx.cookie;
+ goto out_unlock;
+ }
+
+ cookie = dma_cookie_assign(&tx_desc->async_tx);
+
+ /* Assign the cookie to descriptors in this transaction */
+ /* Only 16 bit will be used, but it should be enough */
+ list_for_each_entry(sw_desc, &tx_desc->descriptors, node)
+ sw_desc->hw.desc_id = cookie;
+
+ /* The submitted descriptor must be the one last prepared */
+ if (tx_desc != chan->allocated_desc)
+ dev_err(chan->xdev->dev, "desc != allocated_desc\n");
+ else
+ chan->allocated_desc = NULL;
+ chan->submitted_desc = tx_desc;
+
+ /* VIDEO1/VIDEO2 operate as a group together with VIDEO0 */
+ if (chan->id == VIDEO1 || chan->id == VIDEO2) {
+ chan->video_group = true;
+ chan->xdev->chan[VIDEO0]->video_group = true;
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return cookie;
+}
+
+/**
+ * xilinx_dpdma_chan_free_desc_list - Free a descriptor list
+ * @chan: DPDMA channel
+ * @list: tx descriptor list
+ *
+ * Unlink and free every tx descriptor on @list.
+ */
+static void xilinx_dpdma_chan_free_desc_list(struct xilinx_dpdma_chan *chan,
+					     struct list_head *list)
+{
+	while (!list_empty(list)) {
+		struct xilinx_dpdma_tx_desc *tx_desc;
+
+		tx_desc = list_first_entry(list, struct xilinx_dpdma_tx_desc,
+					   node);
+		list_del(&tx_desc->node);
+		xilinx_dpdma_chan_free_tx_desc(chan, tx_desc);
+	}
+}
+
+/**
+ * xilinx_dpdma_chan_free_all_desc - Free all descriptors of the channel
+ * @chan: DPDMA channel
+ *
+ * Free all descriptors associated with the channel. The channel should be
+ * disabled before this function is called, otherwise, this function may
+ * result in misbehavior of the system due to remaining outstanding
+ * transactions.
+ */
+static void xilinx_dpdma_chan_free_all_desc(struct xilinx_dpdma_chan *chan)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ dev_dbg(chan->xdev->dev, "chan->status = %s\n",
+ chan->status == STREAMING ? "STREAMING" : "IDLE");
+
+ /* Free descriptors in every lifecycle stage, then the done list */
+ xilinx_dpdma_chan_free_tx_desc(chan, chan->allocated_desc);
+ chan->allocated_desc = NULL;
+ xilinx_dpdma_chan_free_tx_desc(chan, chan->submitted_desc);
+ chan->submitted_desc = NULL;
+ xilinx_dpdma_chan_free_tx_desc(chan, chan->pending_desc);
+ chan->pending_desc = NULL;
+ xilinx_dpdma_chan_free_tx_desc(chan, chan->active_desc);
+ chan->active_desc = NULL;
+ xilinx_dpdma_chan_free_desc_list(chan, &chan->done_list);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dpdma_chan_cleanup_desc - Clean up descriptors
+ * @chan: DPDMA channel
+ *
+ * Trigger the complete callbacks of descriptors with finished transactions.
+ * Free descriptors which are no longer in use.
+ */
+static void xilinx_dpdma_chan_cleanup_desc(struct xilinx_dpdma_chan *chan)
+{
+ struct xilinx_dpdma_tx_desc *desc;
+ dma_async_tx_callback callback;
+ void *callback_param;
+ unsigned long flags;
+ unsigned int cnt, i;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ /* Run callbacks for and free everything on the done list */
+ while (!list_empty(&chan->done_list)) {
+ desc = list_first_entry(&chan->done_list,
+ struct xilinx_dpdma_tx_desc, node);
+ list_del(&desc->node);
+
+ /* done_cnt = interrupts seen; invoke the callback once each */
+ cnt = desc->done_cnt;
+ desc->done_cnt = 0;
+ callback = desc->async_tx.callback;
+ callback_param = desc->async_tx.callback_param;
+ if (callback) {
+ /* Drop the lock across the client callback */
+ spin_unlock_irqrestore(&chan->lock, flags);
+ for (i = 0; i < cnt; i++)
+ callback(callback_param);
+ spin_lock_irqsave(&chan->lock, flags);
+ }
+
+ xilinx_dpdma_chan_free_tx_desc(chan, desc);
+ }
+
+ /* Also drain callbacks accumulated on the still-active descriptor */
+ if (chan->active_desc) {
+ cnt = chan->active_desc->done_cnt;
+ chan->active_desc->done_cnt = 0;
+ callback = chan->active_desc->async_tx.callback;
+ callback_param = chan->active_desc->async_tx.callback_param;
+ if (callback) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ for (i = 0; i < cnt; i++)
+ callback(callback_param);
+ spin_lock_irqsave(&chan->lock, flags);
+ }
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dpdma_chan_desc_active - Set the descriptor as active
+ * @chan: DPDMA channel
+ *
+ * Make the pending descriptor @chan->pending_desc as active. This function
+ * should be called when the channel starts operating on the pending descriptor.
+ */
+static void xilinx_dpdma_chan_desc_active(struct xilinx_dpdma_chan *chan)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ if (!chan->pending_desc)
+ goto out_unlock;
+
+ /* The previous active descriptor is finished: queue it for cleanup */
+ if (chan->active_desc)
+ list_add_tail(&chan->active_desc->node, &chan->done_list);
+
+ chan->active_desc = chan->pending_desc;
+ chan->pending_desc = NULL;
+
+out_unlock:
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dpdma_chan_desc_done_intr - Mark the current descriptor as 'done'
+ * @chan: DPDMA channel
+ *
+ * Mark the current active descriptor @chan->active_desc as 'done'. This
+ * function should be called to mark completion of the currently active
+ * descriptor.
+ */
+static void xilinx_dpdma_chan_desc_done_intr(struct xilinx_dpdma_chan *chan)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ xilinx_dpdma_debugfs_intr_done_count_incr(chan->id);
+
+ if (!chan->active_desc) {
+ dev_dbg(chan->xdev->dev, "done intr with no active desc\n");
+ goto out_unlock;
+ }
+
+ chan->active_desc->done_cnt++;
+ /* First done interrupt for this descriptor completes its cookie */
+ if (chan->active_desc->status == PREPARED) {
+ dma_cookie_complete(&chan->active_desc->async_tx);
+ chan->active_desc->status = ACTIVE;
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&chan->lock, flags);
+ /* Client callbacks run later, from the done tasklet */
+ tasklet_schedule(&chan->done_task);
+}
+
+/**
+ * xilinx_dpdma_chan_prep_slave_sg - Prepare a scatter-gather dma descriptor
+ * @chan: DPDMA channel
+ * @sgl: scatter-gather list
+ *
+ * Prepare a tx descriptor including internal software/hardware descriptors
+ * for the given scatter-gather transaction.
+ *
+ * Return: A dma async tx descriptor on success, or NULL.
+ */
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_chan_prep_slave_sg(struct xilinx_dpdma_chan *chan,
+ struct scatterlist *sgl)
+{
+ struct xilinx_dpdma_tx_desc *tx_desc;
+ struct xilinx_dpdma_sw_desc *sw_desc, *last = NULL;
+ struct scatterlist *iter = sgl;
+ u32 line_size = 0;
+
+ /* An already-prepared but unsubmitted transaction is reused */
+ if (chan->allocated_desc)
+ return &chan->allocated_desc->async_tx;
+
+ tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
+ if (!tx_desc)
+ return NULL;
+
+ /*
+ * Sum the first run of entries (up to the first chain link) as the
+ * line size. NOTE(review): assumes the sg list contains a chain
+ * entry; an unchained list would walk past the end here - confirm
+ * the caller's sg layout contract.
+ */
+ while (!sg_is_chain(iter))
+ line_size += sg_dma_len(iter++);
+
+ /* One hw descriptor per chained segment (one line per segment) */
+ while (sgl) {
+ struct xilinx_dpdma_hw_desc *hw_desc;
+ /* NOTE(review): no bound check against 4 entries below - confirm */
+ dma_addr_t dma_addr[4];
+ unsigned int num_pages = 0;
+
+ sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
+ if (!sw_desc)
+ goto error;
+
+ while (!sg_is_chain(sgl) && !sg_is_last(sgl)) {
+ dma_addr[num_pages] = sg_dma_address(sgl++);
+ if (!IS_ALIGNED(dma_addr[num_pages++],
+ XILINX_DPDMA_ALIGN_BYTES)) {
+ dev_err(chan->xdev->dev,
+ "buffer should be aligned at %d B\n",
+ XILINX_DPDMA_ALIGN_BYTES);
+ /*
+ * NOTE(review): sw_desc is not yet on the
+ * tx list here, so this path appears to
+ * leak it - confirm.
+ */
+ goto error;
+ }
+ }
+
+ /* desc_addr is the 32/64-bit address helper chosen at probe */
+ chan->xdev->desc_addr(sw_desc, last, dma_addr, num_pages);
+ hw_desc = &sw_desc->hw;
+ hw_desc->xfer_size = line_size;
+ hw_desc->hsize_stride =
+ line_size << XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_SHIFT;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_FRAG_MODE;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
+
+ list_add_tail(&sw_desc->node, &tx_desc->descriptors);
+ last = sw_desc;
+ if (sg_is_last(sgl))
+ break;
+ sgl = sg_chain_ptr(sgl);
+ }
+
+ /* Close the ring: last descriptor points back at the first */
+ sw_desc = list_first_entry(&tx_desc->descriptors,
+ struct xilinx_dpdma_sw_desc, node);
+ if (chan->xdev->ext_addr)
+ xilinx_dpdma_sw_desc_next_64(last, sw_desc);
+ else
+ xilinx_dpdma_sw_desc_next_32(last, sw_desc);
+ last->hw.control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
+ last->hw.control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
+
+ chan->allocated_desc = tx_desc;
+
+ return &tx_desc->async_tx;
+
+error:
+ xilinx_dpdma_chan_free_tx_desc(chan, tx_desc);
+
+ return NULL;
+}
+
+/**
+ * xilinx_dpdma_chan_prep_cyclic - Prepare a cyclic dma descriptor
+ * @chan: DPDMA channel
+ * @buf_addr: buffer address
+ * @buf_len: buffer length
+ * @period_len: length of one period in bytes
+ *
+ * Prepare a tx descriptor including internal software/hardware descriptors
+ * for the given cyclic transaction.
+ *
+ * Return: A dma async tx descriptor on success, or NULL.
+ */
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_chan_prep_cyclic(struct xilinx_dpdma_chan *chan,
+ dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len)
+{
+ struct xilinx_dpdma_tx_desc *tx_desc;
+ struct xilinx_dpdma_sw_desc *sw_desc, *last = NULL;
+ /* NOTE(review): assumes buf_len is a multiple of period_len - confirm */
+ unsigned int periods = buf_len / period_len;
+ unsigned int i;
+
+ /* An already-prepared but unsubmitted transaction is reused */
+ if (chan->allocated_desc)
+ return &chan->allocated_desc->async_tx;
+
+ tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
+ if (!tx_desc)
+ return NULL;
+
+ /* One hw descriptor per period */
+ for (i = 0; i < periods; i++) {
+ struct xilinx_dpdma_hw_desc *hw_desc;
+
+ if (!IS_ALIGNED(buf_addr, XILINX_DPDMA_ALIGN_BYTES)) {
+ dev_err(chan->xdev->dev,
+ "buffer should be aligned at %d B\n",
+ XILINX_DPDMA_ALIGN_BYTES);
+ goto error;
+ }
+
+ sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
+ if (!sw_desc)
+ goto error;
+
+ chan->xdev->desc_addr(sw_desc, last, &buf_addr, 1);
+ hw_desc = &sw_desc->hw;
+ hw_desc->xfer_size = period_len;
+ hw_desc->hsize_stride =
+ period_len <<
+ XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_SHIFT;
+ hw_desc->hsize_stride |=
+ period_len <<
+ XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_SHIFT;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
+
+ list_add_tail(&sw_desc->node, &tx_desc->descriptors);
+
+ buf_addr += period_len;
+ last = sw_desc;
+ }
+
+ /* Close the ring: last descriptor points back at the first */
+ sw_desc = list_first_entry(&tx_desc->descriptors,
+ struct xilinx_dpdma_sw_desc, node);
+ if (chan->xdev->ext_addr)
+ xilinx_dpdma_sw_desc_next_64(last, sw_desc);
+ else
+ xilinx_dpdma_sw_desc_next_32(last, sw_desc);
+ last->hw.control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
+
+ chan->allocated_desc = tx_desc;
+
+ return &tx_desc->async_tx;
+
+error:
+ xilinx_dpdma_chan_free_tx_desc(chan, tx_desc);
+
+ return NULL;
+}
+
+/**
+ * xilinx_dpdma_chan_prep_interleaved - Prepare a interleaved dma descriptor
+ * @chan: DPDMA channel
+ * @xt: dma interleaved template
+ *
+ * Prepare a tx descriptor including internal software/hardware descriptors
+ * based on @xt.
+ *
+ * Return: A dma async tx descriptor on success, or NULL.
+ */
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_chan_prep_interleaved(struct xilinx_dpdma_chan *chan,
+ struct dma_interleaved_template *xt)
+{
+ struct xilinx_dpdma_tx_desc *tx_desc;
+ struct xilinx_dpdma_sw_desc *sw_desc;
+ struct xilinx_dpdma_hw_desc *hw_desc;
+ size_t hsize = xt->sgl[0].size;
+ /*
+ * NOTE(review): stride is taken from the unaligned hsize while
+ * xfer_size below uses the aligned hsize - confirm this is intended.
+ */
+ size_t stride = hsize + xt->sgl[0].icg;
+
+ if (!IS_ALIGNED(xt->src_start, XILINX_DPDMA_ALIGN_BYTES)) {
+ dev_err(chan->xdev->dev, "buffer should be aligned at %d B\n",
+ XILINX_DPDMA_ALIGN_BYTES);
+ return NULL;
+ }
+
+ /* An already-prepared but unsubmitted transaction is reused */
+ if (chan->allocated_desc)
+ return &chan->allocated_desc->async_tx;
+
+ tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
+ if (!tx_desc)
+ return NULL;
+
+ sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
+ if (!sw_desc)
+ goto error;
+
+ /* Single descriptor chained to itself: prev == sw_desc */
+ chan->xdev->desc_addr(sw_desc, sw_desc, &xt->src_start, 1);
+ hw_desc = &sw_desc->hw;
+ hsize = ALIGN(hsize, XILINX_DPDMA_LINESIZE_ALIGN_BITS / 8);
+ hw_desc->xfer_size = hsize * xt->numf;
+ hw_desc->hsize_stride = hsize <<
+ XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_SHIFT;
+ /* Hardware takes the stride in 16-byte units */
+ hw_desc->hsize_stride |= (stride / 16) <<
+ XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_SHIFT;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
+ hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
+
+ list_add_tail(&sw_desc->node, &tx_desc->descriptors);
+ chan->allocated_desc = tx_desc;
+
+ return &tx_desc->async_tx;
+
+error:
+ xilinx_dpdma_chan_free_tx_desc(chan, tx_desc);
+
+ return NULL;
+}
+
+/* Xilinx DPDMA channel operations */
+
+/**
+ * xilinx_dpdma_chan_enable - Enable the channel
+ * @chan: DPDMA channel
+ *
+ * Enable the channel and its interrupts. Set the QoS values for video class.
+ */
+static inline void xilinx_dpdma_chan_enable(struct xilinx_dpdma_chan *chan)
+{
+ u32 reg;
+
+ /* Enable this channel's interrupts plus the global interrupt */
+ reg = XILINX_DPDMA_INTR_CHAN_MASK << chan->id;
+ reg |= XILINX_DPDMA_INTR_GLOBAL_MASK;
+ dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
+ /* Enable this channel's error interrupts plus the global error */
+ reg = XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id;
+ reg |= XILINX_DPDMA_INTR_GLOBAL_ERR;
+ dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);
+
+ /* Enable the channel with video-class QoS for all request types */
+ reg = XILINX_DPDMA_CH_CNTL_ENABLE;
+ reg |= XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS <<
+ XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_SHIFT;
+ reg |= XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS <<
+ XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_SHIFT;
+ reg |= XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS <<
+ XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_SHIFT;
+ dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, reg);
+}
+
+/**
+ * xilinx_dpdma_chan_disable - Disable the channel
+ * @chan: DPDMA channel
+ *
+ * Disable the channel and its interrupts.
+ */
+static inline void xilinx_dpdma_chan_disable(struct xilinx_dpdma_chan *chan)
+{
+ u32 reg;
+
+ /*
+ * NOTE(review): these writes target the interrupt *enable* registers
+ * (IEN/EIEN); the disable registers (IDS, as used in
+ * xilinx_dpdma_chan_notify_no_ostand(), and EIDS) look like the
+ * intended targets - confirm against the DPDMA register spec.
+ */
+ reg = XILINX_DPDMA_INTR_CHAN_MASK << chan->id;
+ dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
+ reg = XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id;
+ dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);
+
+ /* Clear the channel enable bit */
+ dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
+}
+
+/**
+ * xilinx_dpdma_chan_pause - Pause the channel
+ * @chan: DPDMA channel
+ *
+ * Pause the channel.
+ */
+static inline void xilinx_dpdma_chan_pause(struct xilinx_dpdma_chan *chan)
+{
+ /* Set the PAUSE control bit */
+ dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
+}
+
+/**
+ * xilinx_dpdma_chan_unpause - Unpause the channel
+ * @chan: DPDMA channel
+ *
+ * Unpause the channel.
+ */
+static inline void xilinx_dpdma_chan_unpause(struct xilinx_dpdma_chan *chan)
+{
+ /* Clear the PAUSE control bit */
+ dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
+}
+
+/*
+ * Build the trigger bitmask of all video-group channels. Returns 0 if any
+ * group member is not yet STREAMING (the group must start together).
+ */
+static u32
+xilinx_dpdma_chan_video_group_ready(struct xilinx_dpdma_chan *chan)
+{
+	struct xilinx_dpdma_device *xdev = chan->xdev;
+	u32 mask = 0;
+	u32 i;
+
+	for (i = VIDEO0; i < GRAPHICS; i++) {
+		if (!xdev->chan[i]->video_group)
+			continue;
+
+		if (xdev->chan[i]->status != STREAMING)
+			return 0;
+
+		mask |= BIT(i);
+	}
+
+	return mask;
+}
+
+/**
+ * xilinx_dpdma_chan_issue_pending - Issue the pending descriptor
+ * @chan: DPDMA channel
+ *
+ * Issue the first pending descriptor from @chan->submitted_desc. If the channel
+ * is already streaming, the channel is re-triggered with the pending
+ * descriptor.
+ */
+static void xilinx_dpdma_chan_issue_pending(struct xilinx_dpdma_chan *chan)
+{
+ struct xilinx_dpdma_device *xdev = chan->xdev;
+ struct xilinx_dpdma_sw_desc *sw_desc;
+ unsigned long flags;
+ u32 reg, channels;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ /* Nothing submitted, or a descriptor is already in flight */
+ if (!chan->submitted_desc || chan->pending_desc)
+ goto out_unlock;
+
+ chan->pending_desc = chan->submitted_desc;
+ chan->submitted_desc = NULL;
+
+ /* Point the channel at the first hw descriptor of the transaction */
+ sw_desc = list_first_entry(&chan->pending_desc->descriptors,
+ struct xilinx_dpdma_sw_desc, node);
+ dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR,
+ (u32)sw_desc->phys);
+ if (xdev->ext_addr)
+ dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE,
+ ((u64)sw_desc->phys >> 32) &
+ XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK);
+
+ /* First frame uses the trigger bits, later frames the retrigger bits */
+ if (chan->first_frame) {
+ chan->first_frame = false;
+ if (chan->video_group) {
+ /* Trigger all group channels together, once all ready */
+ channels = xilinx_dpdma_chan_video_group_ready(chan);
+ if (!channels)
+ goto out_unlock;
+ reg = channels << XILINX_DPDMA_GBL_TRIG_SHIFT;
+ } else {
+ reg = 1 << (XILINX_DPDMA_GBL_TRIG_SHIFT + chan->id);
+ }
+ } else {
+ if (chan->video_group) {
+ channels = xilinx_dpdma_chan_video_group_ready(chan);
+ if (!channels)
+ goto out_unlock;
+ reg = channels << XILINX_DPDMA_GBL_RETRIG_SHIFT;
+ } else {
+ reg = 1 << (XILINX_DPDMA_GBL_RETRIG_SHIFT + chan->id);
+ }
+ }
+
+ dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
+
+out_unlock:
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dpdma_chan_start - Start the channel
+ * @chan: DPDMA channel
+ *
+ * Start the channel by enabling interrupts and triggering the channel.
+ * If the channel is enabled already or there's no pending descriptor, this
+ * function won't do anything on the channel.
+ */
+static void xilinx_dpdma_chan_start(struct xilinx_dpdma_chan *chan)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	/* Only start when work is queued and we are not streaming yet */
+	if (chan->submitted_desc && chan->status != STREAMING) {
+		xilinx_dpdma_chan_unpause(chan);
+		xilinx_dpdma_chan_enable(chan);
+		chan->first_frame = true;
+		chan->status = STREAMING;
+	}
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dpdma_chan_ostand - Number of outstanding transactions
+ * @chan: DPDMA channel
+ *
+ * Read and return the number of outstanding transactions from register.
+ *
+ * Return: Number of outstanding transactions from the status register.
+ */
+static inline u32 xilinx_dpdma_chan_ostand(struct xilinx_dpdma_chan *chan)
+{
+	u32 status = dpdma_read(chan->reg, XILINX_DPDMA_CH_STATUS);
+
+	return (status >> XILINX_DPDMA_CH_STATUS_OTRAN_CNT_SHIFT) &
+	       XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK;
+}
+
+/**
+ * xilinx_dpdma_chan_no_ostand - Notify no outstanding transaction event
+ * @chan: DPDMA channel
+ *
+ * Notify waiters for no outstanding event, so waiters can stop the channel
+ * safely. This function is supposed to be called when 'no outstanding'
+ * interrupt is generated. The 'no outstanding' interrupt is disabled and
+ * should be re-enabled when this event is handled. If the channel status
+ * register still shows some number of outstanding transactions, the interrupt
+ * remains enabled.
+ *
+ * Return: 0 on success. On failure, -EWOULDBLOCK if there's still outstanding
+ * transaction(s).
+ */
+static int xilinx_dpdma_chan_notify_no_ostand(struct xilinx_dpdma_chan *chan)
+{
+ u32 cnt;
+
+ cnt = xilinx_dpdma_chan_ostand(chan);
+ if (cnt) {
+ dev_dbg(chan->xdev->dev, "%d outstanding transactions\n", cnt);
+ return -EWOULDBLOCK;
+ }
+
+ /* Disable 'no outstanding' interrupt */
+ dpdma_write(chan->xdev->reg, XILINX_DPDMA_IDS,
+ 1 << (XILINX_DPDMA_INTR_NO_OSTAND_SHIFT + chan->id));
+ /* Wake anyone sleeping in xilinx_dpdma_chan_wait_no_ostand() */
+ wake_up(&chan->wait_to_stop);
+
+ return 0;
+}
+
+/**
+ * xilinx_dpdma_chan_wait_no_ostand - Wait for the no outstanding intr
+ * @chan: DPDMA channel
+ *
+ * Wait for the no outstanding transaction interrupt. This function can sleep
+ * for 50ms.
+ *
+ * Return: 0 on success. On failure, -ETIMEDOUT for time out, or the error code
+ * from wait_event_interruptible_timeout().
+ */
+static int xilinx_dpdma_chan_wait_no_ostand(struct xilinx_dpdma_chan *chan)
+{
+ int ret;
+
+ /* Wait for a no outstanding transaction interrupt up to 50msec */
+ ret = wait_event_interruptible_timeout(chan->wait_to_stop,
+ !xilinx_dpdma_chan_ostand(chan),
+ msecs_to_jiffies(50));
+ if (ret > 0) {
+ /* Re-enable the 'no outstanding' interrupt for the next stop */
+ dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
+ 1 <<
+ (XILINX_DPDMA_INTR_NO_OSTAND_SHIFT + chan->id));
+ return 0;
+ }
+
+ dev_err(chan->xdev->dev, "not ready to stop: %d trans\n",
+ xilinx_dpdma_chan_ostand(chan));
+
+ /* ret == 0 means the wait timed out; negative is -ERESTARTSYS */
+ if (ret == 0)
+ return -ETIMEDOUT;
+
+ return ret;
+}
+
+/**
+ * xilinx_dpdma_chan_poll_no_ostand - Poll the outstanding transaction status
+ * @chan: DPDMA channel
+ *
+ * Poll the outstanding transaction status, and return when there's no
+ * outstanding transaction. This function can be used in the interrupt context
+ * or where the atomicity is required. Calling thread may wait more than 50ms.
+ *
+ * Return: 0 on success, or -ETIMEDOUT.
+ */
+static int xilinx_dpdma_chan_poll_no_ostand(struct xilinx_dpdma_chan *chan)
+{
+	u32 cnt, loop = 50000;
+
+	/* Poll at least for 50ms (20 fps). */
+	do {
+		cnt = xilinx_dpdma_chan_ostand(chan);
+		udelay(1);
+	} while (loop-- > 0 && cnt);
+
+	/*
+	 * Decide on @cnt, not on @loop: the unsigned post-decrement wraps
+	 * @loop to UINT_MAX on timeout, so 'if (loop)' reported success
+	 * even while transactions were still outstanding.
+	 */
+	if (!cnt) {
+		/* Re-enable the 'no outstanding' interrupt for the next stop */
+		dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
+			    1 <<
+			    (XILINX_DPDMA_INTR_NO_OSTAND_SHIFT + chan->id));
+		return 0;
+	}
+
+	dev_err(chan->xdev->dev, "not ready to stop: %d trans\n",
+		xilinx_dpdma_chan_ostand(chan));
+
+	return -ETIMEDOUT;
+}
+
+/**
+ * xilinx_dpdma_chan_stop - Stop the channel
+ * @chan: DPDMA channel
+ * @poll: flag whether to poll or wait
+ *
+ * Stop the channel with the following sequence: 1. Pause, 2. Wait (sleep) for
+ * no outstanding transaction interrupt, 3. Disable the channel.
+ *
+ * Return: 0 on success, or an error from xilinx_dpdma_chan_poll/wait_ostand().
+ */
+static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan, bool poll)
+{
+	unsigned long flags;
+	/*
+	 * Must be int, not bool: poll/wait return negative error codes
+	 * (-ETIMEDOUT, ...); a bool collapsed them to true and made this
+	 * function return 1 instead of the errno.
+	 */
+	int ret;
+
+	xilinx_dpdma_chan_pause(chan);
+	if (poll)
+		ret = xilinx_dpdma_chan_poll_no_ostand(chan);
+	else
+		ret = xilinx_dpdma_chan_wait_no_ostand(chan);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&chan->lock, flags);
+	xilinx_dpdma_chan_disable(chan);
+	chan->status = IDLE;
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	return 0;
+}
+
+/**
+ * xilinx_dpdma_chan_alloc_resources - Allocate resources for the channel
+ * @chan: DPDMA channel
+ *
+ * Create the DMA descriptor pool used for this channel's software
+ * descriptors, aligned for struct xilinx_dpdma_sw_desc.
+ *
+ * Return: 0 on success, or -ENOMEM if failed to allocate a pool.
+ */
+static int xilinx_dpdma_chan_alloc_resources(struct xilinx_dpdma_chan *chan)
+{
+	struct device *dev = chan->xdev->dev;
+
+	chan->desc_pool = dma_pool_create(dev_name(dev), dev,
+					  sizeof(struct xilinx_dpdma_sw_desc),
+					  __alignof__(struct xilinx_dpdma_sw_desc),
+					  0);
+	if (chan->desc_pool)
+		return 0;
+
+	dev_err(dev, "failed to allocate a descriptor pool\n");
+	return -ENOMEM;
+}
+
+/**
+ * xilinx_dpdma_chan_free_resources - Free all resources for the channel
+ * @chan: DPDMA channel
+ *
+ * Free all descriptors and the descriptor pool for the channel.
+ */
+static void xilinx_dpdma_chan_free_resources(struct xilinx_dpdma_chan *chan)
+{
+	/* Return every descriptor to the pool before destroying it. */
+	xilinx_dpdma_chan_free_all_desc(chan);
+	dma_pool_destroy(chan->desc_pool);
+	/* Clear the pointer so a later alloc/free cycle starts clean. */
+	chan->desc_pool = NULL;
+}
+
+/**
+ * xilinx_dpdma_chan_terminate_all - Terminate the channel and descriptors
+ * @chan: DPDMA channel
+ *
+ * Stop the channel and free all associated descriptors. Poll the no outstanding
+ * transaction interrupt as this can be called from an atomic context.
+ *
+ * Return: 0 on success, or the error code from xilinx_dpdma_chan_stop().
+ */
+static int xilinx_dpdma_chan_terminate_all(struct xilinx_dpdma_chan *chan)
+{
+	struct xilinx_dpdma_device *xdev = chan->xdev;
+	int ret;
+	unsigned int i;
+
+	/*
+	 * Video channels operate as a synchronized group: pause every other
+	 * streaming group member and drop it from the group before stopping
+	 * this channel.
+	 * NOTE(review): xdev->chan[i] is dereferenced without a NULL check
+	 * here, unlike the other loops in this file — presumably a channel
+	 * can only be in the video group if it was probed; confirm.
+	 */
+	if (chan->video_group) {
+		for (i = VIDEO0; i < GRAPHICS; i++) {
+			if (xdev->chan[i]->video_group &&
+			    xdev->chan[i]->status == STREAMING) {
+				xilinx_dpdma_chan_pause(xdev->chan[i]);
+				xdev->chan[i]->video_group = false;
+			}
+		}
+	}
+
+	/* Poll (not sleep): terminate_all may be called in atomic context. */
+	ret = xilinx_dpdma_chan_stop(chan, true);
+	if (ret)
+		return ret;
+
+	xilinx_dpdma_chan_free_all_desc(chan);
+
+	return 0;
+}
+
+/**
+ * xilinx_dpdma_chan_synchronize - Synchronize all outgoing transfer
+ * @chan: DPDMA channel
+ *
+ * Stop the channel and free all associated descriptors. As this can't be
+ * called in an atomic context, sleep-wait for no outstanding transaction
+ * interrupt. Then kill all related tasklets.
+ *
+ * Return: 0 on success, or the error code from xilinx_dpdma_chan_stop().
+ */
+static int xilinx_dpdma_chan_synchronize(struct xilinx_dpdma_chan *chan)
+{
+	struct xilinx_dpdma_device *xdev = chan->xdev;
+	int ret;
+	unsigned int i;
+
+	/*
+	 * Pause the rest of the video group first (same teardown as
+	 * terminate_all) so the synchronized channels stop together.
+	 */
+	if (chan->video_group) {
+		for (i = VIDEO0; i < GRAPHICS; i++) {
+			if (xdev->chan[i]->video_group &&
+			    xdev->chan[i]->status == STREAMING) {
+				xilinx_dpdma_chan_pause(xdev->chan[i]);
+				xdev->chan[i]->video_group = false;
+			}
+		}
+	}
+
+	/* Sleep-wait: synchronize is only called from sleepable context. */
+	ret = xilinx_dpdma_chan_stop(chan, false);
+	if (ret)
+		return ret;
+
+	/* Channel is stopped, so the tasklets can no longer be rescheduled. */
+	tasklet_kill(&chan->err_task);
+	tasklet_kill(&chan->done_task);
+	xilinx_dpdma_chan_free_all_desc(chan);
+
+	return 0;
+}
+
+/**
+ * xilinx_dpdma_chan_err - Detect any channel error
+ * @chan: DPDMA channel, may be NULL
+ * @isr: masked Interrupt Status Register
+ * @eisr: Error Interrupt Status Register
+ *
+ * A channel is in error only while streaming and when either status
+ * register carries an error bit for this channel's ID.
+ *
+ * Return: true if any channel error occurs, or false otherwise.
+ */
+static bool
+xilinx_dpdma_chan_err(struct xilinx_dpdma_chan *chan, u32 isr, u32 eisr)
+{
+	if (!chan || chan->status != STREAMING)
+		return false;
+
+	if (isr & (XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id))
+		return true;
+
+	return !!(eisr & (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id));
+}
+
+/**
+ * xilinx_dpdma_chan_handle_err - DPDMA channel error handling
+ * @chan: DPDMA channel
+ *
+ * This function is called when any channel error or any global error occurs.
+ * The function disables the paused channel by errors and determines
+ * if the current active descriptor can be rescheduled depending on
+ * the descriptor status.
+ */
+static void xilinx_dpdma_chan_handle_err(struct xilinx_dpdma_chan *chan)
+{
+	struct xilinx_dpdma_device *xdev = chan->xdev;
+	struct device *dev = xdev->dev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	/* Dump the 64-bit descriptor/payload addresses for diagnostics. */
+	dev_dbg(dev, "cur desc addr = 0x%04x%08x\n",
+		dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE),
+		dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR));
+	dev_dbg(dev, "cur payload addr = 0x%04x%08x\n",
+		dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDRE),
+		dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDR));
+
+	xilinx_dpdma_chan_disable(chan);
+	chan->status = IDLE;
+
+	/* Nothing in flight: just leave the channel disabled and idle. */
+	if (!chan->active_desc)
+		goto out_unlock;
+
+	xilinx_dpdma_chan_dump_tx_desc(chan, chan->active_desc);
+
+	/* ERRORED, ACTIVE and PREPARED all take the same recovery path. */
+	switch (chan->active_desc->status) {
+	case ERRORED:
+		dev_dbg(dev, "repeated error on desc\n");
+		/* fall-through */
+	case ACTIVE:
+		/* fall-through */
+	case PREPARED:
+		/* Reschedule if there's no new descriptor */
+		if (!chan->pending_desc && !chan->submitted_desc) {
+			/* Mark it ERRORED so a second failure is detected. */
+			chan->active_desc->status = ERRORED;
+			chan->submitted_desc = chan->active_desc;
+		} else {
+			/* Newer work exists; drop the failed descriptor. */
+			xilinx_dpdma_chan_free_tx_desc(chan, chan->active_desc);
+		}
+		break;
+	}
+	chan->active_desc = NULL;
+
+out_unlock:
+	spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/* DMA tx descriptor */
+
+/* Submit callback installed on every prepared async_tx descriptor. */
+static dma_cookie_t xilinx_dpdma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct xilinx_dpdma_tx_desc *desc = to_dpdma_tx_desc(tx);
+
+	return xilinx_dpdma_chan_submit_tx_desc(to_xilinx_chan(tx->chan),
+						desc);
+}
+
+/* DMA channel operations */
+
+/*
+ * Prepare a slave scatter-gather transaction. Only memory-to-device is
+ * supported and the scatterlist must have at least two entries.
+ */
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
+			   unsigned int sg_len,
+			   enum dma_transfer_direction direction,
+			   unsigned long flags, void *context)
+{
+	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+	struct dma_async_tx_descriptor *tx;
+
+	if (direction != DMA_MEM_TO_DEV || !sgl || sg_len < 2)
+		return NULL;
+
+	tx = xilinx_dpdma_chan_prep_slave_sg(chan, sgl);
+	if (!tx)
+		return NULL;
+
+	dma_async_tx_descriptor_init(tx, dchan);
+	tx->tx_submit = xilinx_dpdma_tx_submit;
+	tx->flags = flags;
+	async_tx_ack(tx);
+
+	return tx;
+}
+
+/*
+ * Prepare a cyclic (continuously repeating) transaction. Only
+ * memory-to-device is supported; the buffer must contain a whole number
+ * of non-zero-length periods.
+ */
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t buf_addr,
+			     size_t buf_len, size_t period_len,
+			     enum dma_transfer_direction direction,
+			     unsigned long flags)
+{
+	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+	struct dma_async_tx_descriptor *async_tx;
+
+	if (direction != DMA_MEM_TO_DEV)
+		return NULL;
+
+	/*
+	 * Reject a zero period up front: the modulo below would otherwise
+	 * divide by zero on a malformed request.
+	 */
+	if (!period_len || buf_len % period_len)
+		return NULL;
+
+	async_tx = xilinx_dpdma_chan_prep_cyclic(chan, buf_addr, buf_len,
+						 period_len);
+	if (!async_tx)
+		return NULL;
+
+	dma_async_tx_descriptor_init(async_tx, dchan);
+	async_tx->tx_submit = xilinx_dpdma_tx_submit;
+	async_tx->flags = flags;
+	async_tx_ack(async_tx);
+
+	return async_tx;
+}
+
+/*
+ * Prepare an interleaved (2D frame) transaction from a DMA template.
+ * Requires a memory-to-device template with a non-zero frame count and
+ * a non-zero first-chunk size.
+ */
+static struct dma_async_tx_descriptor *
+xilinx_dpdma_prep_interleaved_dma(struct dma_chan *dchan,
+				  struct dma_interleaved_template *xt,
+				  unsigned long flags)
+{
+	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+	struct dma_async_tx_descriptor *tx;
+
+	if (xt->dir != DMA_MEM_TO_DEV || !xt->numf || !xt->sgl[0].size)
+		return NULL;
+
+	tx = xilinx_dpdma_chan_prep_interleaved(chan, xt);
+	if (!tx)
+		return NULL;
+
+	dma_async_tx_descriptor_init(tx, dchan);
+	tx->tx_submit = xilinx_dpdma_tx_submit;
+	tx->flags = flags;
+	async_tx_ack(tx);
+
+	return tx;
+}
+
+/* dmaengine .device_alloc_chan_resources: init cookies, create desc pool. */
+static int xilinx_dpdma_alloc_chan_resources(struct dma_chan *dchan)
+{
+	dma_cookie_init(dchan);
+
+	return xilinx_dpdma_chan_alloc_resources(to_xilinx_chan(dchan));
+}
+
+/* dmaengine .device_free_chan_resources: release descriptors and pool. */
+static void xilinx_dpdma_free_chan_resources(struct dma_chan *dchan)
+{
+	xilinx_dpdma_chan_free_resources(to_xilinx_chan(dchan));
+}
+
+/*
+ * dmaengine .device_tx_status: no residue reporting is implemented, so the
+ * generic cookie bookkeeping answers completion queries.
+ */
+static enum dma_status xilinx_dpdma_tx_status(struct dma_chan *dchan,
+					      dma_cookie_t cookie,
+					      struct dma_tx_state *txstate)
+{
+	return dma_cookie_status(dchan, cookie, txstate);
+}
+
+/* dmaengine .device_issue_pending: kick the channel, push queued work. */
+static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
+{
+	struct xilinx_dpdma_chan *xchan = to_xilinx_chan(dchan);
+
+	xilinx_dpdma_chan_start(xchan);
+	xilinx_dpdma_chan_issue_pending(xchan);
+}
+
+/* dmaengine .device_config: only memory-to-device transfers are allowed. */
+static int xilinx_dpdma_config(struct dma_chan *dchan,
+			       struct dma_slave_config *config)
+{
+	return config->direction == DMA_MEM_TO_DEV ? 0 : -EINVAL;
+}
+
+/* dmaengine .device_pause: pausing the hardware channel cannot fail. */
+static int xilinx_dpdma_pause(struct dma_chan *dchan)
+{
+	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+
+	xilinx_dpdma_chan_pause(chan);
+
+	return 0;
+}
+
+/* dmaengine .device_resume: unpausing the hardware channel cannot fail. */
+static int xilinx_dpdma_resume(struct dma_chan *dchan)
+{
+	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+
+	xilinx_dpdma_chan_unpause(chan);
+
+	return 0;
+}
+
+/* dmaengine .device_terminate_all: may run in atomic context. */
+static int xilinx_dpdma_terminate_all(struct dma_chan *dchan)
+{
+	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+
+	return xilinx_dpdma_chan_terminate_all(chan);
+}
+
+/* dmaengine .device_synchronize: sleepable; waits out in-flight work. */
+static void xilinx_dpdma_synchronize(struct dma_chan *dchan)
+{
+	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+
+	xilinx_dpdma_chan_synchronize(chan);
+}
+
+/* Xilinx DPDMA device operations */
+
+/**
+ * xilinx_dpdma_err - Detect any global error
+ * @isr: Interrupt Status Register
+ * @eisr: Error Interrupt Status Register
+ *
+ * Return: True if any global error occurs, or false otherwise.
+ */
+static bool xilinx_dpdma_err(u32 isr, u32 eisr)
+{
+	return (isr & XILINX_DPDMA_INTR_GLOBAL_ERR) ||
+	       (eisr & XILINX_DPDMA_EINTR_GLOBAL_ERR);
+}
+
+/**
+ * xilinx_dpdma_handle_err_intr - Handle DPDMA error interrupt
+ * @xdev: DPDMA device
+ * @isr: masked Interrupt Status Register
+ * @eisr: Error Interrupt Status Register
+ *
+ * Handle if any error occurs based on @isr and @eisr. This function disables
+ * corresponding error interrupts, and those should be re-enabled once handling
+ * is done (see xilinx_dpdma_chan_err_task()).
+ */
+static void xilinx_dpdma_handle_err_intr(struct xilinx_dpdma_device *xdev,
+					 u32 isr, u32 eisr)
+{
+	bool err = xilinx_dpdma_err(isr, eisr);
+	unsigned int i;
+
+	dev_dbg_ratelimited(xdev->dev,
+			    "error intr: isr = 0x%08x, eisr = 0x%08x\n",
+			    isr, eisr);
+
+	/* Disable channel error interrupts until errors are handled. */
+	dpdma_write(xdev->reg, XILINX_DPDMA_IDS,
+		    isr & ~XILINX_DPDMA_INTR_GLOBAL_ERR);
+	dpdma_write(xdev->reg, XILINX_DPDMA_EIDS,
+		    eisr & ~XILINX_DPDMA_EINTR_GLOBAL_ERR);
+
+	/*
+	 * Guard against unpopulated channel slots: channels are only created
+	 * for nodes present in the device tree. On a global error the old
+	 * 'err ||' short-circuit skipped the NULL check inside
+	 * xilinx_dpdma_chan_err() and dereferenced a NULL chan[i].
+	 */
+	for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++)
+		if (xdev->chan[i] &&
+		    (err || xilinx_dpdma_chan_err(xdev->chan[i], isr, eisr)))
+			tasklet_schedule(&xdev->chan[i]->err_task);
+}
+
+/**
+ * xilinx_dpdma_handle_vsync_intr - Handle the VSYNC interrupt
+ * @xdev: DPDMA device
+ *
+ * Handle the VSYNC event. At this point, the current frame becomes active,
+ * which means the DPDMA actually starts fetching, and the next frame can be
+ * scheduled.
+ */
+static void xilinx_dpdma_handle_vsync_intr(struct xilinx_dpdma_device *xdev)
+{
+	unsigned int i;
+
+	for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++) {
+		struct xilinx_dpdma_chan *chan = xdev->chan[i];
+
+		if (!chan || chan->status != STREAMING)
+			continue;
+
+		xilinx_dpdma_chan_desc_active(chan);
+		xilinx_dpdma_chan_issue_pending(chan);
+	}
+}
+
+/**
+ * xilinx_dpdma_enable_intr - Enable interrupts
+ * @xdev: DPDMA device
+ *
+ * Enable all interrupts in both the normal (IEN) and error (EIEN)
+ * interrupt-enable registers.
+ */
+static void xilinx_dpdma_enable_intr(struct xilinx_dpdma_device *xdev)
+{
+	dpdma_write(xdev->reg, XILINX_DPDMA_IEN, XILINX_DPDMA_INTR_ALL);
+	dpdma_write(xdev->reg, XILINX_DPDMA_EIEN, XILINX_DPDMA_EINTR_ALL);
+}
+
+/**
+ * xilinx_dpdma_disable_intr - Disable interrupts
+ * @xdev: DPDMA device
+ *
+ * Disable interrupts.
+ *
+ * NOTE(review): this is asymmetric with xilinx_dpdma_enable_intr() — only
+ * INTR_ERR_ALL is masked in IDS while enable sets INTR_ALL in IEN.
+ * Presumably the done/no-ostand interrupts are managed per-channel by the
+ * stop path; confirm this is intentional.
+ */
+static void xilinx_dpdma_disable_intr(struct xilinx_dpdma_device *xdev)
+{
+	dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ERR_ALL);
+	dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
+}
+
+/* Interrupt handling operations*/
+
+/**
+ * xilinx_dpdma_chan_err_task - Per channel tasklet for error handling
+ * @data: tasklet data to be casted to DPDMA channel structure
+ *
+ * Per channel error handling tasklet. This function waits for the outstanding
+ * transaction to complete and triggers error handling. After error handling,
+ * re-enable channel error interrupts, and restart the channel if needed.
+ */
+static void xilinx_dpdma_chan_err_task(unsigned long data)
+{
+	struct xilinx_dpdma_chan *chan = (struct xilinx_dpdma_chan *)data;
+	struct xilinx_dpdma_device *xdev = chan->xdev;
+
+	/* Proceed error handling even when polling fails. */
+	xilinx_dpdma_chan_poll_no_ostand(chan);
+
+	/* Disables the channel and reschedules/frees the active descriptor. */
+	xilinx_dpdma_chan_handle_err(chan);
+
+	/*
+	 * Re-enable the per-channel error interrupts that
+	 * xilinx_dpdma_handle_err_intr() masked before scheduling us.
+	 */
+	dpdma_write(xdev->reg, XILINX_DPDMA_IEN,
+		    XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id);
+	dpdma_write(xdev->reg, XILINX_DPDMA_EIEN,
+		    XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);
+
+	/* Restart so a rescheduled descriptor (if any) is fetched again. */
+	xilinx_dpdma_chan_start(chan);
+	xilinx_dpdma_chan_issue_pending(chan);
+}
+
+/**
+ * xilinx_dpdma_chan_done_task - Per channel tasklet for done interrupt handling
+ * @data: tasklet data to be casted to DPDMA channel structure
+ *
+ * Per channel done interrupt handling tasklet: runs descriptor cleanup
+ * outside of hard-irq context.
+ */
+static void xilinx_dpdma_chan_done_task(unsigned long data)
+{
+	struct xilinx_dpdma_chan *chan;
+
+	chan = (struct xilinx_dpdma_chan *)data;
+	xilinx_dpdma_chan_cleanup_desc(chan);
+}
+
+/*
+ * Top-half interrupt handler: ack everything that is pending, then fan the
+ * status bits out to the vsync, done, no-ostand and error handlers.
+ */
+static irqreturn_t xilinx_dpdma_irq_handler(int irq, void *data)
+{
+	struct xilinx_dpdma_device *xdev = data;
+	u32 status, error, i;
+	unsigned long masked;
+
+	status = dpdma_read(xdev->reg, XILINX_DPDMA_ISR);
+	error = dpdma_read(xdev->reg, XILINX_DPDMA_EISR);
+	/* Shared IRQ line: nothing pending means it wasn't ours. */
+	if (!status && !error)
+		return IRQ_NONE;
+
+	/* Write-to-clear: ack before handling so no edge is lost. */
+	dpdma_write(xdev->reg, XILINX_DPDMA_ISR, status);
+	dpdma_write(xdev->reg, XILINX_DPDMA_EISR, error);
+
+	if (status & XILINX_DPDMA_INTR_VSYNC)
+		xilinx_dpdma_handle_vsync_intr(xdev);
+
+	/* One DESC_DONE bit per channel; dispatch each set bit. */
+	masked = (status & XILINX_DPDMA_INTR_DESC_DONE_MASK) >>
+		 XILINX_DPDMA_INTR_DESC_DONE_SHIFT;
+	if (masked)
+		for_each_set_bit(i, &masked, XILINX_DPDMA_NUM_CHAN)
+			xilinx_dpdma_chan_desc_done_intr(xdev->chan[i]);
+
+	/* Wake waiters in xilinx_dpdma_chan_wait_no_ostand(). */
+	masked = (status & XILINX_DPDMA_INTR_NO_OSTAND_MASK) >>
+		 XILINX_DPDMA_INTR_NO_OSTAND_SHIFT;
+	if (masked)
+		for_each_set_bit(i, &masked, XILINX_DPDMA_NUM_CHAN)
+			xilinx_dpdma_chan_notify_no_ostand(xdev->chan[i]);
+
+	masked = status & XILINX_DPDMA_INTR_ERR_ALL;
+	if (masked || error)
+		xilinx_dpdma_handle_err_intr(xdev, masked, error);
+
+	return IRQ_HANDLED;
+}
+
+/* Initialization operations */
+
+/*
+ * Allocate and initialize one DPDMA channel from its DT child node. The
+ * channel ID is derived from the node's compatible string; the channel is
+ * linked into both the dmaengine channel list and xdev->chan[].
+ *
+ * Return: the new channel, or an ERR_PTR on unknown compatible / OOM.
+ */
+static struct xilinx_dpdma_chan *
+xilinx_dpdma_chan_probe(struct device_node *node,
+			struct xilinx_dpdma_device *xdev)
+{
+	struct xilinx_dpdma_chan *chan;
+
+	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
+	if (!chan)
+		return ERR_PTR(-ENOMEM);
+
+	/* Map the DT compatible string to the fixed hardware channel ID. */
+	if (of_device_is_compatible(node, "xlnx,video0")) {
+		chan->id = VIDEO0;
+	} else if (of_device_is_compatible(node, "xlnx,video1")) {
+		chan->id = VIDEO1;
+	} else if (of_device_is_compatible(node, "xlnx,video2")) {
+		chan->id = VIDEO2;
+	} else if (of_device_is_compatible(node, "xlnx,graphics")) {
+		chan->id = GRAPHICS;
+	} else if (of_device_is_compatible(node, "xlnx,audio0")) {
+		chan->id = AUDIO0;
+	} else if (of_device_is_compatible(node, "xlnx,audio1")) {
+		chan->id = AUDIO1;
+	} else {
+		dev_err(xdev->dev, "invalid channel compatible string in DT\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Per-channel register window inside the DPDMA block. */
+	chan->reg = xdev->reg + XILINX_DPDMA_CH_BASE + XILINX_DPDMA_CH_OFFSET *
+		    chan->id;
+	chan->status = IDLE;
+
+	spin_lock_init(&chan->lock);
+	INIT_LIST_HEAD(&chan->done_list);
+	init_waitqueue_head(&chan->wait_to_stop);
+
+	tasklet_init(&chan->done_task, xilinx_dpdma_chan_done_task,
+		     (unsigned long)chan);
+	tasklet_init(&chan->err_task, xilinx_dpdma_chan_err_task,
+		     (unsigned long)chan);
+
+	chan->common.device = &xdev->common;
+	chan->xdev = xdev;
+
+	list_add_tail(&chan->common.device_node, &xdev->common.channels);
+	xdev->chan[chan->id] = chan;
+
+	return chan;
+}
+
+/* Quiesce a channel's deferred work and unlink it from the device list. */
+static void xilinx_dpdma_chan_remove(struct xilinx_dpdma_chan *chan)
+{
+	tasklet_kill(&chan->err_task);
+	tasklet_kill(&chan->done_task);
+	list_del(&chan->common.device_node);
+}
+
+/*
+ * DT translation callback: the single dma-cell is the channel ID.
+ * Returns NULL for out-of-range or unpopulated channels.
+ */
+static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
+					    struct of_dma *ofdma)
+{
+	struct xilinx_dpdma_device *xdev = ofdma->of_dma_data;
+	u32 id = dma_spec->args[0];
+
+	if (id >= XILINX_DPDMA_NUM_CHAN || !xdev->chan[id])
+		return NULL;
+
+	return dma_get_slave_channel(&xdev->chan[id]->common);
+}
+
+/*
+ * Probe the DPDMA platform device: map registers, request the (shared) IRQ,
+ * register the dmaengine device and its DT translation hook, create one
+ * channel per DT child node, and enable interrupts.
+ *
+ * Return: 0 on success, or a negative error code.
+ */
+static int xilinx_dpdma_probe(struct platform_device *pdev)
+{
+	struct xilinx_dpdma_device *xdev;
+	struct xilinx_dpdma_chan *chan;
+	struct dma_device *ddev;
+	struct resource *res;
+	struct device_node *node, *child;
+	u32 i;
+	int irq, ret;
+
+	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+	if (!xdev)
+		return -ENOMEM;
+
+	xdev->dev = &pdev->dev;
+	ddev = &xdev->common;
+	ddev->dev = &pdev->dev;
+	node = xdev->dev->of_node;
+	/*
+	 * Store the driver data: xilinx_dpdma_remove() retrieves it with
+	 * platform_get_drvdata() and would oops on NULL without this.
+	 */
+	platform_set_drvdata(pdev, xdev);
+
+	xdev->axi_clk = devm_clk_get(xdev->dev, "axi_clk");
+	if (IS_ERR(xdev->axi_clk))
+		return PTR_ERR(xdev->axi_clk);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	xdev->reg = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(xdev->reg))
+		return PTR_ERR(xdev->reg);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(xdev->dev, "failed to get platform irq\n");
+		return irq;
+	}
+
+	ret = devm_request_irq(xdev->dev, irq, xilinx_dpdma_irq_handler,
+			       IRQF_SHARED, dev_name(xdev->dev), xdev);
+	if (ret) {
+		dev_err(xdev->dev, "failed to request IRQ\n");
+		return ret;
+	}
+
+	INIT_LIST_HEAD(&xdev->common.channels);
+	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
+	dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
+	dma_cap_set(DMA_CYCLIC, ddev->cap_mask);
+	dma_cap_set(DMA_INTERLEAVE, ddev->cap_mask);
+	ddev->copy_align = fls(XILINX_DPDMA_ALIGN_BYTES - 1);
+
+	ddev->device_alloc_chan_resources = xilinx_dpdma_alloc_chan_resources;
+	ddev->device_free_chan_resources = xilinx_dpdma_free_chan_resources;
+	ddev->device_prep_slave_sg = xilinx_dpdma_prep_slave_sg;
+	ddev->device_prep_dma_cyclic = xilinx_dpdma_prep_dma_cyclic;
+	ddev->device_prep_interleaved_dma = xilinx_dpdma_prep_interleaved_dma;
+	ddev->device_tx_status = xilinx_dpdma_tx_status;
+	ddev->device_issue_pending = xilinx_dpdma_issue_pending;
+	ddev->device_config = xilinx_dpdma_config;
+	ddev->device_pause = xilinx_dpdma_pause;
+	ddev->device_resume = xilinx_dpdma_resume;
+	ddev->device_terminate_all = xilinx_dpdma_terminate_all;
+	ddev->device_synchronize = xilinx_dpdma_synchronize;
+	ddev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED);
+	ddev->directions = BIT(DMA_MEM_TO_DEV);
+	ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
+	for_each_child_of_node(node, child) {
+		chan = xilinx_dpdma_chan_probe(child, xdev);
+		if (IS_ERR(chan)) {
+			dev_err(xdev->dev, "failed to probe a channel\n");
+			ret = PTR_ERR(chan);
+			/*
+			 * Breaking out of for_each_child_of_node() leaves
+			 * 'child' with an elevated refcount; drop it.
+			 */
+			of_node_put(child);
+			goto error;
+		}
+	}
+
+	/* Pick 32/64-bit descriptor writers based on dma_addr_t width. */
+	xdev->ext_addr = sizeof(dma_addr_t) > 4;
+	if (xdev->ext_addr)
+		xdev->desc_addr = xilinx_dpdma_sw_desc_addr_64;
+	else
+		xdev->desc_addr = xilinx_dpdma_sw_desc_addr_32;
+
+	ret = clk_prepare_enable(xdev->axi_clk);
+	if (ret) {
+		dev_err(xdev->dev, "failed to enable the axi clock\n");
+		goto error;
+	}
+
+	ret = dma_async_device_register(ddev);
+	if (ret) {
+		dev_err(xdev->dev, "failed to register the dma device\n");
+		goto error_dma_async;
+	}
+
+	ret = of_dma_controller_register(xdev->dev->of_node,
+					 of_dma_xilinx_xlate, ddev);
+	if (ret) {
+		dev_err(xdev->dev, "failed to register DMA to DT DMA helper\n");
+		goto error_of_dma;
+	}
+
+	xilinx_dpdma_enable_intr(xdev);
+
+	xilinx_dpdma_debugfs_init(&pdev->dev);
+
+	dev_info(&pdev->dev, "Xilinx DPDMA engine is probed\n");
+
+	return 0;
+
+error_of_dma:
+	dma_async_device_unregister(ddev);
+error_dma_async:
+	clk_disable_unprepare(xdev->axi_clk);
+error:
+	for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++)
+		if (xdev->chan[i])
+			xilinx_dpdma_chan_remove(xdev->chan[i]);
+
+	return ret;
+}
+
+/*
+ * Tear down the DPDMA device: mask interrupts, unregister from the DT DMA
+ * helper and the dmaengine core, gate the clock, then remove each channel.
+ * Relies on platform drvdata set during probe.
+ */
+static int xilinx_dpdma_remove(struct platform_device *pdev)
+{
+	struct xilinx_dpdma_device *xdev;
+	unsigned int i;
+
+	xdev = platform_get_drvdata(pdev);
+
+	xilinx_dpdma_disable_intr(xdev);
+	of_dma_controller_free(pdev->dev.of_node);
+	dma_async_device_unregister(&xdev->common);
+	clk_disable_unprepare(xdev->axi_clk);
+
+	/* Only slots populated from DT children are non-NULL. */
+	for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++)
+		if (xdev->chan[i])
+			xilinx_dpdma_chan_remove(xdev->chan[i]);
+
+	return 0;
+}
+
+/* Device tree match table: binds to nodes with compatible = "xlnx,dpdma". */
+static const struct of_device_id xilinx_dpdma_of_match[] = {
+	{ .compatible = "xlnx,dpdma",},
+	{ /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, xilinx_dpdma_of_match);
+
+/* Platform driver glue: probe/remove are hooked to the platform bus. */
+static struct platform_driver xilinx_dpdma_driver = {
+	.probe			= xilinx_dpdma_probe,
+	.remove			= xilinx_dpdma_remove,
+	.driver			= {
+		.name		= "xilinx-dpdma",
+		.of_match_table	= xilinx_dpdma_of_match,
+	},
+};
+
+module_platform_driver(xilinx_dpdma_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx DPDMA driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/xilinx/xilinx_frmbuf.c b/drivers/dma/xilinx/xilinx_frmbuf.c
new file mode 100644
index 000000000000..c5ba71991f2b
--- /dev/null
+++ b/drivers/dma/xilinx/xilinx_frmbuf.c
@@ -0,0 +1,1709 @@
+/*
+ * DMAEngine driver for Xilinx Framebuffer IP
+ *
+ * Copyright (C) 2016,2017 Xilinx, Inc. All rights reserved.
+ *
+ * Authors: Radhey Shyam Pandey <radheys@xilinx.com>
+ * John Nichols <jnichol@xilinx.com>
+ * Jeffrey Mouroux <jmouroux@xilinx.com>
+ *
+ * Based on the Freescale DMA driver.
+ *
+ * Description:
+ * The AXI Framebuffer core is a soft Xilinx IP core that
+ * provides high-bandwidth direct memory access between memory
+ * and AXI4-Stream.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma/xilinx_frmbuf.h>
+#include <linux/dmapool.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_dma.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+
+#include <drm/drm_fourcc.h>
+
+#include "../dmaengine.h"
+
+/* Register/Descriptor Offsets */
+#define XILINX_FRMBUF_CTRL_OFFSET		0x00
+#define XILINX_FRMBUF_GIE_OFFSET		0x04
+#define XILINX_FRMBUF_IE_OFFSET			0x08
+#define XILINX_FRMBUF_ISR_OFFSET		0x0c
+#define XILINX_FRMBUF_WIDTH_OFFSET		0x10
+#define XILINX_FRMBUF_HEIGHT_OFFSET		0x18
+#define XILINX_FRMBUF_STRIDE_OFFSET		0x20
+#define XILINX_FRMBUF_FMT_OFFSET		0x28
+#define XILINX_FRMBUF_ADDR_OFFSET		0x30
+#define XILINX_FRMBUF_ADDR2_OFFSET		0x3c
+#define XILINX_FRMBUF_FID_OFFSET		0x48
+
+/* Control Registers */
+#define XILINX_FRMBUF_CTRL_AP_START		BIT(0)
+#define XILINX_FRMBUF_CTRL_AP_DONE		BIT(1)
+#define XILINX_FRMBUF_CTRL_AP_IDLE		BIT(2)
+#define XILINX_FRMBUF_CTRL_AP_READY		BIT(3)
+#define XILINX_FRMBUF_CTRL_FLUSH		BIT(5)
+#define XILINX_FRMBUF_CTRL_FLUSH_DONE		BIT(6)
+#define XILINX_FRMBUF_CTRL_AUTO_RESTART		BIT(7)
+#define XILINX_FRMBUF_GIE_EN			BIT(0)
+
+/* Interrupt Status and Control */
+#define XILINX_FRMBUF_IE_AP_DONE		BIT(0)
+#define XILINX_FRMBUF_IE_AP_READY		BIT(1)
+
+#define XILINX_FRMBUF_ISR_AP_DONE_IRQ		BIT(0)
+#define XILINX_FRMBUF_ISR_AP_READY_IRQ		BIT(1)
+
+#define XILINX_FRMBUF_ISR_ALL_IRQ_MASK	\
+		(XILINX_FRMBUF_ISR_AP_DONE_IRQ | \
+		XILINX_FRMBUF_ISR_AP_READY_IRQ)
+
+/*
+ * Video Format Register Settings: numeric codes written to
+ * XILINX_FRMBUF_FMT_OFFSET to select the memory pixel format.
+ */
+#define XILINX_FRMBUF_FMT_RGBX8			10
+#define XILINX_FRMBUF_FMT_YUVX8			11
+#define XILINX_FRMBUF_FMT_YUYV8			12
+#define XILINX_FRMBUF_FMT_RGBA8			13
+#define XILINX_FRMBUF_FMT_YUVA8			14
+#define XILINX_FRMBUF_FMT_RGBX10		15
+#define XILINX_FRMBUF_FMT_YUVX10		16
+#define XILINX_FRMBUF_FMT_Y_UV8			18
+#define XILINX_FRMBUF_FMT_Y_UV8_420		19
+#define XILINX_FRMBUF_FMT_RGB8			20
+#define XILINX_FRMBUF_FMT_YUV8			21
+#define XILINX_FRMBUF_FMT_Y_UV10		22
+#define XILINX_FRMBUF_FMT_Y_UV10_420		23
+#define XILINX_FRMBUF_FMT_Y8			24
+#define XILINX_FRMBUF_FMT_Y10			25
+#define XILINX_FRMBUF_FMT_BGRA8			26
+#define XILINX_FRMBUF_FMT_BGRX8			27
+#define XILINX_FRMBUF_FMT_UYVY8			28
+#define XILINX_FRMBUF_FMT_BGR8			29
+#define XILINX_FRMBUF_FMT_RGBX12		30
+#define XILINX_FRMBUF_FMT_RGB16			35
+
+/* FID Register */
+#define XILINX_FRMBUF_FID_MASK			BIT(0)
+
+/* NOTE(review): buffer alignment multiple in bytes — confirm against IP PG. */
+#define XILINX_FRMBUF_ALIGN_MUL			8
+
+/* NOTE(review): flush-done poll bound; unit (iterations vs ms) — confirm. */
+#define WAIT_FOR_FLUSH_DONE			25
+
+/* Pixels per clock property flag */
+#define XILINX_PPC_PROP				BIT(0)
+#define XILINX_FLUSH_PROP			BIT(1)
+#define XILINX_FID_PROP				BIT(2)
+#define XILINX_CLK_PROP				BIT(3)
+
+/* Frame geometry limits enforced by this driver. */
+#define XILINX_FRMBUF_MAX_HEIGHT		(4320)
+#define XILINX_FRMBUF_MIN_HEIGHT		(64)
+#define XILINX_FRMBUF_MAX_WIDTH			(8192)
+#define XILINX_FRMBUF_MIN_WIDTH			(64)
+
+/**
+ * struct xilinx_frmbuf_desc_hw - Hardware Descriptor
+ * @luma_plane_addr: Luma or packed plane buffer address
+ * @chroma_plane_addr: Chroma plane buffer address
+ * @vsize: Vertical Size
+ * @hsize: Horizontal Size
+ * @stride: Number of bytes between the first
+ *	    pixels of each horizontal line
+ *
+ * NOTE(review): addresses are dma_addr_t (bus addresses), presumably
+ * programmed into the ADDR/ADDR2 registers — confirm against the prep code.
+ */
+struct xilinx_frmbuf_desc_hw {
+	dma_addr_t luma_plane_addr;
+	dma_addr_t chroma_plane_addr;
+	u32 vsize;
+	u32 hsize;
+	u32 stride;
+};
+
+/**
+ * struct xilinx_frmbuf_tx_descriptor - Per Transaction structure
+ * @async_tx: Async transaction descriptor
+ * @hw: Hardware descriptor
+ * @node: Node in the channel descriptors list
+ * @fid: Field ID of buffer
+ * @earlycb: Whether the callback should be called when in staged state
+ */
+struct xilinx_frmbuf_tx_descriptor {
+	struct dma_async_tx_descriptor async_tx;
+	struct xilinx_frmbuf_desc_hw hw;
+	struct list_head node;
+	u32 fid;
+	u32 earlycb;
+};
+
+/**
+ * struct xilinx_frmbuf_chan - Driver specific dma channel structure
+ * @xdev: Driver specific device structure
+ * @lock: Descriptor operation lock
+ * @chan_node: Member of a list of framebuffer channel instances
+ * @pending_list: Descriptors waiting
+ * @done_list: Complete descriptors
+ * @staged_desc: Next buffer to be programmed
+ * @active_desc: Currently active buffer being read/written to
+ * @common: DMA common channel
+ * @dev: The dma device
+ * @write_addr: callback that will write dma addresses to IP (32 or 64 bit)
+ * @irq: Channel IRQ
+ * @direction: Transfer direction
+ * @idle: Channel idle state
+ * @tasklet: Cleanup work after irq
+ * @vid_fmt: Reference to currently assigned video format description
+ * @hw_fid: FID enabled in hardware flag
+ * @mode: Select operation mode
+ */
+struct xilinx_frmbuf_chan {
+	struct xilinx_frmbuf_device *xdev;
+	/* Descriptor operation lock */
+	spinlock_t lock;
+	struct list_head chan_node;
+	struct list_head pending_list;
+	struct list_head done_list;
+	struct xilinx_frmbuf_tx_descriptor *staged_desc;
+	struct xilinx_frmbuf_tx_descriptor *active_desc;
+	struct dma_chan common;
+	struct device *dev;
+	void (*write_addr)(struct xilinx_frmbuf_chan *chan, u32 reg,
+			   dma_addr_t value);
+	int irq;
+	enum dma_transfer_direction direction;
+	bool idle;
+	struct tasklet_struct tasklet;
+	const struct xilinx_frmbuf_format_desc *vid_fmt;
+	bool hw_fid;
+	/* NOTE(review): enum operation_mode is declared elsewhere — confirm. */
+	enum operation_mode mode;
+};
+
+/**
+ * struct xilinx_frmbuf_format_desc - lookup table to match fourcc to format
+ * @dts_name: Device tree name for this entry.
+ * @id: Format ID
+ * @bpw: Bits of pixel data + padding in a 32-bit word (luma plane for semi-pl)
+ * @ppw: Number of pixels represented in a 32-bit word (luma plane for semi-pl)
+ * @num_planes: Expected number of plane buffers in framebuffer for this format
+ * @drm_fmt: DRM video framework equivalent fourcc code (0 = no DRM mapping)
+ * @v4l2_fmt: Video 4 Linux framework equivalent fourcc code (0 = no mapping)
+ * @fmt_bitmask: Flag identifying this format in device-specific "enabled"
+ *		 bitmap
+ */
+struct xilinx_frmbuf_format_desc {
+	const char *dts_name;
+	u32 id;
+	u32 bpw;
+	u32 ppw;
+	u32 num_planes;
+	u32 drm_fmt;
+	u32 v4l2_fmt;
+	u32 fmt_bitmask;
+};
+
+/* Global registry of probed framebuffer channels, protected by the mutex. */
+static LIST_HEAD(frmbuf_chan_list);
+static DEFINE_MUTEX(frmbuf_chan_list_lock);
+
+/*
+ * Format lookup table. Some dts_name entries appear twice on purpose:
+ * one row carries the DRM fourcc plus the multi-planar ("M") V4L2 code,
+ * and a second DRM-less row (drm_fmt = 0) carries the single-buffer V4L2
+ * variant; both rows share the same hardware format ID and bitmask.
+ */
+static const struct xilinx_frmbuf_format_desc xilinx_frmbuf_formats[] = {
+	{
+		.dts_name = "xbgr8888",
+		.id = XILINX_FRMBUF_FMT_RGBX8,
+		.bpw = 32,
+		.ppw = 1,
+		.num_planes = 1,
+		.drm_fmt = DRM_FORMAT_XBGR8888,
+		.v4l2_fmt = V4L2_PIX_FMT_BGRX32,
+		.fmt_bitmask = BIT(0),
+	},
+	{
+		.dts_name = "xbgr2101010",
+		.id = XILINX_FRMBUF_FMT_RGBX10,
+		.bpw = 32,
+		.ppw = 1,
+		.num_planes = 1,
+		.drm_fmt = DRM_FORMAT_XBGR2101010,
+		.v4l2_fmt = V4L2_PIX_FMT_XBGR30,
+		.fmt_bitmask = BIT(1),
+	},
+	{
+		.dts_name = "xrgb8888",
+		.id = XILINX_FRMBUF_FMT_BGRX8,
+		.bpw = 32,
+		.ppw = 1,
+		.num_planes = 1,
+		.drm_fmt = DRM_FORMAT_XRGB8888,
+		.v4l2_fmt = V4L2_PIX_FMT_XBGR32,
+		.fmt_bitmask = BIT(2),
+	},
+	{
+		.dts_name = "xvuy8888",
+		.id = XILINX_FRMBUF_FMT_YUVX8,
+		.bpw = 32,
+		.ppw = 1,
+		.num_planes = 1,
+		.drm_fmt = DRM_FORMAT_XVUY8888,
+		.v4l2_fmt = V4L2_PIX_FMT_XVUY32,
+		.fmt_bitmask = BIT(5),
+	},
+	{
+		.dts_name = "vuy888",
+		.id = XILINX_FRMBUF_FMT_YUV8,
+		.bpw = 24,
+		.ppw = 1,
+		.num_planes = 1,
+		.drm_fmt = DRM_FORMAT_VUY888,
+		.v4l2_fmt = V4L2_PIX_FMT_VUY24,
+		.fmt_bitmask = BIT(6),
+	},
+	{
+		.dts_name = "yuvx2101010",
+		.id = XILINX_FRMBUF_FMT_YUVX10,
+		.bpw = 32,
+		.ppw = 1,
+		.num_planes = 1,
+		.drm_fmt = DRM_FORMAT_XVUY2101010,
+		.v4l2_fmt = V4L2_PIX_FMT_XVUY10,
+		.fmt_bitmask = BIT(7),
+	},
+	{
+		.dts_name = "yuyv",
+		.id = XILINX_FRMBUF_FMT_YUYV8,
+		.bpw = 32,
+		.ppw = 2,
+		.num_planes = 1,
+		.drm_fmt = DRM_FORMAT_YUYV,
+		.v4l2_fmt = V4L2_PIX_FMT_YUYV,
+		.fmt_bitmask = BIT(8),
+	},
+	{
+		.dts_name = "uyvy",
+		.id = XILINX_FRMBUF_FMT_UYVY8,
+		.bpw = 32,
+		.ppw = 2,
+		.num_planes = 1,
+		.drm_fmt = DRM_FORMAT_UYVY,
+		.v4l2_fmt = V4L2_PIX_FMT_UYVY,
+		.fmt_bitmask = BIT(9),
+	},
+	{
+		.dts_name = "nv16",
+		.id = XILINX_FRMBUF_FMT_Y_UV8,
+		.bpw = 32,
+		.ppw = 4,
+		.num_planes = 2,
+		.drm_fmt = DRM_FORMAT_NV16,
+		.v4l2_fmt = V4L2_PIX_FMT_NV16M,
+		.fmt_bitmask = BIT(11),
+	},
+	{
+		.dts_name = "nv16",
+		.id = XILINX_FRMBUF_FMT_Y_UV8,
+		.bpw = 32,
+		.ppw = 4,
+		.num_planes = 2,
+		.drm_fmt = 0,
+		.v4l2_fmt = V4L2_PIX_FMT_NV16,
+		.fmt_bitmask = BIT(11),
+	},
+	{
+		.dts_name = "nv12",
+		.id = XILINX_FRMBUF_FMT_Y_UV8_420,
+		.bpw = 32,
+		.ppw = 4,
+		.num_planes = 2,
+		.drm_fmt = DRM_FORMAT_NV12,
+		.v4l2_fmt = V4L2_PIX_FMT_NV12M,
+		.fmt_bitmask = BIT(12),
+	},
+	{
+		.dts_name = "nv12",
+		.id = XILINX_FRMBUF_FMT_Y_UV8_420,
+		.bpw = 32,
+		.ppw = 4,
+		.num_planes = 2,
+		.drm_fmt = 0,
+		.v4l2_fmt = V4L2_PIX_FMT_NV12,
+		.fmt_bitmask = BIT(12),
+	},
+	{
+		.dts_name = "xv15",
+		.id = XILINX_FRMBUF_FMT_Y_UV10_420,
+		.bpw = 32,
+		.ppw = 3,
+		.num_planes = 2,
+		.drm_fmt = DRM_FORMAT_XV15,
+		.v4l2_fmt = V4L2_PIX_FMT_XV15M,
+		.fmt_bitmask = BIT(13),
+	},
+	{
+		.dts_name = "xv15",
+		.id = XILINX_FRMBUF_FMT_Y_UV10_420,
+		.bpw = 32,
+		.ppw = 3,
+		.num_planes = 2,
+		.drm_fmt = 0,
+		.v4l2_fmt = V4L2_PIX_FMT_XV15,
+		.fmt_bitmask = BIT(13),
+	},
+	{
+		.dts_name = "xv20",
+		.id = XILINX_FRMBUF_FMT_Y_UV10,
+		.bpw = 32,
+		.ppw = 3,
+		.num_planes = 2,
+		.drm_fmt = DRM_FORMAT_XV20,
+		.v4l2_fmt = V4L2_PIX_FMT_XV20M,
+		.fmt_bitmask = BIT(14),
+	},
+	{
+		.dts_name = "xv20",
+		.id = XILINX_FRMBUF_FMT_Y_UV10,
+		.bpw = 32,
+		.ppw = 3,
+		.num_planes = 2,
+		.drm_fmt = 0,
+		.v4l2_fmt = V4L2_PIX_FMT_XV20,
+		.fmt_bitmask = BIT(14),
+	},
+	{
+		.dts_name = "bgr888",
+		.id = XILINX_FRMBUF_FMT_RGB8,
+		.bpw = 24,
+		.ppw = 1,
+		.num_planes = 1,
+		.drm_fmt = DRM_FORMAT_BGR888,
+		.v4l2_fmt = V4L2_PIX_FMT_RGB24,
+		.fmt_bitmask = BIT(15),
+	},
+	{
+		.dts_name = "y8",
+		.id = XILINX_FRMBUF_FMT_Y8,
+		.bpw = 32,
+		.ppw = 4,
+		.num_planes = 1,
+		.drm_fmt = DRM_FORMAT_Y8,
+		.v4l2_fmt = V4L2_PIX_FMT_GREY,
+		.fmt_bitmask = BIT(16),
+	},
+	{
+		.dts_name = "y10",
+		.id = XILINX_FRMBUF_FMT_Y10,
+		.bpw = 32,
+		.ppw = 3,
+		.num_planes = 1,
+		.drm_fmt = DRM_FORMAT_Y10,
+		.v4l2_fmt = V4L2_PIX_FMT_Y10,
+		.fmt_bitmask = BIT(17),
+	},
+	{
+		.dts_name = "rgb888",
+		.id = XILINX_FRMBUF_FMT_BGR8,
+		.bpw = 24,
+		.ppw = 1,
+		.num_planes = 1,
+		.drm_fmt = DRM_FORMAT_RGB888,
+		.v4l2_fmt = V4L2_PIX_FMT_BGR24,
+		.fmt_bitmask = BIT(18),
+	},
+	{
+		.dts_name = "abgr8888",
+		.id = XILINX_FRMBUF_FMT_RGBA8,
+		.bpw = 32,
+		.ppw = 1,
+		.num_planes = 1,
+		.drm_fmt = DRM_FORMAT_ABGR8888,
+		.v4l2_fmt = 0,
+		.fmt_bitmask = BIT(19),
+	},
+	{
+		.dts_name = "argb8888",
+		.id = XILINX_FRMBUF_FMT_BGRA8,
+		.bpw = 32,
+		.ppw = 1,
+		.num_planes = 1,
+		.drm_fmt = DRM_FORMAT_ARGB8888,
+		.v4l2_fmt = 0,
+		.fmt_bitmask = BIT(20),
+	},
+	{
+		.dts_name = "avuy8888",
+		.id = XILINX_FRMBUF_FMT_YUVA8,
+		.bpw = 32,
+		.ppw = 1,
+		.num_planes = 1,
+		.drm_fmt = DRM_FORMAT_AVUY,
+		.v4l2_fmt = 0,
+		.fmt_bitmask = BIT(21),
+	},
+	{
+		.dts_name = "xbgr4121212",
+		.id = XILINX_FRMBUF_FMT_RGBX12,
+		.bpw = 40,
+		.ppw = 1,
+		.num_planes = 1,
+		.v4l2_fmt = V4L2_PIX_FMT_XBGR40,
+		.fmt_bitmask = BIT(22),
+	},
+	{
+		.dts_name = "rgb16",
+		.id = XILINX_FRMBUF_FMT_RGB16,
+		.bpw = 48,
+		.ppw = 1,
+		.num_planes = 1,
+		.v4l2_fmt = V4L2_PIX_FMT_BGR48,
+		.fmt_bitmask = BIT(23),
+	},
+};
+
+/**
+ * struct xilinx_frmbuf_feature - dt or IP property structure
+ * @direction: dma transfer mode and direction
+ * @flags: Bitmask of properties enabled in IP or dt
+ */
+struct xilinx_frmbuf_feature {
+	enum dma_transfer_direction direction;
+	u32 flags;
+};
+
+/**
+ * struct xilinx_frmbuf_device - dma device structure
+ * @regs: I/O mapped base address
+ * @dev: Device Structure
+ * @common: DMA device structure
+ * @chan: Driver specific dma channel
+ * @rst_gpio: GPIO reset
+ * @enabled_vid_fmts: Bitmask of video formats enabled in hardware
+ * @drm_memory_fmts: Array of supported DRM fourcc codes
+ * @drm_fmt_cnt: Count of supported DRM fourcc codes
+ * @v4l2_memory_fmts: Array of supported V4L2 fourcc codes
+ * @v4l2_fmt_cnt: Count of supported V4L2 fourcc codes
+ * @cfg: Pointer to Framebuffer Feature config struct
+ * @max_width: Maximum pixel width supported in IP.
+ * @max_height: Maximum number of lines supported in IP.
+ * @ap_clk: Video core clock
+ */
+struct xilinx_frmbuf_device {
+	void __iomem *regs;
+	struct device *dev;
+	struct dma_device common;
+	struct xilinx_frmbuf_chan chan;
+	struct gpio_desc *rst_gpio;
+	u32 enabled_vid_fmts;
+	/* fourcc tables sized for the worst case: every table entry enabled */
+	u32 drm_memory_fmts[ARRAY_SIZE(xilinx_frmbuf_formats)];
+	u32 drm_fmt_cnt;
+	u32 v4l2_memory_fmts[ARRAY_SIZE(xilinx_frmbuf_formats)];
+	u32 v4l2_fmt_cnt;
+	const struct xilinx_frmbuf_feature *cfg;
+	u32 max_width;
+	u32 max_height;
+	struct clk *ap_clk;
+};
+
+/*
+ * Per-compatible feature configs: the v2.1 IP exposes additional dt/IP
+ * properties (pixels-per-clock, flush, field-id, clock) that v2.0 lacks.
+ */
+static const struct xilinx_frmbuf_feature xlnx_fbwr_cfg_v20 = {
+	.direction = DMA_DEV_TO_MEM,
+};
+
+static const struct xilinx_frmbuf_feature xlnx_fbwr_cfg_v21 = {
+	.direction = DMA_DEV_TO_MEM,
+	.flags = XILINX_PPC_PROP | XILINX_FLUSH_PROP
+		| XILINX_FID_PROP | XILINX_CLK_PROP,
+};
+
+static const struct xilinx_frmbuf_feature xlnx_fbrd_cfg_v20 = {
+	.direction = DMA_MEM_TO_DEV,
+};
+
+static const struct xilinx_frmbuf_feature xlnx_fbrd_cfg_v21 = {
+	.direction = DMA_MEM_TO_DEV,
+	.flags = XILINX_PPC_PROP | XILINX_FLUSH_PROP
+		| XILINX_FID_PROP | XILINX_CLK_PROP,
+};
+
+/* wr = framebuffer write (video in to memory); rd = read (memory to video) */
+static const struct of_device_id xilinx_frmbuf_of_ids[] = {
+	{ .compatible = "xlnx,axi-frmbuf-wr-v2",
+		.data = (void *)&xlnx_fbwr_cfg_v20},
+	{ .compatible = "xlnx,axi-frmbuf-wr-v2.1",
+		.data = (void *)&xlnx_fbwr_cfg_v21},
+	{ .compatible = "xlnx,axi-frmbuf-rd-v2",
+		.data = (void *)&xlnx_fbrd_cfg_v20},
+	{ .compatible = "xlnx,axi-frmbuf-rd-v2.1",
+		.data = (void *)&xlnx_fbrd_cfg_v21},
+	{/* end of list */}
+};
+
+/******************************PROTOTYPES*************************************/
+#define to_xilinx_chan(chan) \
+	container_of(chan, struct xilinx_frmbuf_chan, common)
+#define to_dma_tx_descriptor(tx) \
+	container_of(tx, struct xilinx_frmbuf_tx_descriptor, async_tx)
+
+/* 32-bit MMIO register read from the framebuffer IP */
+static inline u32 frmbuf_read(struct xilinx_frmbuf_chan *chan, u32 reg)
+{
+	return ioread32(chan->xdev->regs + reg);
+}
+
+/* 32-bit MMIO register write to the framebuffer IP */
+static inline void frmbuf_write(struct xilinx_frmbuf_chan *chan, u32 reg,
+				u32 value)
+{
+	iowrite32(value, chan->xdev->regs + reg);
+}
+
+/* 64-bit register write as two 32-bit accesses: low word first, then high */
+static inline void frmbuf_writeq(struct xilinx_frmbuf_chan *chan, u32 reg,
+				 u64 value)
+{
+	iowrite32(lower_32_bits(value), chan->xdev->regs + reg);
+	iowrite32(upper_32_bits(value), chan->xdev->regs + reg + 4);
+}
+
+/* Address writer used when the IP is configured for 64-bit addressing */
+static void writeq_addr(struct xilinx_frmbuf_chan *chan, u32 reg,
+			dma_addr_t addr)
+{
+	frmbuf_writeq(chan, reg, (u64)addr);
+}
+
+/*
+ * Address writer for 32-bit addressing; a dma_addr_t wider than 32 bits
+ * is truncated here, which is valid only on 32-bit-address IP configs.
+ */
+static void write_addr(struct xilinx_frmbuf_chan *chan, u32 reg,
+		       dma_addr_t addr)
+{
+	frmbuf_write(chan, reg, addr);
+}
+
+/* Read-modify-write helpers to clear/set bits in a control register */
+static inline void frmbuf_clr(struct xilinx_frmbuf_chan *chan, u32 reg,
+			      u32 clr)
+{
+	frmbuf_write(chan, reg, frmbuf_read(chan, reg) & ~clr);
+}
+
+static inline void frmbuf_set(struct xilinx_frmbuf_chan *chan, u32 reg,
+			      u32 set)
+{
+	frmbuf_write(chan, reg, frmbuf_read(chan, reg) | set);
+}
+
+/*
+ * Build the per-device DRM and V4L2 fourcc tables from the subset of
+ * xilinx_frmbuf_formats[] that the hardware reports as enabled.
+ */
+static void frmbuf_init_format_array(struct xilinx_frmbuf_device *xdev)
+{
+	u32 idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(xilinx_frmbuf_formats); idx++) {
+		u32 drm = xilinx_frmbuf_formats[idx].drm_fmt;
+		u32 v4l2 = xilinx_frmbuf_formats[idx].v4l2_fmt;
+
+		if (!(xilinx_frmbuf_formats[idx].fmt_bitmask &
+		      xdev->enabled_vid_fmts))
+			continue;
+
+		/* a zero fourcc means the entry has no mapping in that API */
+		if (drm)
+			xdev->drm_memory_fmts[xdev->drm_fmt_cnt++] = drm;
+		if (v4l2)
+			xdev->v4l2_memory_fmts[xdev->v4l2_fmt_cnt++] = v4l2;
+	}
+}
+
+/*
+ * Validate that @chan is one of this driver's channels by scanning the
+ * global channel list under its lock; return the driver channel or
+ * ERR_PTR(-EINVAL) when @chan belongs to some other dmaengine provider.
+ */
+static struct xilinx_frmbuf_chan *frmbuf_find_chan(struct dma_chan *chan)
+{
+	struct xilinx_frmbuf_chan *cur;
+
+	mutex_lock(&frmbuf_chan_list_lock);
+	list_for_each_entry(cur, &frmbuf_chan_list, chan_node) {
+		if (&cur->common == chan) {
+			mutex_unlock(&frmbuf_chan_list_lock);
+			return cur;
+		}
+	}
+	mutex_unlock(&frmbuf_chan_list_lock);
+
+	dev_dbg(chan->device->dev,
+		"dma chan not a Video Framebuffer channel instance\n");
+	return ERR_PTR(-EINVAL);
+}
+
+/**
+ * frmbuf_find_dev - Map a dma_chan back to its framebuffer device
+ * @chan: generic dmaengine channel handle
+ *
+ * Confirms @chan really is a framebuffer channel before up-casting to
+ * the owning device.
+ *
+ * Return: device pointer on success, ERR_PTR(-ENODEV) otherwise
+ */
+static struct xilinx_frmbuf_device *frmbuf_find_dev(struct dma_chan *chan)
+{
+	struct xilinx_frmbuf_chan *xchan;
+	bool is_frmbuf_chan = false;
+
+	/*
+	 * Fix: the global list walk must be serialized against concurrent
+	 * channel add/remove with frmbuf_chan_list_lock, exactly as
+	 * frmbuf_find_chan() does; the original walked it lockless.
+	 */
+	mutex_lock(&frmbuf_chan_list_lock);
+	list_for_each_entry(xchan, &frmbuf_chan_list, chan_node) {
+		if (chan == &xchan->common) {
+			is_frmbuf_chan = true;
+			break;
+		}
+	}
+	mutex_unlock(&frmbuf_chan_list_lock);
+
+	if (!is_frmbuf_chan)
+		return ERR_PTR(-ENODEV);
+
+	xchan = to_xilinx_chan(chan);
+	return container_of(xchan, struct xilinx_frmbuf_device, chan);
+}
+
+/**
+ * frmbuf_verify_format - Validate a fourcc against hardware configuration
+ * @chan: dma channel instance
+ * @fourcc: DRM or V4L2 fourcc code requested by the client
+ * @type: XDMA_DRM or XDMA_V4L2, selecting which fourcc namespace to match
+ *
+ * On success the channel's active video format pointer is updated to the
+ * matching table entry.
+ *
+ * Return: 0 on success; -EINVAL if the fourcc is unknown, not enabled in
+ * hardware, or an alpha format requested on a non-read channel
+ */
+static int frmbuf_verify_format(struct dma_chan *chan, u32 fourcc, u32 type)
+{
+	struct xilinx_frmbuf_chan *xil_chan = to_xilinx_chan(chan);
+	u32 i, sz = ARRAY_SIZE(xilinx_frmbuf_formats);
+
+	for (i = 0; i < sz; i++) {
+		/* skip entries whose fourcc (in the requested API) differs */
+		if ((type == XDMA_DRM &&
+		     fourcc != xilinx_frmbuf_formats[i].drm_fmt) ||
+		    (type == XDMA_V4L2 &&
+		     fourcc != xilinx_frmbuf_formats[i].v4l2_fmt))
+			continue;
+
+		/* fourcc known, but this hardware was not built with it */
+		if (!(xilinx_frmbuf_formats[i].fmt_bitmask &
+		      xil_chan->xdev->enabled_vid_fmts))
+			return -EINVAL;
+
+		/*
+		 * The Alpha color formats are supported in Framebuffer Read
+		 * IP only as corresponding DRM formats.
+		 */
+		if (type == XDMA_DRM &&
+		    (xilinx_frmbuf_formats[i].drm_fmt == DRM_FORMAT_ABGR8888 ||
+		     xilinx_frmbuf_formats[i].drm_fmt == DRM_FORMAT_ARGB8888 ||
+		     xilinx_frmbuf_formats[i].drm_fmt == DRM_FORMAT_AVUY) &&
+		    xil_chan->direction != DMA_MEM_TO_DEV)
+			return -EINVAL;
+
+		xil_chan->vid_fmt = &xilinx_frmbuf_formats[i];
+		return 0;
+	}
+	return -EINVAL;
+}
+
+/*
+ * Common helper behind the DRM/V4L2 config entry points: verify @chan is
+ * ours and program the requested fourcc, logging (not failing) on a
+ * format the hardware cannot handle.
+ */
+static void xilinx_xdma_set_config(struct dma_chan *chan, u32 fourcc, u32 type)
+{
+	struct xilinx_frmbuf_chan *xil_chan = frmbuf_find_chan(chan);
+
+	if (IS_ERR(xil_chan))
+		return;
+
+	if (frmbuf_verify_format(chan, fourcc, type) == -EINVAL)
+		dev_err(chan->device->dev,
+			"Framebuffer not configured for fourcc 0x%x\n",
+			fourcc);
+}
+
+/**
+ * xilinx_xdma_set_mode - Select the channel's operation mode
+ * @chan: dma channel instance
+ * @mode: operation mode (e.g. AUTO_RESTART) applied on next start
+ *
+ * Silently ignores channels that do not belong to this driver.
+ */
+void xilinx_xdma_set_mode(struct dma_chan *chan, enum operation_mode mode)
+{
+	struct xilinx_frmbuf_chan *xil_chan;
+
+	xil_chan = frmbuf_find_chan(chan);
+	if (IS_ERR(xil_chan))
+		return;
+
+	/* Fix: dropped the redundant trailing return in a void function and
+	 * moved EXPORT_SYMBOL_GPL() onto its own line per kernel style.
+	 */
+	xil_chan->mode = mode;
+}
+EXPORT_SYMBOL_GPL(xilinx_xdma_set_mode);
+
+/**
+ * xilinx_xdma_drm_config - Configure the channel for a DRM fourcc
+ * @chan: dma channel instance
+ * @drm_fourcc: DRM fourcc code to program
+ */
+void xilinx_xdma_drm_config(struct dma_chan *chan, u32 drm_fourcc)
+{
+	xilinx_xdma_set_config(chan, drm_fourcc, XDMA_DRM);
+}
+EXPORT_SYMBOL_GPL(xilinx_xdma_drm_config);
+
+/**
+ * xilinx_xdma_v4l2_config - Configure the channel for a V4L2 fourcc
+ * @chan: dma channel instance
+ * @v4l2_fourcc: V4L2 fourcc code to program
+ */
+void xilinx_xdma_v4l2_config(struct dma_chan *chan, u32 v4l2_fourcc)
+{
+	xilinx_xdma_set_config(chan, v4l2_fourcc, XDMA_V4L2);
+}
+EXPORT_SYMBOL_GPL(xilinx_xdma_v4l2_config);
+
+/**
+ * xilinx_xdma_get_drm_vid_fmts - Report DRM fourccs enabled in hardware
+ * @chan: dma channel instance
+ * @fmt_cnt: output; number of entries in *@fmts
+ * @fmts: output; pointer to the device's DRM fourcc table (not a copy)
+ *
+ * Return: 0 on success, frmbuf_find_dev() error code otherwise
+ */
+int xilinx_xdma_get_drm_vid_fmts(struct dma_chan *chan, u32 *fmt_cnt,
+				 u32 **fmts)
+{
+	struct xilinx_frmbuf_device *xdev;
+
+	xdev = frmbuf_find_dev(chan);
+
+	if (IS_ERR(xdev))
+		return PTR_ERR(xdev);
+
+	*fmt_cnt = xdev->drm_fmt_cnt;
+	*fmts = xdev->drm_memory_fmts;
+
+	return 0;
+}
+EXPORT_SYMBOL(xilinx_xdma_get_drm_vid_fmts);
+
+/**
+ * xilinx_xdma_get_v4l2_vid_fmts - Report V4L2 fourccs enabled in hardware
+ * @chan: dma channel instance
+ * @fmt_cnt: output; number of entries in *@fmts
+ * @fmts: output; pointer to the device's V4L2 fourcc table (not a copy)
+ *
+ * Return: 0 on success, frmbuf_find_dev() error code otherwise
+ */
+int xilinx_xdma_get_v4l2_vid_fmts(struct dma_chan *chan, u32 *fmt_cnt,
+				  u32 **fmts)
+{
+	struct xilinx_frmbuf_device *xdev;
+
+	xdev = frmbuf_find_dev(chan);
+
+	if (IS_ERR(xdev))
+		return PTR_ERR(xdev);
+
+	*fmt_cnt = xdev->v4l2_fmt_cnt;
+	*fmts = xdev->v4l2_memory_fmts;
+
+	return 0;
+}
+EXPORT_SYMBOL(xilinx_xdma_get_v4l2_vid_fmts);
+
+/**
+ * xilinx_xdma_get_fid - Read the field ID recorded on a descriptor
+ * @chan: dma channel instance
+ * @async_tx: completed transaction descriptor
+ * @fid: output; field identifier latched from hardware at completion
+ *
+ * Only valid on framebuffer write (DMA_DEV_TO_MEM) channels.
+ *
+ * Return: 0 on success, -EINVAL on bad arguments or wrong direction,
+ * or a frmbuf_find_dev() error code
+ */
+int xilinx_xdma_get_fid(struct dma_chan *chan,
+			struct dma_async_tx_descriptor *async_tx, u32 *fid)
+{
+	struct xilinx_frmbuf_device *xdev;
+	struct xilinx_frmbuf_tx_descriptor *desc;
+
+	xdev = frmbuf_find_dev(chan);
+	if (IS_ERR(xdev))
+		return PTR_ERR(xdev);
+
+	if (!async_tx || !fid)
+		return -EINVAL;
+
+	if (xdev->chan.direction != DMA_DEV_TO_MEM)
+		return -EINVAL;
+
+	desc = to_dma_tx_descriptor(async_tx);
+	if (!desc)
+		return -EINVAL;
+
+	*fid = desc->fid;
+	return 0;
+}
+EXPORT_SYMBOL(xilinx_xdma_get_fid);
+
+/**
+ * xilinx_xdma_set_fid - Attach a field ID to an outgoing descriptor
+ * @chan: dma channel instance
+ * @async_tx: transaction descriptor to tag
+ * @fid: field identifier; must be 0 or 1
+ *
+ * Only valid on framebuffer read (DMA_MEM_TO_DEV) channels; the value is
+ * written to the FID register when the transfer starts.
+ *
+ * Return: 0 on success, -EINVAL on bad arguments or wrong direction,
+ * or a frmbuf_find_dev() error code
+ */
+int xilinx_xdma_set_fid(struct dma_chan *chan,
+			struct dma_async_tx_descriptor *async_tx, u32 fid)
+{
+	struct xilinx_frmbuf_device *xdev;
+	struct xilinx_frmbuf_tx_descriptor *desc;
+
+	if (fid > 1 || !async_tx)
+		return -EINVAL;
+
+	xdev = frmbuf_find_dev(chan);
+	if (IS_ERR(xdev))
+		return PTR_ERR(xdev);
+
+	if (xdev->chan.direction != DMA_MEM_TO_DEV)
+		return -EINVAL;
+
+	desc = to_dma_tx_descriptor(async_tx);
+	if (!desc)
+		return -EINVAL;
+
+	desc->fid = fid;
+	return 0;
+}
+EXPORT_SYMBOL(xilinx_xdma_set_fid);
+
+/**
+ * xilinx_xdma_get_earlycb - Read a descriptor's early-callback setting
+ * @chan: dma channel instance
+ * @async_tx: transaction descriptor to query
+ * @earlycb: output; current early-callback mode of the descriptor
+ *
+ * Return: 0 on success, -EINVAL on bad arguments, or a frmbuf_find_dev()
+ * error code
+ */
+int xilinx_xdma_get_earlycb(struct dma_chan *chan,
+			    struct dma_async_tx_descriptor *async_tx,
+			    u32 *earlycb)
+{
+	struct xilinx_frmbuf_device *xdev;
+	struct xilinx_frmbuf_tx_descriptor *desc;
+
+	xdev = frmbuf_find_dev(chan);
+	if (IS_ERR(xdev))
+		return PTR_ERR(xdev);
+
+	if (!async_tx || !earlycb)
+		return -EINVAL;
+
+	desc = to_dma_tx_descriptor(async_tx);
+	if (!desc)
+		return -EINVAL;
+
+	*earlycb = desc->earlycb;
+	return 0;
+}
+EXPORT_SYMBOL(xilinx_xdma_get_earlycb);
+
+/**
+ * xilinx_xdma_set_earlycb - Select when a descriptor's callback fires
+ * @chan: dma channel instance
+ * @async_tx: transaction descriptor to configure
+ * @earlycb: early-callback mode (e.g. EARLY_CALLBACK,
+ *           EARLY_CALLBACK_START_DESC) consumed by the transfer path
+ *
+ * Return: 0 on success, -EINVAL on bad arguments, or a frmbuf_find_dev()
+ * error code
+ */
+int xilinx_xdma_set_earlycb(struct dma_chan *chan,
+			    struct dma_async_tx_descriptor *async_tx,
+			    u32 earlycb)
+{
+	struct xilinx_frmbuf_device *xdev;
+	struct xilinx_frmbuf_tx_descriptor *desc;
+
+	if (!async_tx)
+		return -EINVAL;
+
+	xdev = frmbuf_find_dev(chan);
+	if (IS_ERR(xdev))
+		return PTR_ERR(xdev);
+
+	desc = to_dma_tx_descriptor(async_tx);
+	if (!desc)
+		return -EINVAL;
+
+	desc->earlycb = earlycb;
+	return 0;
+}
+EXPORT_SYMBOL(xilinx_xdma_set_earlycb);
+
+/**
+ * of_dma_xilinx_xlate - Translation function
+ * @dma_spec: Pointer to DMA specifier as found in the device tree
+ * @ofdma: Pointer to DMA controller data
+ *
+ * Each frmbuf instance exposes exactly one channel, so the specifier
+ * arguments are ignored and the single channel is handed out.
+ *
+ * Return: DMA channel pointer on success or error code on error
+ */
+static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
+					    struct of_dma *ofdma)
+{
+	struct xilinx_frmbuf_device *xdev = ofdma->of_dma_data;
+
+	return dma_get_slave_channel(&xdev->chan.common);
+}
+
+/* -----------------------------------------------------------------------------
+ * Descriptors alloc and free
+ */
+
+/**
+ * xilinx_frmbuf_alloc_tx_descriptor - Allocate transaction descriptor
+ * @chan: Driver specific dma channel
+ *
+ * Return: The allocated descriptor on success and NULL on failure.
+ */
+static struct xilinx_frmbuf_tx_descriptor *
+xilinx_frmbuf_alloc_tx_descriptor(struct xilinx_frmbuf_chan *chan)
+{
+	/* kzalloc() already returns NULL on failure; no extra check needed */
+	return kzalloc(sizeof(struct xilinx_frmbuf_tx_descriptor), GFP_KERNEL);
+}
+
+/**
+ * xilinx_frmbuf_free_desc_list - Free descriptors list
+ * @chan: Driver specific dma channel
+ * @list: List to parse and delete the descriptor
+ *
+ * Caller is expected to hold any lock protecting @list; this helper does
+ * no locking of its own.
+ */
+static void xilinx_frmbuf_free_desc_list(struct xilinx_frmbuf_chan *chan,
+					 struct list_head *list)
+{
+	struct xilinx_frmbuf_tx_descriptor *desc, *next;
+
+	/* safe variant: each node is deleted while iterating */
+	list_for_each_entry_safe(desc, next, list, node) {
+		list_del(&desc->node);
+		kfree(desc);
+	}
+}
+
+/**
+ * xilinx_frmbuf_free_descriptors - Free channel descriptors
+ * @chan: Driver specific dma channel
+ *
+ * Releases every descriptor the channel still owns: pending, done,
+ * active and staged; resets the lists so the channel can be reused.
+ */
+static void xilinx_frmbuf_free_descriptors(struct xilinx_frmbuf_chan *chan)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	xilinx_frmbuf_free_desc_list(chan, &chan->pending_list);
+	xilinx_frmbuf_free_desc_list(chan, &chan->done_list);
+	/* kfree(NULL) is a no-op, so unset pointers are fine here */
+	kfree(chan->active_desc);
+	kfree(chan->staged_desc);
+
+	chan->staged_desc = NULL;
+	chan->active_desc = NULL;
+	INIT_LIST_HEAD(&chan->pending_list);
+	INIT_LIST_HEAD(&chan->done_list);
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_frmbuf_free_chan_resources - Free channel resources
+ * @dchan: DMA channel
+ *
+ * dmaengine device_free_chan_resources callback; drops all descriptors.
+ */
+static void xilinx_frmbuf_free_chan_resources(struct dma_chan *dchan)
+{
+	struct xilinx_frmbuf_chan *chan = to_xilinx_chan(dchan);
+
+	xilinx_frmbuf_free_descriptors(chan);
+}
+
+/**
+ * xilinx_frmbuf_chan_desc_cleanup - Clean channel descriptors
+ * @chan: Driver specific dma channel
+ *
+ * Runs each completed descriptor's client callback (with the channel
+ * lock dropped, since callbacks may sleep or resubmit), then runs its
+ * dependencies and frees it. Called from tasklet context.
+ */
+static void xilinx_frmbuf_chan_desc_cleanup(struct xilinx_frmbuf_chan *chan)
+{
+	struct xilinx_frmbuf_tx_descriptor *desc, *next;
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
+		dma_async_tx_callback callback;
+		void *callback_param;
+
+		list_del(&desc->node);
+
+		/* Run the link descriptor callback function */
+		callback = desc->async_tx.callback;
+		callback_param = desc->async_tx.callback_param;
+		if (callback) {
+			/* drop the lock across the client callback */
+			spin_unlock_irqrestore(&chan->lock, flags);
+			callback(callback_param);
+			spin_lock_irqsave(&chan->lock, flags);
+		}
+
+		/* Run any dependencies, then free the descriptor */
+		dma_run_dependencies(&desc->async_tx);
+		kfree(desc);
+	}
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_frmbuf_do_tasklet - Schedule completion tasklet
+ * @data: Pointer to the Xilinx frmbuf channel structure
+ *
+ * Bottom half scheduled from the IRQ handler; drains the done list.
+ */
+static void xilinx_frmbuf_do_tasklet(unsigned long data)
+{
+	struct xilinx_frmbuf_chan *chan = (struct xilinx_frmbuf_chan *)data;
+
+	xilinx_frmbuf_chan_desc_cleanup(chan);
+}
+
+/**
+ * xilinx_frmbuf_alloc_chan_resources - Allocate channel resources
+ * @dchan: DMA channel
+ *
+ * Descriptors are allocated per-transfer, so only the cookie counter
+ * needs initializing here.
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_frmbuf_alloc_chan_resources(struct dma_chan *dchan)
+{
+	dma_cookie_init(dchan);
+
+	return 0;
+}
+
+/**
+ * xilinx_frmbuf_tx_status - Get frmbuf transaction status
+ * @dchan: DMA channel
+ * @cookie: Transaction identifier
+ * @txstate: Transaction state
+ *
+ * Return: frmbuf transaction status
+ */
+static enum dma_status xilinx_frmbuf_tx_status(struct dma_chan *dchan,
+					       dma_cookie_t cookie,
+					       struct dma_tx_state *txstate)
+{
+	/* no residue reporting; defer entirely to the cookie framework */
+	return dma_cookie_status(dchan, cookie, txstate);
+}
+
+/**
+ * xilinx_frmbuf_halt - Halt frmbuf channel
+ * @chan: Driver specific dma channel
+ *
+ * Clears ap_start (and the mode bit) in the control register and marks
+ * the channel idle.
+ */
+static void xilinx_frmbuf_halt(struct xilinx_frmbuf_chan *chan)
+{
+	frmbuf_clr(chan, XILINX_FRMBUF_CTRL_OFFSET,
+		   XILINX_FRMBUF_CTRL_AP_START |
+		   chan->mode);
+	chan->idle = true;
+}
+
+/**
+ * xilinx_frmbuf_start - Start dma channel
+ * @chan: Driver specific dma channel
+ *
+ * Sets ap_start plus the configured mode bit (e.g. auto-restart) and
+ * marks the channel busy.
+ */
+static void xilinx_frmbuf_start(struct xilinx_frmbuf_chan *chan)
+{
+	frmbuf_set(chan, XILINX_FRMBUF_CTRL_OFFSET,
+		   XILINX_FRMBUF_CTRL_AP_START |
+		   chan->mode);
+	chan->idle = false;
+}
+
+/**
+ * xilinx_frmbuf_complete_descriptor - Mark the active descriptor as complete
+ * This function is invoked with spinlock held
+ * @chan : xilinx frmbuf channel
+ *
+ * CONTEXT: hardirq
+ */
+static void xilinx_frmbuf_complete_descriptor(struct xilinx_frmbuf_chan *chan)
+{
+	struct xilinx_frmbuf_tx_descriptor *desc = chan->active_desc;
+
+	/*
+	 * In case of frame buffer write, read the fid register
+	 * and associate it with descriptor
+	 */
+	if (chan->direction == DMA_DEV_TO_MEM && chan->hw_fid)
+		desc->fid = frmbuf_read(chan, XILINX_FRMBUF_FID_OFFSET) &
+			    XILINX_FRMBUF_FID_MASK;
+
+	/* move to done_list; the tasklet runs the client callback later */
+	dma_cookie_complete(&desc->async_tx);
+	list_add_tail(&desc->node, &chan->done_list);
+}
+
+/**
+ * xilinx_frmbuf_start_transfer - Starts frmbuf transfer
+ * @chan: Driver specific channel struct pointer
+ *
+ * Programs the next pending descriptor into hardware and starts it.
+ * Called with chan->lock held. In AUTO_RESTART mode the just-started
+ * descriptor is kept as "staged" and only promoted to active on the
+ * next ap_ready interrupt, since the IP is already fetching it.
+ */
+static void xilinx_frmbuf_start_transfer(struct xilinx_frmbuf_chan *chan)
+{
+	struct xilinx_frmbuf_tx_descriptor *desc;
+
+	if (!chan->idle)
+		return;
+
+	/* previous staged descriptor is now the one hardware is working on */
+	if (chan->staged_desc) {
+		chan->active_desc = chan->staged_desc;
+		chan->staged_desc = NULL;
+	}
+
+	if (list_empty(&chan->pending_list))
+		return;
+
+	desc = list_first_entry(&chan->pending_list,
+				struct xilinx_frmbuf_tx_descriptor,
+				node);
+
+	/*
+	 * EARLY_CALLBACK_START_DESC: fire the client callback before the
+	 * hardware is programmed, then clear it so it cannot run twice.
+	 */
+	if (desc->earlycb == EARLY_CALLBACK_START_DESC) {
+		dma_async_tx_callback callback;
+		void *callback_param;
+
+		callback = desc->async_tx.callback;
+		callback_param = desc->async_tx.callback_param;
+		if (callback) {
+			callback(callback_param);
+			desc->async_tx.callback = NULL;
+			chan->active_desc = desc;
+		}
+	}
+
+	/* Start the transfer */
+	chan->write_addr(chan, XILINX_FRMBUF_ADDR_OFFSET,
+			 desc->hw.luma_plane_addr);
+	chan->write_addr(chan, XILINX_FRMBUF_ADDR2_OFFSET,
+			 desc->hw.chroma_plane_addr);
+
+	/* HW expects these parameters to be same for one transaction */
+	frmbuf_write(chan, XILINX_FRMBUF_WIDTH_OFFSET, desc->hw.hsize);
+	frmbuf_write(chan, XILINX_FRMBUF_STRIDE_OFFSET, desc->hw.stride);
+	frmbuf_write(chan, XILINX_FRMBUF_HEIGHT_OFFSET, desc->hw.vsize);
+	frmbuf_write(chan, XILINX_FRMBUF_FMT_OFFSET, chan->vid_fmt->id);
+
+	/* If it is framebuffer read IP set the FID */
+	if (chan->direction == DMA_MEM_TO_DEV && chan->hw_fid)
+		frmbuf_write(chan, XILINX_FRMBUF_FID_OFFSET, desc->fid);
+
+	/* Start the hardware */
+	xilinx_frmbuf_start(chan);
+	list_del(&desc->node);
+
+	/* No staging descriptor required when auto restart is disabled */
+	if (chan->mode == AUTO_RESTART)
+		chan->staged_desc = desc;
+	else
+		chan->active_desc = desc;
+}
+
+/**
+ * xilinx_frmbuf_issue_pending - Issue pending transactions
+ * @dchan: DMA channel
+ *
+ * dmaengine device_issue_pending callback; kicks the transfer engine
+ * under the channel lock.
+ */
+static void xilinx_frmbuf_issue_pending(struct dma_chan *dchan)
+{
+	struct xilinx_frmbuf_chan *chan = to_xilinx_chan(dchan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->lock, flags);
+	xilinx_frmbuf_start_transfer(chan);
+	spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_frmbuf_reset - Reset frmbuf channel
+ * @chan: Driver specific dma channel
+ *
+ * Pulses the external GPIO reset line wired to the IP.
+ */
+static void xilinx_frmbuf_reset(struct xilinx_frmbuf_chan *chan)
+{
+	/* reset ip */
+	gpiod_set_value(chan->xdev->rst_gpio, 1);
+	udelay(1);
+	gpiod_set_value(chan->xdev->rst_gpio, 0);
+}
+
+/**
+ * xilinx_frmbuf_chan_reset - Reset frmbuf channel and enable interrupts
+ * @chan: Driver specific frmbuf channel
+ *
+ * After the hardware reset, re-arms the ap_ready interrupt and the
+ * global interrupt enable, which the reset cleared.
+ */
+static void xilinx_frmbuf_chan_reset(struct xilinx_frmbuf_chan *chan)
+{
+	xilinx_frmbuf_reset(chan);
+	frmbuf_write(chan, XILINX_FRMBUF_IE_OFFSET, XILINX_FRMBUF_IE_AP_READY);
+	frmbuf_write(chan, XILINX_FRMBUF_GIE_OFFSET, XILINX_FRMBUF_GIE_EN);
+}
+
+/**
+ * xilinx_frmbuf_irq_handler - frmbuf Interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the Xilinx frmbuf channel structure
+ *
+ * Acknowledges the IP's interrupt bits, optionally fires an "early"
+ * client callback on the staged descriptor, completes the active
+ * descriptor on ap_ready and chains the next transfer; final callback
+ * delivery is deferred to the tasklet.
+ *
+ * Return: IRQ_HANDLED/IRQ_NONE
+ */
+static irqreturn_t xilinx_frmbuf_irq_handler(int irq, void *data)
+{
+	struct xilinx_frmbuf_chan *chan = data;
+	u32 status;
+	dma_async_tx_callback callback = NULL;
+	void *callback_param;
+	struct xilinx_frmbuf_tx_descriptor *desc;
+
+	status = frmbuf_read(chan, XILINX_FRMBUF_ISR_OFFSET);
+	/* shared IRQ line: bail out if none of our bits are set */
+	if (!(status & XILINX_FRMBUF_ISR_ALL_IRQ_MASK))
+		return IRQ_NONE;
+
+	/* write-to-clear acknowledgment of the raised bits */
+	frmbuf_write(chan, XILINX_FRMBUF_ISR_OFFSET,
+		     status & XILINX_FRMBUF_ISR_ALL_IRQ_MASK);
+
+	/* Check if callback function needs to be called early */
+	desc = chan->staged_desc;
+	if (desc && desc->earlycb == EARLY_CALLBACK) {
+		callback = desc->async_tx.callback;
+		callback_param = desc->async_tx.callback_param;
+		if (callback) {
+			callback(callback_param);
+			/* clear so the tasklet does not call it again */
+			desc->async_tx.callback = NULL;
+		}
+	}
+
+	if (status & XILINX_FRMBUF_ISR_AP_READY_IRQ) {
+		spin_lock(&chan->lock);
+		chan->idle = true;
+		if (chan->active_desc) {
+			xilinx_frmbuf_complete_descriptor(chan);
+			chan->active_desc = NULL;
+		}
+		/* immediately chain the next pending frame, if any */
+		xilinx_frmbuf_start_transfer(chan);
+		spin_unlock(&chan->lock);
+	}
+
+	tasklet_schedule(&chan->tasklet);
+	return IRQ_HANDLED;
+}
+
+/**
+ * xilinx_frmbuf_tx_submit - Submit DMA transaction
+ * @tx: Async transaction descriptor
+ *
+ * Assigns a cookie and queues the descriptor; hardware is only touched
+ * later from issue_pending.
+ *
+ * Return: cookie value on success and failure value on error
+ */
+static dma_cookie_t xilinx_frmbuf_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct xilinx_frmbuf_tx_descriptor *desc = to_dma_tx_descriptor(tx);
+	struct xilinx_frmbuf_chan *chan = to_xilinx_chan(tx->chan);
+	dma_cookie_t cookie;
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->lock, flags);
+	cookie = dma_cookie_assign(tx);
+	list_add_tail(&desc->node, &chan->pending_list);
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	return cookie;
+}
+
+/**
+ * xilinx_frmbuf_dma_prep_interleaved - prepare a descriptor for a
+ * DMA_SLAVE transaction
+ * @dchan: DMA channel
+ * @xt: Interleaved template pointer
+ * @flags: transfer ack flags
+ *
+ * Validates the template against the channel's configured video format
+ * and the IP's max geometry, then builds a single-frame descriptor.
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+xilinx_frmbuf_dma_prep_interleaved(struct dma_chan *dchan,
+				   struct dma_interleaved_template *xt,
+				   unsigned long flags)
+{
+	struct xilinx_frmbuf_chan *chan = to_xilinx_chan(dchan);
+	struct xilinx_frmbuf_tx_descriptor *desc;
+	struct xilinx_frmbuf_desc_hw *hw;
+	u32 vsize, hsize;
+
+	if (chan->direction != xt->dir || !chan->vid_fmt)
+		goto error;
+
+	if (!xt->numf || !xt->sgl[0].size)
+		goto error;
+
+	if (xt->frame_size != chan->vid_fmt->num_planes)
+		goto error;
+
+	vsize = xt->numf;
+	/* convert the segment byte count to pixels for the width register */
+	hsize = (xt->sgl[0].size * chan->vid_fmt->ppw * 8) /
+		chan->vid_fmt->bpw;
+	/* hsize calc should not have resulted in an odd number */
+	if (hsize & 1)
+		hsize++;
+
+	if (vsize > chan->xdev->max_height || hsize > chan->xdev->max_width) {
+		dev_dbg(chan->xdev->dev,
+			"vsize %d max vsize %d hsize %d max hsize %d\n",
+			vsize, chan->xdev->max_height, hsize,
+			chan->xdev->max_width);
+		dev_err(chan->xdev->dev, "Requested size not supported!\n");
+		goto error;
+	}
+
+	desc = xilinx_frmbuf_alloc_tx_descriptor(chan);
+	if (!desc)
+		return NULL;
+
+	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+	desc->async_tx.tx_submit = xilinx_frmbuf_tx_submit;
+	async_tx_ack(&desc->async_tx);
+
+	hw = &desc->hw;
+	/*
+	 * Fix: reuse the already-validated vsize/hsize instead of
+	 * recomputing (and re-rounding) the identical expressions.
+	 */
+	hw->vsize = vsize;
+	hw->hsize = hsize;
+	hw->stride = xt->sgl[0].icg + xt->sgl[0].size;
+
+	/* second plane (chroma) starts right after the luma frame */
+	if (chan->direction == DMA_MEM_TO_DEV) {
+		hw->luma_plane_addr = xt->src_start;
+		if (xt->frame_size == 2)
+			hw->chroma_plane_addr =
+				xt->src_start +
+				xt->numf * hw->stride +
+				xt->sgl[0].src_icg;
+	} else {
+		hw->luma_plane_addr = xt->dst_start;
+		if (xt->frame_size == 2)
+			hw->chroma_plane_addr =
+				xt->dst_start +
+				xt->numf * hw->stride +
+				xt->sgl[0].dst_icg;
+	}
+
+	return &desc->async_tx;
+
+error:
+	dev_err(chan->xdev->dev,
+		"Invalid dma template or missing dma video fmt config\n");
+	return NULL;
+}
+
+/**
+ * xilinx_frmbuf_terminate_all - Halt the channel and free descriptors
+ * @dchan: Driver specific dma channel pointer
+ *
+ * Stops the engine, drops every descriptor, optionally flushes the IP
+ * FIFO (v2.1 hardware), then resets and re-arms the channel.
+ * NOTE(review): msleep()/usleep_range() here assume terminate_all is
+ * invoked from sleepable context — confirm against callers.
+ *
+ * Return: 0
+ */
+static int xilinx_frmbuf_terminate_all(struct dma_chan *dchan)
+{
+	struct xilinx_frmbuf_chan *chan = to_xilinx_chan(dchan);
+
+	xilinx_frmbuf_halt(chan);
+	xilinx_frmbuf_free_descriptors(chan);
+	/* worst case frame-to-frame boundary; ensure frame output complete */
+	msleep(50);
+
+	if (chan->xdev->cfg->flags & XILINX_FLUSH_PROP) {
+		u8 count;
+
+		/*
+		 * Flush the framebuffer FIFO and
+		 * wait for max 50ms for flush done
+		 */
+		frmbuf_set(chan, XILINX_FRMBUF_CTRL_OFFSET,
+			   XILINX_FRMBUF_CTRL_FLUSH);
+		for (count = WAIT_FOR_FLUSH_DONE; count > 0; count--) {
+			if (frmbuf_read(chan, XILINX_FRMBUF_CTRL_OFFSET) &
+			    XILINX_FRMBUF_CTRL_FLUSH_DONE)
+				break;
+			usleep_range(2000, 2100);
+		}
+
+		if (!count)
+			dev_err(chan->xdev->dev, "Framebuffer Flush not done!\n");
+	}
+
+	xilinx_frmbuf_chan_reset(chan);
+
+	return 0;
+}
+
+/**
+ * xilinx_frmbuf_synchronize - kill tasklet to stop further descr processing
+ * @dchan: Driver specific dma channel pointer
+ *
+ * dmaengine device_synchronize callback; guarantees no completion
+ * callback is still running once it returns.
+ */
+static void xilinx_frmbuf_synchronize(struct dma_chan *dchan)
+{
+	struct xilinx_frmbuf_chan *chan = to_xilinx_chan(dchan);
+
+	tasklet_kill(&chan->tasklet);
+}
+
+/* -----------------------------------------------------------------------------
+ * Probe and remove
+ */
+
+/**
+ * xilinx_frmbuf_chan_remove - Per Channel remove function
+ * @chan: Driver specific dma channel
+ *
+ * Masks interrupts, stops the tasklet and removes the channel from both
+ * the dmaengine channel list and this driver's global lookup list.
+ */
+static void xilinx_frmbuf_chan_remove(struct xilinx_frmbuf_chan *chan)
+{
+	/* Disable all interrupts */
+	frmbuf_clr(chan, XILINX_FRMBUF_IE_OFFSET,
+		   XILINX_FRMBUF_ISR_ALL_IRQ_MASK);
+
+	tasklet_kill(&chan->tasklet);
+	list_del(&chan->common.device_node);
+
+	mutex_lock(&frmbuf_chan_list_lock);
+	list_del(&chan->chan_node);
+	mutex_unlock(&frmbuf_chan_list_lock);
+}
+
+/**
+ * xilinx_frmbuf_chan_probe - Per Channel Probing
+ * It get channel features from the device tree entry and
+ * initialize special channel handling routines
+ *
+ * @xdev: Driver specific device structure
+ * @node: Device node
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_frmbuf_chan_probe(struct xilinx_frmbuf_device *xdev,
+				    struct device_node *node)
+{
+	struct xilinx_frmbuf_chan *chan;
+	int err;
+	u32 dma_addr_size;
+
+	chan = &xdev->chan;
+
+	chan->dev = xdev->dev;
+	chan->xdev = xdev;
+	chan->idle = true;
+	chan->mode = AUTO_RESTART;
+
+	err = of_property_read_u32(node, "xlnx,dma-addr-width",
+				   &dma_addr_size);
+	if (err || (dma_addr_size != 32 && dma_addr_size != 64)) {
+		dev_err(xdev->dev, "missing or invalid addr width dts prop\n");
+		/* Fix: err is 0 when the property reads fine but holds an
+		 * unsupported value; don't report success in that case.
+		 */
+		return err ? err : -EINVAL;
+	}
+
+	/* use the 64-bit writer only when both IP and kernel support it */
+	if (dma_addr_size == 64 && sizeof(dma_addr_t) == sizeof(u64))
+		chan->write_addr = writeq_addr;
+	else
+		chan->write_addr = write_addr;
+
+	if (xdev->cfg->flags & XILINX_FID_PROP)
+		chan->hw_fid = of_property_read_bool(node, "xlnx,fid");
+
+	spin_lock_init(&chan->lock);
+	INIT_LIST_HEAD(&chan->pending_list);
+	INIT_LIST_HEAD(&chan->done_list);
+
+	chan->irq = irq_of_parse_and_map(node, 0);
+	/* Fix: irq_of_parse_and_map() returns 0 on failure; previously an
+	 * invalid interrupt specifier went straight to devm_request_irq().
+	 */
+	if (!chan->irq) {
+		dev_err(xdev->dev, "unable to parse and map IRQ\n");
+		return -EINVAL;
+	}
+
+	err = devm_request_irq(xdev->dev, chan->irq, xilinx_frmbuf_irq_handler,
+			       IRQF_SHARED, "xilinx_framebuffer", chan);
+	if (err) {
+		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
+		return err;
+	}
+
+	tasklet_init(&chan->tasklet, xilinx_frmbuf_do_tasklet,
+		     (unsigned long)chan);
+
+	/*
+	 * Initialize the DMA channel and add it to the DMA engine channels
+	 * list.
+	 */
+	chan->common.device = &xdev->common;
+
+	list_add_tail(&chan->common.device_node, &xdev->common.channels);
+
+	mutex_lock(&frmbuf_chan_list_lock);
+	list_add_tail(&chan->chan_node, &frmbuf_chan_list);
+	mutex_unlock(&frmbuf_chan_list_lock);
+
+	xilinx_frmbuf_chan_reset(chan);
+
+	return 0;
+}
+
+/**
+ * xilinx_frmbuf_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_frmbuf_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct xilinx_frmbuf_device *xdev;
+	struct resource *io;
+	enum dma_transfer_direction dma_dir;
+	const struct of_device_id *match;
+	int err;
+	u32 i, j, align, ppc;
+	int hw_vid_fmt_cnt;
+	const char *vid_fmts[ARRAY_SIZE(xilinx_frmbuf_formats)];
+
+	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+	if (!xdev)
+		return -ENOMEM;
+
+	xdev->dev = &pdev->dev;
+
+	match = of_match_node(xilinx_frmbuf_of_ids, node);
+	if (!match)
+		return -ENODEV;
+
+	xdev->cfg = match->data;
+
+	dma_dir = (enum dma_transfer_direction)xdev->cfg->direction;
+
+	if (xdev->cfg->flags & XILINX_CLK_PROP) {
+		xdev->ap_clk = devm_clk_get(xdev->dev, "ap_clk");
+		if (IS_ERR(xdev->ap_clk)) {
+			err = PTR_ERR(xdev->ap_clk);
+			dev_err(xdev->dev, "failed to get ap_clk (%d)\n", err);
+			return err;
+		}
+	} else {
+		dev_info(xdev->dev, "assuming clock is enabled!\n");
+	}
+
+	xdev->rst_gpio = devm_gpiod_get(&pdev->dev, "reset",
+					GPIOD_OUT_HIGH);
+	if (IS_ERR(xdev->rst_gpio)) {
+		err = PTR_ERR(xdev->rst_gpio);
+		if (err == -EPROBE_DEFER)
+			dev_info(&pdev->dev,
+				 "Probe deferred due to GPIO reset defer\n");
+		else
+			dev_err(&pdev->dev,
+				"Unable to locate reset property in dt\n");
+		return err;
+	}
+
+	/* deassert reset before touching registers */
+	gpiod_set_value_cansleep(xdev->rst_gpio, 0x0);
+
+	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	xdev->regs = devm_ioremap_resource(&pdev->dev, io);
+	if (IS_ERR(xdev->regs))
+		return PTR_ERR(xdev->regs);
+
+	err = of_property_read_u32(node, "xlnx,max-height", &xdev->max_height);
+	if (err < 0) {
+		dev_err(xdev->dev, "xlnx,max-height is missing!");
+		return -EINVAL;
+	} else if (xdev->max_height > XILINX_FRMBUF_MAX_HEIGHT ||
+		   xdev->max_height < XILINX_FRMBUF_MIN_HEIGHT) {
+		dev_err(&pdev->dev, "Invalid height in dt");
+		return -EINVAL;
+	}
+
+	err = of_property_read_u32(node, "xlnx,max-width", &xdev->max_width);
+	if (err < 0) {
+		dev_err(xdev->dev, "xlnx,max-width is missing!");
+		return -EINVAL;
+	} else if (xdev->max_width > XILINX_FRMBUF_MAX_WIDTH ||
+		   xdev->max_width < XILINX_FRMBUF_MIN_WIDTH) {
+		dev_err(&pdev->dev, "Invalid width in dt");
+		return -EINVAL;
+	}
+
+	/* Initialize the DMA engine */
+	if (xdev->cfg->flags & XILINX_PPC_PROP) {
+		err = of_property_read_u32(node, "xlnx,pixels-per-clock", &ppc);
+		if (err || (ppc != 1 && ppc != 2 && ppc != 4 && ppc != 8)) {
+			dev_err(&pdev->dev, "missing or invalid pixels per clock dts prop\n");
+			/* Fix: err is 0 when the property exists but holds an
+			 * unsupported ppc; don't return success here.
+			 */
+			return err ? err : -EINVAL;
+		}
+
+		err = of_property_read_u32(node, "xlnx,dma-align", &align);
+		if (err)
+			align = ppc * XILINX_FRMBUF_ALIGN_MUL;
+
+		/* alignment must be >= ppc*mul and a power of two */
+		if (align < (ppc * XILINX_FRMBUF_ALIGN_MUL) ||
+		    ffs(align) != fls(align)) {
+			dev_err(&pdev->dev, "invalid dma align dts prop\n");
+			return -EINVAL;
+		}
+	} else {
+		align = 16;
+	}
+
+	xdev->common.copy_align = fls(align) - 1;
+	xdev->common.dev = &pdev->dev;
+
+	if (xdev->cfg->flags & XILINX_CLK_PROP) {
+		err = clk_prepare_enable(xdev->ap_clk);
+		if (err) {
+			dev_err(&pdev->dev, " failed to enable ap_clk (%d)\n",
+				err);
+			return err;
+		}
+	}
+
+	INIT_LIST_HEAD(&xdev->common.channels);
+	dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
+	dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
+
+	/* Initialize the channels */
+	err = xilinx_frmbuf_chan_probe(xdev, node);
+	if (err < 0)
+		goto disable_clk;
+
+	xdev->chan.direction = dma_dir;
+
+	if (xdev->chan.direction == DMA_DEV_TO_MEM) {
+		xdev->common.directions = BIT(DMA_DEV_TO_MEM);
+		dev_info(&pdev->dev, "Xilinx AXI frmbuf DMA_DEV_TO_MEM\n");
+	} else if (xdev->chan.direction == DMA_MEM_TO_DEV) {
+		xdev->common.directions = BIT(DMA_MEM_TO_DEV);
+		dev_info(&pdev->dev, "Xilinx AXI frmbuf DMA_MEM_TO_DEV\n");
+	} else {
+		err = -EINVAL;
+		goto remove_chan;
+	}
+
+	/* read supported video formats and update internal table */
+	hw_vid_fmt_cnt = of_property_count_strings(node, "xlnx,vid-formats");
+	/* Fix: a negative (error) count must not be fed to the array read */
+	if (hw_vid_fmt_cnt < 0) {
+		dev_err(&pdev->dev,
+			"Missing or invalid xlnx,vid-formats dts prop\n");
+		err = hw_vid_fmt_cnt;
+		goto remove_chan;
+	}
+
+	err = of_property_read_string_array(node, "xlnx,vid-formats",
+					    vid_fmts, hw_vid_fmt_cnt);
+	if (err < 0) {
+		dev_err(&pdev->dev,
+			"Missing or invalid xlnx,vid-formats dts prop\n");
+		goto remove_chan;
+	}
+
+	for (i = 0; i < hw_vid_fmt_cnt; i++) {
+		const char *vid_fmt_name = vid_fmts[i];
+
+		for (j = 0; j < ARRAY_SIZE(xilinx_frmbuf_formats); j++) {
+			const char *dts_name =
+				xilinx_frmbuf_formats[j].dts_name;
+
+			if (strcmp(vid_fmt_name, dts_name))
+				continue;
+
+			xdev->enabled_vid_fmts |=
+				xilinx_frmbuf_formats[j].fmt_bitmask;
+		}
+	}
+
+	/* Determine supported vid framework formats */
+	frmbuf_init_format_array(xdev);
+
+	xdev->common.device_alloc_chan_resources =
+		xilinx_frmbuf_alloc_chan_resources;
+	xdev->common.device_free_chan_resources =
+		xilinx_frmbuf_free_chan_resources;
+	xdev->common.device_prep_interleaved_dma =
+		xilinx_frmbuf_dma_prep_interleaved;
+	xdev->common.device_terminate_all = xilinx_frmbuf_terminate_all;
+	xdev->common.device_synchronize = xilinx_frmbuf_synchronize;
+	xdev->common.device_tx_status = xilinx_frmbuf_tx_status;
+	xdev->common.device_issue_pending = xilinx_frmbuf_issue_pending;
+
+	platform_set_drvdata(pdev, xdev);
+
+	/* Register the DMA engine with the core */
+	/* Fix: registration can fail; its return was previously ignored */
+	err = dma_async_device_register(&xdev->common);
+	if (err) {
+		dev_err(&pdev->dev, "failed to register DMA device\n");
+		goto remove_chan;
+	}
+
+	err = of_dma_controller_register(node, of_dma_xilinx_xlate, xdev);
+	if (err < 0) {
+		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
+		goto error;
+	}
+
+	dev_info(&pdev->dev, "Xilinx AXI FrameBuffer Engine Driver Probed!!\n");
+
+	return 0;
+error:
+	dma_async_device_unregister(&xdev->common);
+remove_chan:
+	xilinx_frmbuf_chan_remove(&xdev->chan);
+disable_clk:
+	clk_disable_unprepare(xdev->ap_clk);
+	return err;
+}
+
+/**
+ * xilinx_frmbuf_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Tears down in reverse probe order: dmaengine registration, channel,
+ * then clock. clk_disable_unprepare(NULL) is a no-op for IP variants
+ * without XILINX_CLK_PROP.
+ *
+ * Return: Always '0'
+ */
+static int xilinx_frmbuf_remove(struct platform_device *pdev)
+{
+	struct xilinx_frmbuf_device *xdev = platform_get_drvdata(pdev);
+
+	dma_async_device_unregister(&xdev->common);
+	xilinx_frmbuf_chan_remove(&xdev->chan);
+	clk_disable_unprepare(xdev->ap_clk);
+
+	return 0;
+}
+
+MODULE_DEVICE_TABLE(of, xilinx_frmbuf_of_ids);
+
+/* Platform driver registration; matching is done via the OF table above */
+static struct platform_driver xilinx_frmbuf_driver = {
+	.driver = {
+		.name = "xilinx-frmbuf",
+		.of_match_table = xilinx_frmbuf_of_ids,
+	},
+	.probe = xilinx_frmbuf_probe,
+	.remove = xilinx_frmbuf_remove,
+};
+
+module_platform_driver(xilinx_frmbuf_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx Framebuffer driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/xilinx/xilinx_ps_pcie.h b/drivers/dma/xilinx/xilinx_ps_pcie.h
new file mode 100644
index 000000000000..81d634d15447
--- /dev/null
+++ b/drivers/dma/xilinx/xilinx_ps_pcie.h
@@ -0,0 +1,44 @@
+/*
+ * Xilinx PS PCIe DMA Engine platform header file
+ *
+ * Copyright (C) 2010-2017 Xilinx, Inc. All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation
+ */
+
+#ifndef __XILINX_PS_PCIE_H
+#define __XILINX_PS_PCIE_H
+
+#include <linux/delay.h>
+#include <linux/dma-direction.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/irqreturn.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/property.h>
+#include <linux/platform_device.h>
+#include <linux/timer.h>
+#include <linux/dma/xilinx_ps_pcie_dma.h>
+
+/**
+ * dma_platform_driver_register - This will be invoked by module init
+ *
+ * Return: returns status of platform_driver_register
+ */
+int dma_platform_driver_register(void);
+/**
+ * dma_platform_driver_unregister - This will be invoked by module exit
+ *
+ * Return: returns void after unregistering the platform driver
+ */
+void dma_platform_driver_unregister(void);
+
+#endif
diff --git a/drivers/dma/xilinx/xilinx_ps_pcie_dma_client.c b/drivers/dma/xilinx/xilinx_ps_pcie_dma_client.c
new file mode 100644
index 000000000000..2996133837f0
--- /dev/null
+++ b/drivers/dma/xilinx/xilinx_ps_pcie_dma_client.c
@@ -0,0 +1,1402 @@
+/*
+ * XILINX PS PCIe DMA Engine test module
+ *
+ * Copyright (C) 2017 Xilinx, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/cdev.h>
+#include <linux/dma-direction.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/kdev_t.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci_ids.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/dma/xilinx_ps_pcie_dma.h>
+
+#include "../dmaengine.h"
+
+#define DRV_MODULE_NAME "ps_pcie_dma_client"
+
+#define DMA_SCRATCH0_REG_OFFSET (0x50)
+#define DMA_SCRATCH1_REG_OFFSET (0x54)
+#define DMA_AXI_INTR_ASSRT_REG_OFFSET (0x74)
+
+#define DMA_SW_INTR_ASSRT_BIT BIT(3)
+
+#define DMA_BAR_NUMBER 0
+
+#define CHAR_DRIVER_NAME "ps_pcie_dmachan"
+
+#define PIO_CHAR_DRIVER_NAME "ps_pcie_pio"
+#define EP_TRANSLATION_CHECK 0xCCCCCCCC
+
+#define PIO_MEMORY_BAR_NUMBER 2
+
+#define XPIO_CLIENT_MAGIC 'P'
+#define IOCTL_EP_CHECK_TRANSLATION _IO(XPIO_CLIENT_MAGIC, 0x01)
+
+#define XPS_PCIE_DMA_CLIENT_MAGIC 'S'
+
+#define IGET_ASYNC_TRANSFERINFO _IO(XPS_PCIE_DMA_CLIENT_MAGIC, 0x01)
+#define ISET_ASYNC_TRANSFERINFO _IO(XPS_PCIE_DMA_CLIENT_MAGIC, 0x02)
+
+#define DMA_TRANSACTION_SUCCESSFUL 1
+#define DMA_TRANSACTION_FAILURE 0
+
+#define MAX_LIST 1024
+
+/* ioctl payload describing one asynchronous user-space transfer request */
+struct dma_transfer_info {
+	char __user *buff_address;
+	unsigned int buff_size;
+	loff_t offset;
+	enum dma_data_direction direction;
+};
+
+/* Completion record handed back to user space for one finished buffer */
+struct buff_info {
+	bool status;
+	unsigned int buff_size;
+	char __user *buff_address;
+};
+
+/* Container user space passes to poll for completed async transfers */
+struct usrbuff_info {
+	struct buff_info buff_list[MAX_LIST];
+	unsigned int expected;
+};
+
+enum pio_status {
+	PIO_SUPPORTED = 0,
+	PIO_NOT_SUPPORTED
+};
+
+enum dma_transfer_mode {
+	MEMORY_MAPPED = 0,
+	STREAMING
+};
+
+/* Static description of one supported DMA endpoint (see table below) */
+struct dma_deviceproperties {
+	u16 pci_vendorid;
+	u16 pci_deviceid;
+	u16 board_number;
+	enum pio_status pio_transfers;
+	enum dma_transfer_mode mode;
+	enum dma_data_direction direction[MAX_ALLOWED_CHANNELS_IN_HW];
+};
+
+/* Node queued on a channel's completed list by the async callback */
+struct xlnx_completed_info {
+	struct list_head clist;
+	struct buff_info buffer;
+};
+
+/* Per-channel client state bound to one dmaengine channel */
+struct xlnx_ps_pcie_dma_client_channel {
+	struct device *dev;
+	struct dma_chan *chan;
+	struct ps_pcie_dma_channel_match match;
+	enum dma_data_direction direction;
+	enum dma_transfer_mode mode;
+	struct xlnx_completed_info completed;
+	spinlock_t channel_lock; /* Lock to serialize transfers on channel */
+};
+
+/* Per-board client device: DMA char devices plus the optional PIO device */
+struct xlnx_ps_pcie_dma_client_device {
+	struct dma_deviceproperties *properties;
+
+	struct xlnx_ps_pcie_dma_client_channel
+		pcie_dma_chan[MAX_ALLOWED_CHANNELS_IN_HW];
+
+	dev_t char_device;
+	struct cdev xps_pcie_chardev;
+	struct device *chardev[MAX_ALLOWED_CHANNELS_IN_HW];
+
+	dev_t pio_char_device;
+	struct cdev xpio_char_dev;
+	struct device *xpio_char_device;
+	struct mutex pio_chardev_mutex; /* Exclusive access to ioctl */
+	struct completion trans_cmpltn;
+	u32 pio_translation_size;
+
+	struct list_head dev_node;
+};
+
+/* Book-keeping for one in-flight asynchronous transaction */
+struct xlnx_ps_pcie_dma_asynchronous_transaction {
+	dma_cookie_t cookie;
+	struct page **cache_pages;
+	unsigned int num_pages;
+	struct sg_table *sg;
+	struct xlnx_ps_pcie_dma_client_channel *chan;
+	struct xlnx_completed_info *buffer_info;
+	struct dma_async_tx_descriptor **txd;
+};
+
+static struct class *g_ps_pcie_dma_client_class; /* global device class */
+static struct list_head g_ps_pcie_dma_client_list;
+
+/*
+ * Keep adding to this list to interact with multiple DMA devices
+ */
+static struct dma_deviceproperties g_dma_deviceproperties_list[] = {
+	{
+		.pci_vendorid = PCI_VENDOR_ID_XILINX,
+		.pci_deviceid = ZYNQMP_DMA_DEVID,
+		.board_number = 0,
+		.pio_transfers = PIO_SUPPORTED,
+		.mode = MEMORY_MAPPED,
+		/* Make sure the channel direction is same
+		 * as what is configured in DMA device
+		 */
+		.direction = {DMA_TO_DEVICE, DMA_FROM_DEVICE,
+			DMA_TO_DEVICE, DMA_FROM_DEVICE}
+	}
+};
+
+/**
+ * ps_pcie_dma_sync_transfer_cbk - completion callback for blocking transfers
+ * @data: the struct completion passed as callback_param
+ *
+ * Handles both S2C and C2S directions: simply wakes the thread sleeping
+ * in initiate_sync_transfer().
+ *
+ * Return: void
+ */
+static void ps_pcie_dma_sync_transfer_cbk(void *data)
+{
+	struct completion *done = data;
+
+	if (done)
+		complete(done);
+}
+
+/**
+ * initiate_sync_transfer - Programs both Source Q
+ * and Destination Q of channel after setting up sg lists and transaction
+ * specific data. This functions waits until transaction completion is notified
+ *
+ * @channel: Pointer to the PS PCIe DMA channel structure
+ * @buffer: User land virtual address containing data to be sent or received
+ * @length: Length of user land buffer
+ * @f_offset: AXI domain address to which data pointed by user buffer has to
+ *	be sent/received from
+ * @direction: Transfer of data direction
+ *
+ * Return: number of bytes transferred on success, negative errno on failure
+ */
+static ssize_t initiate_sync_transfer(
+	struct xlnx_ps_pcie_dma_client_channel *channel,
+	const char __user *buffer, size_t length,
+	loff_t *f_offset, enum dma_data_direction direction)
+{
+	int offset;
+	unsigned int alloc_pages;
+	unsigned long first, last, nents = 1;
+	struct page **cache_pages;
+	struct dma_chan *chan = NULL;
+	struct dma_device *device;
+	struct dma_async_tx_descriptor **txd = NULL;
+	dma_cookie_t cookie = 0;
+	enum dma_ctrl_flags flags = 0;
+	int err;
+	int pinned;
+	struct sg_table *sg;
+	enum dma_transfer_direction d_direction;
+	int i;
+	struct completion *cmpl_ptr;
+	enum dma_status status;
+	struct scatterlist *selem;
+	size_t elem_len = 0;
+
+	chan = channel->chan;
+	device = chan->device;
+
+	/* Work out how many pages span the user buffer */
+	offset = offset_in_page(buffer);
+	first = ((unsigned long)buffer & PAGE_MASK) >> PAGE_SHIFT;
+	last = (((unsigned long)buffer + length - 1) & PAGE_MASK) >>
+		PAGE_SHIFT;
+	alloc_pages = (last - first) + 1;
+
+	cache_pages = devm_kzalloc(channel->dev,
+				   (alloc_pages * (sizeof(struct page *))),
+				   GFP_ATOMIC);
+	if (!cache_pages) {
+		dev_err(channel->dev,
+			"Unable to allocate memory for page table holder\n");
+		err = -ENOMEM;	/* was PTR_ERR(NULL) == 0: bogus success */
+		goto err_out_cachepages_alloc;
+	}
+
+	pinned = get_user_pages_fast((unsigned long)buffer, alloc_pages,
+				     !(direction), cache_pages);
+	if (pinned <= 0) {
+		dev_err(channel->dev, "Unable to pin user pages\n");
+		err = pinned ? pinned : -EFAULT;
+		goto err_out_pin_pages;
+	} else if (pinned < alloc_pages) {
+		dev_err(channel->dev, "Only pinned few user pages %d\n",
+			pinned);
+		/* Release only the pages actually pinned (the old code
+		 * iterated up to PTR_ERR(cache_pages), a wild bound).
+		 */
+		for (i = 0; i < pinned; i++)
+			put_page(cache_pages[i]);
+		err = -EFAULT;
+		goto err_out_pin_pages;
+	}
+
+	sg = devm_kzalloc(channel->dev, sizeof(struct sg_table), GFP_ATOMIC);
+	if (!sg) {
+		err = -ENOMEM;
+		goto err_out_alloc_sg_table;
+	}
+
+	err = sg_alloc_table_from_pages(sg, cache_pages, alloc_pages, offset,
+					length, GFP_ATOMIC);
+	if (err < 0) {
+		dev_err(channel->dev, "Unable to create sg table\n");
+		goto err_out_sg_to_sgl;
+	}
+
+	err = dma_map_sg(channel->dev, sg->sgl, sg->nents, direction);
+	if (err == 0) {
+		dev_err(channel->dev, "Unable to map buffer to sg table\n");
+		err = -ENOMEM;
+		goto err_out_dma_map_sg;
+	}
+
+	cmpl_ptr = devm_kzalloc(channel->dev, sizeof(struct completion),
+				GFP_ATOMIC);
+	if (!cmpl_ptr) {
+		err = -ENOMEM;
+		goto err_out_cmpl_ptr;
+	}
+
+	init_completion(cmpl_ptr);
+
+	/* Memory-mapped mode submits one memcpy descriptor per sg element */
+	if (channel->mode == MEMORY_MAPPED)
+		nents = sg->nents;
+
+	txd = devm_kzalloc(channel->dev, sizeof(*txd)
+			   * nents, GFP_ATOMIC);
+	if (!txd) {
+		err = -ENOMEM;
+		goto err_out_cmpl_ptr;
+	}
+
+	if (channel->mode == MEMORY_MAPPED) {
+		for (i = 0, selem = (sg->sgl); i < sg->nents; i++,
+		     selem = sg_next(selem)) {
+			/* Interrupt only on the final element */
+			if ((i + 1) == sg->nents)
+				flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+
+			if (direction == DMA_TO_DEVICE) {
+				txd[i] = device->device_prep_dma_memcpy(chan,
+					(dma_addr_t)(*f_offset) + elem_len,
+					selem->dma_address, selem->length,
+					flags);
+			} else {
+				txd[i] = device->device_prep_dma_memcpy(chan,
+					selem->dma_address,
+					(dma_addr_t)(*f_offset) + elem_len,
+					selem->length, flags);
+			}
+
+			elem_len += selem->length;
+
+			if (!txd[i]) {
+				err = -EAGAIN;	/* was PTR_ERR(NULL) == 0 */
+				goto err_out_no_prep_sg_async_desc;
+			}
+		}
+	} else {
+		if (direction == DMA_TO_DEVICE)
+			d_direction = DMA_MEM_TO_DEV;
+		else
+			d_direction = DMA_DEV_TO_MEM;
+
+		flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+
+		txd[0] = device->device_prep_slave_sg(chan, sg->sgl, sg->nents,
+						      d_direction, flags, NULL);
+		if (!txd[0]) {
+			err = -EAGAIN;
+			goto err_out_no_slave_sg_async_descriptor;
+		}
+	}
+
+	if (channel->mode == MEMORY_MAPPED) {
+		for (i = 0; i < sg->nents; i++) {
+			/* Install the callback before submitting so a fast
+			 * completion cannot be missed.
+			 */
+			if ((i + 1) == sg->nents) {
+				txd[i]->callback =
+					ps_pcie_dma_sync_transfer_cbk;
+				txd[i]->callback_param = cmpl_ptr;
+			}
+
+			cookie = txd[i]->tx_submit(txd[i]);
+			if (dma_submit_error(cookie)) {
+				err = (int)cookie;
+				dev_err(channel->dev,
+					"Unable to submit transaction\n");
+				goto free_transaction;
+			}
+		}
+	} else {
+		txd[0]->callback = ps_pcie_dma_sync_transfer_cbk;
+		txd[0]->callback_param = cmpl_ptr;
+
+		cookie = txd[0]->tx_submit(txd[0]);
+		if (dma_submit_error(cookie)) {
+			err = (int)cookie;
+			dev_err(channel->dev,
+				"Unable to submit transaction\n");
+			goto free_transaction;
+		}
+	}
+
+	dma_async_issue_pending(chan);
+
+	wait_for_completion_killable(cmpl_ptr);
+
+	status = dmaengine_tx_status(chan, cookie, NULL);
+	if (status == DMA_COMPLETE)
+		err = length;
+	else
+		err = -EIO;	/* was -1, i.e. -EPERM to user space */
+
+	dma_unmap_sg(channel->dev, sg->sgl, sg->nents, direction);
+	devm_kfree(channel->dev, cmpl_ptr);
+	devm_kfree(channel->dev, txd);
+	sg_free_table(sg);
+	devm_kfree(channel->dev, sg);
+	for (i = 0; i < alloc_pages; i++)
+		put_page(cache_pages[i]);
+	devm_kfree(channel->dev, cache_pages);
+
+	return (ssize_t)err;
+
+free_transaction:
+err_out_no_prep_sg_async_desc:
+err_out_no_slave_sg_async_descriptor:
+	devm_kfree(channel->dev, cmpl_ptr);
+	devm_kfree(channel->dev, txd);
+err_out_cmpl_ptr:
+	dma_unmap_sg(channel->dev, sg->sgl, sg->nents, direction);
+err_out_dma_map_sg:
+	sg_free_table(sg);
+err_out_sg_to_sgl:
+	devm_kfree(channel->dev, sg);
+err_out_alloc_sg_table:
+	for (i = 0; i < alloc_pages; i++)
+		put_page(cache_pages[i]);
+err_out_pin_pages:
+	devm_kfree(channel->dev, cache_pages);
+err_out_cachepages_alloc:
+
+	return (ssize_t)err;
+}
+
+/* C2S read entry point: validates direction and runs a blocking transfer. */
+static ssize_t
+ps_pcie_dma_read(struct file *file,
+		 char __user *buffer,
+		 size_t length,
+		 loff_t *f_offset)
+{
+	struct xlnx_ps_pcie_dma_client_channel *chan = file->private_data;
+	ssize_t ret;
+
+	if (chan->direction != DMA_FROM_DEVICE) {
+		dev_err(chan->dev, "Invalid data direction for channel\n");
+		return -EINVAL;
+	}
+
+	ret = initiate_sync_transfer(chan, buffer, length, f_offset,
+				     DMA_FROM_DEVICE);
+	if (ret != length)
+		dev_dbg(chan->dev, "Read synchronous transfer unsuccessful\n");
+
+	return ret;
+}
+
+/* S2C write entry point: validates direction and runs a blocking transfer. */
+static ssize_t
+ps_pcie_dma_write(struct file *file,
+		  const char __user *buffer,
+		  size_t length,
+		  loff_t *f_offset)
+{
+	struct xlnx_ps_pcie_dma_client_channel *chan = file->private_data;
+	ssize_t ret;
+
+	if (chan->direction != DMA_TO_DEVICE) {
+		dev_err(chan->dev,
+			"Invalid data direction for channel\n");
+		return -EINVAL;
+	}
+
+	ret = initiate_sync_transfer(chan, buffer, length, f_offset,
+				     DMA_TO_DEVICE);
+	if (ret != length)
+		dev_dbg(chan->dev, "Write synchronous transfer unsuccessful\n");
+
+	return ret;
+}
+
+/* Bind the opened char-device minor to its per-channel client state. */
+static int ps_pcie_dma_open(struct inode *in, struct file *file)
+{
+	struct xlnx_ps_pcie_dma_client_device *xdev =
+		container_of(in->i_cdev,
+			     struct xlnx_ps_pcie_dma_client_device,
+			     xps_pcie_chardev);
+
+	file->private_data = &xdev->pcie_dma_chan[iminor(in)];
+
+	return 0;
+}
+
+/* No per-open state to tear down; channel state lives with the device. */
+static int ps_pcie_dma_release(struct inode *in, struct file *filp)
+{
+	return 0;
+}
+
+/*
+ * update_completed_info - Drain completed-transfer records to user space.
+ *
+ * Copies up to usr_buff->expected struct buff_info records from the
+ * channel's completed list into usr_buff->buff_list, removes and frees
+ * each copied entry, then writes back the number actually copied into
+ * usr_buff->expected.
+ *
+ * NOTE(review): the list is traversed without channel_lock held (only
+ * list_del is locked) while the async callback appends concurrently —
+ * confirm this is safe for the tail-append pattern used here.
+ * NOTE(review): if copy_to_user fails mid-loop, earlier entries are
+ * already consumed; partial results are reported via the count.
+ */
+static int update_completed_info(struct xlnx_ps_pcie_dma_client_channel *chan,
+				 struct usrbuff_info *usr_buff)
+{
+	int retval = 0;
+	unsigned int expected, count = 0;
+	struct xlnx_completed_info *entry;
+	struct xlnx_completed_info *next;
+
+	if (list_empty(&chan->completed.clist))
+		goto update_expected;
+
+	if (copy_from_user((void *)&expected,
+			   (void __user *)&usr_buff->expected,
+			   sizeof(unsigned int)) != 0) {
+		pr_err("Expected count copy failure\n");
+		retval = -ENXIO;
+		return retval;
+	}
+
+	/* reject out-of-range request sizes from user space */
+	if (expected > MAX_LIST) {
+		retval = -ENXIO;
+		return retval;
+	}
+
+	list_for_each_entry_safe(entry, next, &chan->completed.clist, clist) {
+		if (copy_to_user((void __user *)(usr_buff->buff_list + count),
+				 (void *)&entry->buffer,
+				 sizeof(struct buff_info)) != 0) {
+			pr_err("update user completed count copy failed\n");
+			retval = -ENXIO;
+			break;
+		}
+		count++;
+		spin_lock(&chan->channel_lock);
+		list_del(&entry->clist);
+		spin_unlock(&chan->channel_lock);
+		devm_kfree(chan->dev, entry);
+		if (count == expected)
+			break;
+	}
+
+update_expected:
+	/* tell user space how many records were actually handed back */
+	if (copy_to_user((void __user *)&usr_buff->expected, (void *)&count,
+			 (sizeof(unsigned int))) != 0) {
+		pr_err("update user expected count copy failure\n");
+		retval = -ENXIO;
+	}
+
+	return retval;
+}
+
+/**
+ * ps_pcie_dma_async_transfer_cbk - Callback handler for Asynchronous transfers.
+ * Handles both S2C and C2S transfer call backs. Stores transaction information
+ * in a list for a user application to poll for this information
+ *
+ * @data: Callback parameter
+ *
+ * Return: void
+ */
+static void ps_pcie_dma_async_transfer_cbk(void *data)
+{
+	struct xlnx_ps_pcie_dma_asynchronous_transaction *trans =
+		(struct xlnx_ps_pcie_dma_asynchronous_transaction *)data;
+	enum dma_status status;
+	struct dma_tx_state state;
+	unsigned int i;
+
+	/* Hardware is done with the buffer: unmap, free sg and unpin pages */
+	dma_unmap_sg(trans->chan->dev, trans->sg->sgl, trans->sg->nents,
+		     trans->chan->direction);
+	sg_free_table(trans->sg);
+	devm_kfree(trans->chan->dev, trans->sg);
+	devm_kfree(trans->chan->dev, trans->txd);
+	for (i = 0; i < trans->num_pages; i++)
+		put_page(trans->cache_pages[i]);
+	devm_kfree(trans->chan->dev, trans->cache_pages);
+
+	status = dmaengine_tx_status(trans->chan->chan, trans->cookie, &state);
+
+	/* Bug fix: the failure branch previously also reported
+	 * DMA_TRANSACTION_SUCCESSFUL, so user space could never
+	 * observe a failed transfer.
+	 */
+	if (status == DMA_COMPLETE)
+		trans->buffer_info->buffer.status = DMA_TRANSACTION_SUCCESSFUL;
+	else
+		trans->buffer_info->buffer.status = DMA_TRANSACTION_FAILURE;
+
+	spin_lock(&trans->chan->channel_lock);
+	list_add_tail(&trans->buffer_info->clist,
+		      &trans->chan->completed.clist);
+	spin_unlock(&trans->chan->channel_lock);
+	devm_kfree(trans->chan->dev, trans);
+}
+
+/**
+ * initiate_async_transfer - Programs both Source Q
+ * and Destination Q of channel after setting up sg lists and transaction
+ * specific data. This functions returns after setting up transfer
+ *
+ * @channel: Pointer to the PS PCIe DMA channel structure
+ * @buffer: User land virtual address containing data to be sent or received
+ * @length: Length of user land buffer
+ * @f_offset: AXI domain address to which data pointed by user buffer has to
+ *	be sent/received from
+ * @direction: Transfer of data direction
+ *
+ * Return: length on success and negative errno for failure
+ */
+static int initiate_async_transfer(
+	struct xlnx_ps_pcie_dma_client_channel *channel,
+	char __user *buffer, size_t length, loff_t *f_offset,
+	enum dma_data_direction direction)
+{
+	int offset;
+	unsigned int alloc_pages;
+	unsigned long first, last, nents = 1;
+	struct page **cache_pages;
+	struct dma_chan *chan = NULL;
+	struct dma_device *device;
+	struct dma_async_tx_descriptor **txd = NULL;
+	dma_cookie_t cookie;
+	enum dma_ctrl_flags flags = 0;
+	struct xlnx_ps_pcie_dma_asynchronous_transaction *trans;
+	int err;
+	int pinned;
+	struct sg_table *sg;
+	enum dma_transfer_direction d_direction;
+	int i;
+	struct scatterlist *selem;
+	size_t elem_len = 0;
+
+	chan = channel->chan;
+	device = chan->device;
+
+	/* Work out how many pages span the user buffer */
+	offset = offset_in_page(buffer);
+	first = ((unsigned long)buffer & PAGE_MASK) >> PAGE_SHIFT;
+	last = (((unsigned long)buffer + length - 1) & PAGE_MASK) >>
+		PAGE_SHIFT;
+	alloc_pages = (last - first) + 1;
+
+	cache_pages = devm_kzalloc(channel->dev,
+				   (alloc_pages * (sizeof(struct page *))),
+				   GFP_ATOMIC);
+	if (!cache_pages) {
+		err = -ENOMEM;	/* was PTR_ERR(NULL) == 0: bogus success */
+		goto err_out_cachepages_alloc;
+	}
+
+	pinned = get_user_pages_fast((unsigned long)buffer, alloc_pages,
+				     !(direction), cache_pages);
+	if (pinned <= 0) {
+		dev_err(channel->dev, "Unable to pin user pages\n");
+		err = pinned ? pinned : -EFAULT;
+		goto err_out_pin_pages;
+	} else if (pinned < alloc_pages) {
+		dev_err(channel->dev, "Only pinned few user pages %d\n",
+			pinned);
+		/* Release only the pages actually pinned (the old code
+		 * iterated up to PTR_ERR(cache_pages), a wild bound).
+		 */
+		for (i = 0; i < pinned; i++)
+			put_page(cache_pages[i]);
+		err = -EFAULT;
+		goto err_out_pin_pages;
+	}
+
+	sg = devm_kzalloc(channel->dev, sizeof(struct sg_table), GFP_ATOMIC);
+	if (!sg) {
+		err = -ENOMEM;
+		goto err_out_alloc_sg_table;
+	}
+
+	err = sg_alloc_table_from_pages(sg, cache_pages, alloc_pages, offset,
+					length, GFP_ATOMIC);
+	if (err < 0) {
+		dev_err(channel->dev, "Unable to create sg table\n");
+		goto err_out_sg_to_sgl;
+	}
+
+	err = dma_map_sg(channel->dev, sg->sgl, sg->nents, direction);
+	if (err == 0) {
+		dev_err(channel->dev,
+			"Unable to map user buffer to sg table\n");
+		err = -ENOMEM;
+		goto err_out_dma_map_sg;
+	}
+
+	trans = devm_kzalloc(channel->dev, sizeof(*trans), GFP_ATOMIC);
+	if (!trans) {
+		err = -ENOMEM;
+		goto err_out_trans_ptr;
+	}
+
+	trans->buffer_info = devm_kzalloc(channel->dev,
+					  sizeof(struct xlnx_completed_info),
+					  GFP_ATOMIC);
+	if (!trans->buffer_info) {
+		err = -ENOMEM;
+		goto err_out_no_completion_info;
+	}
+
+	/* Memory-mapped mode submits one memcpy descriptor per sg element */
+	if (channel->mode == MEMORY_MAPPED)
+		nents = sg->nents;
+
+	txd = devm_kzalloc(channel->dev,
+			   sizeof(*txd) * nents, GFP_ATOMIC);
+	if (!txd) {
+		err = -ENOMEM;
+		goto err_out_no_completion_info;
+	}
+
+	trans->txd = txd;
+
+	if (channel->mode == MEMORY_MAPPED) {
+		for (i = 0, selem = (sg->sgl); i < sg->nents; i++,
+		     selem = sg_next(selem)) {
+			/* Interrupt only on the final element */
+			if ((i + 1) == sg->nents)
+				flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+
+			if (direction == DMA_TO_DEVICE) {
+				txd[i] = device->device_prep_dma_memcpy(chan,
+					(dma_addr_t)(*f_offset) + elem_len,
+					selem->dma_address, selem->length,
+					flags);
+			} else {
+				txd[i] = device->device_prep_dma_memcpy(chan,
+					selem->dma_address,
+					(dma_addr_t)(*f_offset) + elem_len,
+					selem->length, flags);
+			}
+
+			elem_len += selem->length;
+
+			if (!txd[i]) {
+				err = -EAGAIN;	/* was PTR_ERR(NULL) == 0 */
+				goto err_out_no_prep_sg_async_desc;
+			}
+		}
+	} else {
+		if (direction == DMA_TO_DEVICE)
+			d_direction = DMA_MEM_TO_DEV;
+		else
+			d_direction = DMA_DEV_TO_MEM;
+
+		flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+
+		txd[0] = device->device_prep_slave_sg(chan, sg->sgl, sg->nents,
+						      d_direction, flags, NULL);
+		if (!txd[0]) {
+			err = -EAGAIN;
+			goto err_out_no_slave_sg_async_descriptor;
+		}
+	}
+
+	trans->buffer_info->buffer.buff_address = buffer;
+	trans->buffer_info->buffer.buff_size = length;
+	trans->cache_pages = cache_pages;
+	trans->num_pages = alloc_pages;
+	trans->chan = channel;
+	trans->sg = sg;
+
+	if (channel->mode == MEMORY_MAPPED) {
+		for (i = 0; i < sg->nents; i++) {
+			/* Bug fix: install the callback BEFORE tx_submit.
+			 * The old code set it afterwards, so a descriptor
+			 * completing immediately was dropped, leaking the
+			 * transaction and the pinned pages.
+			 * NOTE(review): trans->cookie is still stored after
+			 * submit; the callback's tx_status lookup may race a
+			 * very fast completion — confirm against the engine.
+			 */
+			if ((i + 1) == sg->nents) {
+				txd[i]->callback =
+					ps_pcie_dma_async_transfer_cbk;
+				txd[i]->callback_param = trans;
+			}
+
+			cookie = txd[i]->tx_submit(txd[i]);
+			if (dma_submit_error(cookie)) {
+				err = (int)cookie;
+				dev_err(channel->dev,
+					"Unable to submit transaction\n");
+				goto free_transaction;
+			}
+
+			if ((i + 1) == sg->nents)
+				trans->cookie = cookie;
+		}
+
+	} else {
+		txd[0]->callback = ps_pcie_dma_async_transfer_cbk;
+		txd[0]->callback_param = trans;
+
+		cookie = txd[0]->tx_submit(txd[0]);
+		if (dma_submit_error(cookie)) {
+			err = (int)cookie;
+			dev_err(channel->dev,
+				"Unable to submit transaction\n");
+			goto free_transaction;
+		}
+
+		trans->cookie = cookie;
+	}
+
+	dma_async_issue_pending(chan);
+
+	return length;
+
+free_transaction:
+err_out_no_prep_sg_async_desc:
+err_out_no_slave_sg_async_descriptor:
+	devm_kfree(channel->dev, trans->buffer_info);
+	devm_kfree(channel->dev, txd);
+err_out_no_completion_info:
+	devm_kfree(channel->dev, trans);
+err_out_trans_ptr:
+	dma_unmap_sg(channel->dev, sg->sgl, sg->nents, direction);
+err_out_dma_map_sg:
+	sg_free_table(sg);
+err_out_sg_to_sgl:
+	devm_kfree(channel->dev, sg);
+err_out_alloc_sg_table:
+	for (i = 0; i < alloc_pages; i++)
+		put_page(cache_pages[i]);
+err_out_pin_pages:
+	devm_kfree(channel->dev, cache_pages);
+err_out_cachepages_alloc:
+
+	return err;
+}
+
+/* Per-channel ioctl: start an async transfer or poll for completions. */
+static long ps_pcie_dma_ioctl(struct file *filp, unsigned int cmd,
+			      unsigned long arg)
+{
+	int retval = 0;
+	struct xlnx_ps_pcie_dma_client_channel *chan;
+	struct dma_transfer_info transfer_info;
+
+	if (_IOC_TYPE(cmd) != XPS_PCIE_DMA_CLIENT_MAGIC)
+		return -ENOTTY;
+
+	chan = filp->private_data;
+
+	switch (cmd) {
+	case ISET_ASYNC_TRANSFERINFO:
+		if (copy_from_user((void *)&transfer_info,
+				   (void __user *)arg,
+				   sizeof(struct dma_transfer_info)) != 0) {
+			pr_err("Copy from user asynchronous params\n");
+			retval = -ENXIO;
+			return retval;
+		}
+		/* the requested direction must match the channel's */
+		if (transfer_info.direction != chan->direction) {
+			retval = -EINVAL;
+			return retval;
+		}
+		retval = initiate_async_transfer(chan,
+						 transfer_info.buff_address,
+						 transfer_info.buff_size,
+						 &transfer_info.offset,
+						 transfer_info.direction);
+		break;
+	case IGET_ASYNC_TRANSFERINFO:
+		retval = update_completed_info(chan,
+					       (struct usrbuff_info *)arg);
+		break;
+	default:
+		pr_err("Unsupported ioctl command received\n");
+		retval = -ENOTTY;	/* was -1 (-EPERM), wrong errno */
+	}
+
+	return (long)retval;
+}
+
+/* File operations for the per-channel DMA char devices */
+static const struct file_operations ps_pcie_dma_comm_fops = {
+	.owner = THIS_MODULE,
+	.read = ps_pcie_dma_read,
+	.write = ps_pcie_dma_write,
+	.unlocked_ioctl = ps_pcie_dma_ioctl,
+	.open = ps_pcie_dma_open,
+	.release = ps_pcie_dma_release,
+};
+
+/* Interrupt-descriptor callback: wake the PIO ioctl waiting on it. */
+static void pio_sw_intr_cbk(void *data)
+{
+	struct completion *done = data;
+
+	if (done)
+		complete(done);
+}
+
+/*
+ * pio_ioctl - Handshake with the endpoint to learn the PIO window size.
+ *
+ * IOCTL_EP_CHECK_TRANSLATION: posts an interrupt descriptor, writes a
+ * known pattern to the endpoint scratch register, asserts a software
+ * interrupt towards the AXI side, then waits for the endpoint to answer
+ * with the translated window size in scratch register 1.
+ */
+static long pio_ioctl(struct file *filp, unsigned int cmd,
+		      unsigned long arg)
+{
+	char *bar_memory = NULL;
+	u32 translation_size = 0;
+	long err = 0;
+	struct dma_async_tx_descriptor *intr_txd = NULL;
+	dma_cookie_t cookie;
+	struct dma_chan *chan = NULL;
+	struct dma_device *device;
+	enum dma_ctrl_flags flags;
+	struct xlnx_ps_pcie_dma_client_device *xdev;
+	struct ps_pcie_dma_channel_match *xlnx_match;
+	struct BAR_PARAMS *barinfo;
+
+	xdev = filp->private_data;
+	chan = xdev->pcie_dma_chan[0].chan;
+	device = chan->device;
+	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+
+	xlnx_match =
+		(struct ps_pcie_dma_channel_match *)chan->private;
+
+	barinfo = ((struct BAR_PARAMS *)(xlnx_match->bar_params) +
+		   DMA_BAR_NUMBER);
+	bar_memory = (__force char *)barinfo->BAR_VIRT_ADDR;
+
+	switch (cmd) {
+	case IOCTL_EP_CHECK_TRANSLATION:
+
+		/* serialize handshakes; completion is reused per device */
+		mutex_lock(&xdev->pio_chardev_mutex);
+		reinit_completion(&xdev->trans_cmpltn);
+
+		intr_txd = device->device_prep_dma_interrupt(chan, flags);
+		if (!intr_txd) {
+			err = -EAGAIN;
+			mutex_unlock(&xdev->pio_chardev_mutex);
+			return err;
+		}
+
+		intr_txd->callback = pio_sw_intr_cbk;
+		intr_txd->callback_param = &xdev->trans_cmpltn;
+
+		cookie = intr_txd->tx_submit(intr_txd);
+		if (dma_submit_error(cookie)) {
+			err = cookie;
+			pr_err("Unable to submit interrupt transaction\n");
+			mutex_unlock(&xdev->pio_chardev_mutex);
+			return err;
+		}
+
+		dma_async_issue_pending(chan);
+
+		iowrite32(EP_TRANSLATION_CHECK, (void __iomem *)(bar_memory +
+			  DMA_SCRATCH0_REG_OFFSET));
+		iowrite32(DMA_SW_INTR_ASSRT_BIT, (void __iomem *)(bar_memory +
+			  DMA_AXI_INTR_ASSRT_REG_OFFSET));
+
+		/* Bug fix: bail out if interrupted instead of reading the
+		 * scratch register before the endpoint has answered.
+		 */
+		if (wait_for_completion_interruptible(&xdev->trans_cmpltn)) {
+			mutex_unlock(&xdev->pio_chardev_mutex);
+			return -ERESTARTSYS;
+		}
+
+		translation_size = ioread32((void __iomem *)(bar_memory +
+					    DMA_SCRATCH1_REG_OFFSET));
+		if (translation_size > 0)
+			xdev->pio_translation_size = translation_size;
+		else
+			err = -EAGAIN;
+		iowrite32(0, (void __iomem *)(bar_memory +
+			  DMA_SCRATCH1_REG_OFFSET));
+		mutex_unlock(&xdev->pio_chardev_mutex);
+		break;
+
+	default:
+		err = -EINVAL;
+	}
+	return err;
+}
+
+/*
+ * pio_read - Copy data from the endpoint's PIO BAR window to user space.
+ *
+ * Bounds-checks length and offset against the translation size learned
+ * via IOCTL_EP_CHECK_TRANSLATION, then copies from BAR memory.
+ *
+ * NOTE(review): data is read from __iomem via copy_to_user rather than
+ * memcpy_fromio into a bounce buffer — confirm this is safe on the
+ * target architectures.
+ * NOTE(review): a negative *f_offset is not rejected explicitly.
+ */
+static ssize_t
+pio_read(struct file *file, char __user *buffer, size_t length,
+	 loff_t *f_offset)
+{
+	char *bar_memory = NULL;
+	struct xlnx_ps_pcie_dma_client_device *xdev;
+	struct ps_pcie_dma_channel_match *xlnx_match;
+	ssize_t num_bytes = 0;
+	struct BAR_PARAMS *barinfo;
+
+	xdev = file->private_data;
+	xlnx_match = (struct ps_pcie_dma_channel_match *)
+		xdev->pcie_dma_chan[0].chan->private;
+
+	barinfo = ((struct BAR_PARAMS *)(xlnx_match->bar_params) +
+		   PIO_MEMORY_BAR_NUMBER);
+	bar_memory = (__force char *)barinfo->BAR_VIRT_ADDR;
+
+	if (length > xdev->pio_translation_size) {
+		pr_err("Error! Invalid buffer length supplied at PIO read\n");
+		num_bytes = -1;
+		return num_bytes;
+	}
+
+	if ((length + *f_offset)
+	    > xdev->pio_translation_size) {
+		pr_err("Error! Invalid buffer offset supplied at PIO read\n");
+		num_bytes = -1;
+		return num_bytes;
+	}
+
+	bar_memory += *f_offset;
+
+	/* copy_to_user returns bytes NOT copied; report bytes copied */
+	num_bytes = copy_to_user(buffer, bar_memory, length);
+	if (num_bytes != 0) {
+		pr_err("Error! copy_to_user failed at PIO read\n");
+		num_bytes = length - num_bytes;
+	} else {
+		num_bytes = length;
+	}
+
+	return num_bytes;
+}
+
+/*
+ * pio_write - Copy data from user space into the endpoint's PIO BAR window.
+ *
+ * Bounds-checks length and offset against the translation size learned
+ * via IOCTL_EP_CHECK_TRANSLATION, then copies into BAR memory.
+ *
+ * NOTE(review): data is written to __iomem via copy_from_user rather
+ * than a bounce buffer + memcpy_toio — confirm this is safe on the
+ * target architectures.
+ */
+static ssize_t
+pio_write(struct file *file, const char __user *buffer,
+	  size_t length, loff_t *f_offset)
+{
+	char *bar_memory = NULL;
+	struct xlnx_ps_pcie_dma_client_device *xdev;
+	struct ps_pcie_dma_channel_match *xlnx_match;
+	ssize_t num_bytes = 0;
+	struct BAR_PARAMS *barinfo;
+
+	xdev = file->private_data;
+	xlnx_match = (struct ps_pcie_dma_channel_match *)
+		xdev->pcie_dma_chan[0].chan->private;
+
+	barinfo = ((struct BAR_PARAMS *)(xlnx_match->bar_params) +
+		   PIO_MEMORY_BAR_NUMBER);
+	bar_memory = (__force char *)barinfo->BAR_VIRT_ADDR;
+
+	if (length > xdev->pio_translation_size) {
+		pr_err("Error! Invalid buffer length supplied at PIO write\n");
+		num_bytes = -1;
+		return num_bytes;
+	}
+
+	if ((length + *f_offset)
+	    > xdev->pio_translation_size) {
+		pr_err("Error! Invalid buffer offset supplied at PIO write\n");
+		num_bytes = -1;
+		return num_bytes;
+	}
+
+	bar_memory += *f_offset;
+
+	/* copy_from_user returns bytes NOT copied; report bytes copied */
+	num_bytes = copy_from_user(bar_memory, buffer, length);
+
+	if (num_bytes != 0) {
+		pr_err("Error! copy_from_user failed at PIO write\n");
+		num_bytes = length - num_bytes;
+	} else {
+		num_bytes = length;
+	}
+
+	return num_bytes;
+}
+
+/* Bind the PIO char device to its owning client device structure. */
+static int pio_open(struct inode *in, struct file *file)
+{
+	file->private_data =
+		container_of(in->i_cdev,
+			     struct xlnx_ps_pcie_dma_client_device,
+			     xpio_char_dev);
+
+	return 0;
+}
+
+/* No per-open state to tear down for the PIO device. */
+static int pio_release(struct inode *in, struct file *filp)
+{
+	return 0;
+}
+
+/* File operations for the PIO char device */
+static const struct file_operations ps_pcie_pio_fops = {
+	.owner = THIS_MODULE,
+	.read = pio_read,
+	.write = pio_write,
+	.unlocked_ioctl = pio_ioctl,
+	.open = pio_open,
+	.release = pio_release,
+};
+
+/* Remove the PIO char device: sysfs node, then cdev, then the region. */
+static void destroy_char_iface_for_pio(
+	struct xlnx_ps_pcie_dma_client_device *xdev)
+{
+	dev_t devt = MKDEV(MAJOR(xdev->pio_char_device), 0);
+
+	device_destroy(g_ps_pcie_dma_client_class, devt);
+	cdev_del(&xdev->xpio_char_dev);
+	unregister_chrdev_region(xdev->pio_char_device, 1);
+}
+
+/*
+ * destroy_char_iface_for_dma - Tear down all per-channel char devices.
+ *
+ * Drains any still-pending completion records, destroys each sysfs
+ * device node, then removes the cdev and frees the dev_t region.
+ */
+static void destroy_char_iface_for_dma(
+	struct xlnx_ps_pcie_dma_client_device *xdev)
+{
+	int i;
+	struct xlnx_completed_info *entry, *next;
+
+	for (i = 0; i < MAX_ALLOWED_CHANNELS_IN_HW; i++) {
+		list_for_each_entry_safe(entry, next,
+					 &xdev->pcie_dma_chan[i].completed.clist,
+					 clist) {
+			spin_lock(&xdev->pcie_dma_chan[i].channel_lock);
+			list_del(&entry->clist);
+			spin_unlock(&xdev->pcie_dma_chan[i].channel_lock);
+			/* Bug fix: entries are devm_kzalloc'd by the async
+			 * callback; freeing them with plain kfree() corrupts
+			 * the devres list. Free with devm_kfree instead.
+			 */
+			devm_kfree(xdev->pcie_dma_chan[i].dev, entry);
+		}
+		device_destroy(g_ps_pcie_dma_client_class,
+			       MKDEV(MAJOR(xdev->char_device), i));
+	}
+	cdev_del(&xdev->xps_pcie_chardev);
+	unregister_chrdev_region(xdev->char_device, MAX_ALLOWED_CHANNELS_IN_HW);
+}
+
+/* Remove the DMA channel nodes first, then the optional PIO node. */
+static void delete_char_dev_interfaces(
+	struct xlnx_ps_pcie_dma_client_device *xdev)
+{
+	destroy_char_iface_for_dma(xdev);
+
+	if (xdev->properties->pio_transfers == PIO_SUPPORTED)
+		destroy_char_iface_for_pio(xdev);
+}
+
+/* Hand every acquired dmaengine channel back to the core. */
+static void release_dma_channels(struct xlnx_ps_pcie_dma_client_device *xdev)
+{
+	int ch;
+
+	for (ch = 0; ch < MAX_ALLOWED_CHANNELS_IN_HW; ch++)
+		dma_release_channel(xdev->pcie_dma_chan[ch].chan);
+}
+
+/* Module-exit helper: tear down every registered client device. */
+static void delete_char_devices(void)
+{
+	struct xlnx_ps_pcie_dma_client_device *entry, *next;
+
+	list_for_each_entry_safe(entry, next, &g_ps_pcie_dma_client_list,
+				 dev_node) {
+		list_del(&entry->dev_node);
+		delete_char_dev_interfaces(entry);
+		release_dma_channels(entry);
+		kfree(entry);
+	}
+}
+
+/*
+ * ps_pcie_dma_filter - dma_request_channel() filter.
+ *
+ * Matches a channel when vendor id, device id, channel number and
+ * direction all agree between the requested and offered match data.
+ */
+static bool ps_pcie_dma_filter(struct dma_chan *chan, void *param)
+{
+	struct ps_pcie_dma_channel_match *want = param;
+	struct ps_pcie_dma_channel_match *have = chan->private;
+
+	if (!want || !have)
+		return false;
+
+	if (want->pci_vendorid == 0 || have->pci_vendorid == 0)
+		return false;
+
+	return want->pci_vendorid == have->pci_vendorid &&
+	       want->pci_deviceid == have->pci_deviceid &&
+	       want->channel_number == have->channel_number &&
+	       want->direction == have->direction;
+}
+
+/*
+ * acquire_dma_channels - Request one dmaengine channel per HW channel.
+ *
+ * Fills in per-channel match data from the device properties table and
+ * requests each channel through ps_pcie_dma_filter(). On failure all
+ * channels acquired so far are released.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+static int acquire_dma_channels(struct xlnx_ps_pcie_dma_client_device *xdev)
+{
+	int err;
+	int i;
+	dma_cap_mask_t mask;
+	struct ps_pcie_dma_channel_match *match;
+
+	dma_cap_zero(mask);
+	/* Bug fix: dma_cap_set() takes a single capability; OR-ing the
+	 * DMA_SLAVE and DMA_PRIVATE enum values set one wrong bit
+	 * instead of the two intended ones.
+	 */
+	dma_cap_set(DMA_SLAVE, mask);
+	dma_cap_set(DMA_PRIVATE, mask);
+
+	for (i = 0; i < MAX_ALLOWED_CHANNELS_IN_HW; i++) {
+		match = &xdev->pcie_dma_chan[i].match;
+		match->board_number = xdev->properties->board_number;
+		match->pci_deviceid = xdev->properties->pci_deviceid;
+		match->pci_vendorid = xdev->properties->pci_vendorid;
+		match->channel_number = i;
+		match->direction = xdev->properties->direction[i];
+
+		xdev->pcie_dma_chan[i].chan =
+			dma_request_channel(mask, ps_pcie_dma_filter, match);
+
+		if (!xdev->pcie_dma_chan[i].chan) {
+			pr_err("Error channel handle %d board %d channel\n",
+			       match->board_number,
+			       match->channel_number);
+			err = -EINVAL;
+			goto err_out_no_channels;
+		}
+		xdev->pcie_dma_chan[i].dev =
+			xdev->pcie_dma_chan[i].chan->device->dev;
+		xdev->pcie_dma_chan[i].direction =
+			xdev->properties->direction[i];
+		xdev->pcie_dma_chan[i].mode =
+			xdev->properties->mode;
+		INIT_LIST_HEAD(&xdev->pcie_dma_chan[i].completed.clist);
+		spin_lock_init(&xdev->pcie_dma_chan[i].channel_lock);
+	}
+
+	return 0;
+
+err_out_no_channels:
+	while (i > 0) {
+		i--;
+		dma_release_channel(xdev->pcie_dma_chan[i].chan);
+	}
+	return err;
+}
+
+/*
+ * create_char_dev_iface_for_dma_device - Expose one char device per channel.
+ *
+ * Allocates a dev_t region, registers the shared cdev, then creates one
+ * sysfs device node per channel. Fully unwinds on any failure.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+static int create_char_dev_iface_for_dma_device(
+	struct xlnx_ps_pcie_dma_client_device *xdev)
+{
+	int err = 0;
+	int i;
+
+	WARN_ON(!xdev);
+
+	err = alloc_chrdev_region(&xdev->char_device, 0,
+				  MAX_ALLOWED_CHANNELS_IN_HW,
+				  CHAR_DRIVER_NAME);
+	if (err < 0) {
+		pr_err("Unable to allocate char device region\n");
+		return err;
+	}
+
+	xdev->xps_pcie_chardev.owner = THIS_MODULE;
+	cdev_init(&xdev->xps_pcie_chardev, &ps_pcie_dma_comm_fops);
+	xdev->xps_pcie_chardev.dev = xdev->char_device;
+
+	err = cdev_add(&xdev->xps_pcie_chardev, xdev->char_device,
+		       MAX_ALLOWED_CHANNELS_IN_HW);
+	if (err < 0) {
+		pr_err("PS PCIe DMA unable to add cdev\n");
+		goto err_out_cdev_add;
+	}
+
+	for (i = 0; i < MAX_ALLOWED_CHANNELS_IN_HW; i++) {
+		xdev->chardev[i] =
+			device_create(g_ps_pcie_dma_client_class,
+				      xdev->pcie_dma_chan[i].dev,
+				      MKDEV(MAJOR(xdev->char_device), i),
+				      xdev,
+				      "%s%d_%d", CHAR_DRIVER_NAME,
+				      i, xdev->properties->board_number);
+
+		/* Bug fix: device_create() returns ERR_PTR() on failure,
+		 * never NULL, so the old '!xdev->chardev[i]' check could
+		 * never fire.
+		 */
+		if (IS_ERR(xdev->chardev[i])) {
+			err = PTR_ERR(xdev->chardev[i]);
+			xdev->chardev[i] = NULL;
+			pr_err(
+				"PS PCIe DMA Unable to create device %d\n", i);
+			goto err_out_dev_create;
+		}
+	}
+
+	return 0;
+
+err_out_dev_create:
+	while (--i >= 0) {
+		device_destroy(g_ps_pcie_dma_client_class,
+			       MKDEV(MAJOR(xdev->char_device), i));
+	}
+	cdev_del(&xdev->xps_pcie_chardev);
+err_out_cdev_add:
+	unregister_chrdev_region(xdev->char_device, MAX_ALLOWED_CHANNELS_IN_HW);
+	return err;
+}
+
+/**
+ * create_char_dev_iface_for_pio - Create the PIO character device
+ * @xdev: Client device the PIO char device belongs to
+ *
+ * Registers a single char device for PIO transfers and initializes the
+ * mutex/completion used by the PIO file operations.
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+static int create_char_dev_iface_for_pio(
+	struct xlnx_ps_pcie_dma_client_device *xdev)
+{
+	int err;
+
+	err = alloc_chrdev_region(&xdev->pio_char_device, 0, 1,
+				  PIO_CHAR_DRIVER_NAME);
+	if (err < 0) {
+		pr_err("Unable to allocate pio character device region\n");
+		return err;
+	}
+
+	xdev->xpio_char_dev.owner = THIS_MODULE;
+	cdev_init(&xdev->xpio_char_dev, &ps_pcie_pio_fops);
+	xdev->xpio_char_dev.dev = xdev->pio_char_device;
+
+	err = cdev_add(&xdev->xpio_char_dev, xdev->pio_char_device, 1);
+	if (err < 0) {
+		pr_err("PS PCIe DMA unable to add cdev for pio\n");
+		goto err_out_pio_cdev_add;
+	}
+
+	xdev->xpio_char_device =
+		device_create(g_ps_pcie_dma_client_class,
+			      xdev->pcie_dma_chan[0].dev,
+			      MKDEV(MAJOR(xdev->pio_char_device), 0),
+			      xdev, "%s_%d", PIO_CHAR_DRIVER_NAME,
+			      xdev->properties->board_number);
+
+	/*
+	 * device_create() reports failure through ERR_PTR(), never NULL;
+	 * the previous NULL check was dead code and PTR_ERR(NULL) is 0.
+	 */
+	if (IS_ERR(xdev->xpio_char_device)) {
+		err = PTR_ERR(xdev->xpio_char_device);
+		xdev->xpio_char_device = NULL;
+		pr_err("PS PCIe DMA Unable to create pio device\n");
+		goto err_out_pio_dev_create;
+	}
+
+	mutex_init(&xdev->pio_chardev_mutex);
+	xdev->pio_translation_size = 0;
+	init_completion(&xdev->trans_cmpltn);
+
+	return 0;
+
+err_out_pio_dev_create:
+	cdev_del(&xdev->xpio_char_dev);
+err_out_pio_cdev_add:
+	unregister_chrdev_region(xdev->pio_char_device, 1);
+	return err;
+}
+
+/**
+ * create_char_dev_interfaces - Create all char dev interfaces for a device
+ * @xdev: Client device to create interfaces for
+ *
+ * The DMA transaction interface is always created; the PIO interface is
+ * created only when the device properties advertise PIO support.  If the
+ * PIO interface fails, the already-created DMA interface is torn down.
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+static int create_char_dev_interfaces(
+	struct xlnx_ps_pcie_dma_client_device *xdev)
+{
+	int err;
+
+	err = create_char_dev_iface_for_dma_device(xdev);
+	if (err != 0) {
+		pr_err("Unable to create char dev dma iface %d\n",
+		       xdev->properties->pci_deviceid);
+		return err;
+	}
+
+	/* PIO interface is optional. */
+	if (xdev->properties->pio_transfers != PIO_SUPPORTED)
+		return 0;
+
+	err = create_char_dev_iface_for_pio(xdev);
+	if (err != 0) {
+		pr_err("Unable to create char dev pio iface %d\n",
+		       xdev->properties->pci_deviceid);
+		destroy_char_iface_for_dma(xdev);
+		return err;
+	}
+
+	return 0;
+}
+
+/**
+ * setup_char_devices - Allocate and register one client device instance
+ * @dev_prop_index: Index into g_dma_deviceproperties_list
+ *
+ * Allocates the client device, acquires its DMA channels, creates the
+ * character device interfaces and finally links the instance into the
+ * global client list.  Each step is unwound on failure.
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+static int setup_char_devices(u16 dev_prop_index)
+{
+	struct xlnx_ps_pcie_dma_client_device *xdev;
+	int err;
+	int i;
+
+	xdev = kzalloc(sizeof(*xdev), GFP_KERNEL);
+	if (!xdev)
+		return -ENOMEM;
+
+	xdev->properties = &g_dma_deviceproperties_list[dev_prop_index];
+
+	err = acquire_dma_channels(xdev);
+	if (err != 0) {
+		pr_err("Unable to acquire dma channels %d\n",
+		       dev_prop_index);
+		goto free_xdev;
+	}
+
+	err = create_char_dev_interfaces(xdev);
+	if (err != 0) {
+		pr_err("Unable to create char dev interfaces %d\n",
+		       dev_prop_index);
+		goto release_channels;
+	}
+
+	list_add_tail(&xdev->dev_node, &g_ps_pcie_dma_client_list);
+
+	return 0;
+
+release_channels:
+	for (i = 0; i < MAX_ALLOWED_CHANNELS_IN_HW; i++)
+		dma_release_channel(xdev->pcie_dma_chan[i].chan);
+free_xdev:
+	kfree(xdev);
+	return err;
+}
+
+/**
+ * ps_pcie_dma_client_init - Driver init function
+ *
+ * Creates the device class and then one client instance per entry in
+ * g_dma_deviceproperties_list.  On any failure all instances created so
+ * far are deleted and the class is destroyed.
+ *
+ * Return: 0 on success. Non zero on failure
+ */
+static int __init ps_pcie_dma_client_init(void)
+{
+	size_t num_dma_dev_properties;
+	int err;
+	int idx;
+
+	INIT_LIST_HEAD(&g_ps_pcie_dma_client_list);
+
+	g_ps_pcie_dma_client_class = class_create(THIS_MODULE, DRV_MODULE_NAME);
+	if (IS_ERR(g_ps_pcie_dma_client_class)) {
+		pr_err("%s failed to create class\n", DRV_MODULE_NAME);
+		return PTR_ERR(g_ps_pcie_dma_client_class);
+	}
+
+	num_dma_dev_properties = ARRAY_SIZE(g_dma_deviceproperties_list);
+	for (idx = 0; idx < num_dma_dev_properties; idx++) {
+		err = setup_char_devices(idx);
+		if (err) {
+			pr_err("Error creating char devices for %d\n", idx);
+			goto err_cleanup;
+		}
+	}
+
+	pr_info("PS PCIe DMA Client Driver Init successful\n");
+	return 0;
+
+err_cleanup:
+	delete_char_devices();
+
+	if (g_ps_pcie_dma_client_class)
+		class_destroy(g_ps_pcie_dma_client_class);
+	return err;
+}
+late_initcall(ps_pcie_dma_client_init);
+
+/**
+ * ps_pcie_dma_client_exit - Driver exit function
+ *
+ * Deletes every registered client char device and then destroys the
+ * device class created at init time.
+ */
+static void __exit ps_pcie_dma_client_exit(void)
+{
+	delete_char_devices();
+
+	if (g_ps_pcie_dma_client_class)
+		class_destroy(g_ps_pcie_dma_client_class);
+}
+
+module_exit(ps_pcie_dma_client_exit);
+
+MODULE_AUTHOR("Xilinx Inc");
+MODULE_DESCRIPTION("Xilinx PS PCIe DMA client Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/xilinx/xilinx_ps_pcie_main.c b/drivers/dma/xilinx/xilinx_ps_pcie_main.c
new file mode 100644
index 000000000000..cb3151219083
--- /dev/null
+++ b/drivers/dma/xilinx/xilinx_ps_pcie_main.c
@@ -0,0 +1,200 @@
+/*
+ * XILINX PS PCIe driver
+ *
+ * Copyright (C) 2017 Xilinx, Inc. All rights reserved.
+ *
+ * Description
+ * PS PCIe DMA is memory mapped DMA used to execute PS to PL transfers
+ * on ZynqMP UltraScale+ Devices.
+ * This PCIe driver creates a platform device with specific platform
+ * info enabling creation of DMA device corresponding to the channel
+ * information provided in the properties
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation
+ */
+
+#include "xilinx_ps_pcie.h"
+#include "../dmaengine.h"
+
+#define DRV_MODULE_NAME "ps_pcie_dma"
+
+static int ps_pcie_dma_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+static void ps_pcie_dma_remove(struct pci_dev *pdev);
+
+/*
+ * Per-channel property tuples consumed by the platform driver.  Field
+ * order matches enum dev_channel_properties in the platform driver:
+ * direction, number of buffer descriptors, number of queues, coalesce
+ * count, poll timer frequency.
+ */
+static u32 channel_properties_pcie_axi[] = {
+	(u32)(PCIE_AXI_DIRECTION), (u32)(NUMBER_OF_BUFFER_DESCRIPTORS),
+	(u32)(DEFAULT_DMA_QUEUES), (u32)(CHANNEL_COAELSE_COUNT),
+	(u32)(CHANNEL_POLL_TIMER_FREQUENCY) };
+
+static u32 channel_properties_axi_pcie[] = {
+	(u32)(AXI_PCIE_DIRECTION), (u32)(NUMBER_OF_BUFFER_DESCRIPTORS),
+	(u32)(DEFAULT_DMA_QUEUES), (u32)(CHANNEL_COAELSE_COUNT),
+	(u32)(CHANNEL_POLL_TIMER_FREQUENCY) };
+
+/*
+ * Default channel layout handed to the platform device: channels 0 and 2
+ * transfer PCIe -> AXI, channels 1 and 3 transfer AXI -> PCIe.  Device
+ * specific probes may override these before registration.
+ */
+static struct property_entry generic_pcie_ep_property[] = {
+	PROPERTY_ENTRY_U32("numchannels", (u32)MAX_NUMBER_OF_CHANNELS),
+	PROPERTY_ENTRY_U32_ARRAY("ps_pcie_channel0",
+				 channel_properties_pcie_axi),
+	PROPERTY_ENTRY_U32_ARRAY("ps_pcie_channel1",
+				 channel_properties_axi_pcie),
+	PROPERTY_ENTRY_U32_ARRAY("ps_pcie_channel2",
+				 channel_properties_pcie_axi),
+	PROPERTY_ENTRY_U32_ARRAY("ps_pcie_channel3",
+				 channel_properties_axi_pcie),
+	{ },
+};
+
+/* Template platform device; copied and specialized in probe(). */
+static const struct platform_device_info xlnx_std_platform_dev_info = {
+	.name = XLNX_PLATFORM_DRIVER_NAME,
+	.properties = generic_pcie_ep_property,
+};
+
+/**
+ * ps_pcie_dma_probe - Driver probe function
+ * @pdev: Pointer to the pci_dev structure
+ * @ent: pci device id
+ *
+ * Enables the PCI device, programs 64-bit (falling back to 32-bit)
+ * streaming and coherent DMA masks, and - except for the Root DMA
+ * device, which is instantiated through the device tree - registers a
+ * platform device carrying the channel configuration for the DMA
+ * platform driver.
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int ps_pcie_dma_probe(struct pci_dev *pdev,
+			     const struct pci_device_id *ent)
+{
+	int err;
+	struct platform_device *platform_dev;
+	struct platform_device_info platform_dev_info;
+
+	dev_info(&pdev->dev, "PS PCIe DMA Driver probe\n");
+
+	/* Managed enable: device is disabled automatically on detach. */
+	err = pcim_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+		return err;
+	}
+
+	/* Prefer a 64-bit streaming DMA mask, fall back to 32-bit. */
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (err) {
+		dev_info(&pdev->dev, "Cannot set 64 bit DMA mask\n");
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev, "DMA mask set error\n");
+			return err;
+		}
+	}
+
+	/* Same 64-then-32 bit fallback for the coherent allocations. */
+	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (err) {
+		dev_info(&pdev->dev, "Cannot set 64 bit consistent DMA mask\n");
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev, "Cannot set consistent DMA mask\n");
+			return err;
+		}
+	}
+
+	pci_set_master(pdev);
+
+	/* For Root DMA platform device will be created through device tree */
+	if (pdev->vendor == PCI_VENDOR_ID_XILINX &&
+	    pdev->device == ZYNQMP_RC_DMA_DEVID)
+		return 0;
+
+	memcpy(&platform_dev_info, &xlnx_std_platform_dev_info,
+	       sizeof(xlnx_std_platform_dev_info));
+
+	/* Do device specific channel configuration changes to
+	 * platform_dev_info.properties if required
+	 * More information on channel properties can be found
+	 * at Documentation/devicetree/bindings/dma/xilinx/ps-pcie-dma.txt
+	 */
+
+	platform_dev_info.parent = &pdev->dev;
+	/*
+	 * NOTE(review): platform_device_register_full() copies size_data
+	 * bytes from .data, so this copies the pci_dev pointer value (not
+	 * the local variable's address) into the platform data - confirm
+	 * against the platform driver's consumer.
+	 */
+	platform_dev_info.data = &pdev;
+	platform_dev_info.size_data = sizeof(struct pci_dev **);
+
+	platform_dev = platform_device_register_full(&platform_dev_info);
+	if (IS_ERR(platform_dev)) {
+		dev_err(&pdev->dev,
+			"Cannot create platform device, aborting\n");
+		return PTR_ERR(platform_dev);
+	}
+
+	pci_set_drvdata(pdev, platform_dev);
+
+	dev_info(&pdev->dev, "PS PCIe DMA driver successfully probed\n");
+
+	return 0;
+}
+
+/* PCI ids serviced by this driver; exported for module autoloading. */
+static const struct pci_device_id ps_pcie_dma_tbl[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_XILINX, ZYNQMP_DMA_DEVID) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_XILINX, ZYNQMP_RC_DMA_DEVID) },
+	{ }
+};
+/* Without this the module is never auto-loaded on device enumeration. */
+MODULE_DEVICE_TABLE(pci, ps_pcie_dma_tbl);
+
+static struct pci_driver ps_pcie_dma_driver = {
+	.name = DRV_MODULE_NAME,
+	.id_table = ps_pcie_dma_tbl,
+	.probe = ps_pcie_dma_probe,
+	.remove = ps_pcie_dma_remove,
+};
+
+/**
+ * ps_pcie_init - Driver init function
+ *
+ * Registers the PCI driver first, then the platform driver that binds
+ * to the platform devices created during PCI probe.  The PCI driver is
+ * unregistered again if platform registration fails.
+ *
+ * Return: 0 on success. Non zero on failure
+ */
+static int __init ps_pcie_init(void)
+{
+	int err;
+
+	pr_info("%s init()\n", DRV_MODULE_NAME);
+
+	err = pci_register_driver(&ps_pcie_dma_driver);
+	if (err)
+		return err;
+
+	err = dma_platform_driver_register();
+	if (!err)
+		return 0;
+
+	pci_unregister_driver(&ps_pcie_dma_driver);
+	return err;
+}
+
+/**
+ * ps_pcie_dma_remove - Driver remove function
+ * @pdev: Pointer to the pci_dev structure
+ *
+ * Unregisters the platform device created at probe time, if any.
+ * Root DMA devices never had one, hence the NULL check.
+ *
+ * Return: void
+ */
+static void ps_pcie_dma_remove(struct pci_dev *pdev)
+{
+	struct platform_device *platform_dev = pci_get_drvdata(pdev);
+
+	if (platform_dev)
+		platform_device_unregister(platform_dev);
+}
+
+/**
+ * ps_pcie_exit - Driver exit function
+ *
+ * Tears down in reverse registration order: platform driver first,
+ * then the PCI driver.
+ *
+ * Return: void
+ */
+static void __exit ps_pcie_exit(void)
+{
+	pr_info("%s exit()\n", DRV_MODULE_NAME);
+
+	dma_platform_driver_unregister();
+	pci_unregister_driver(&ps_pcie_dma_driver);
+}
+
+module_init(ps_pcie_init);
+module_exit(ps_pcie_exit);
+
+MODULE_AUTHOR("Xilinx Inc");
+MODULE_DESCRIPTION("Xilinx PS PCIe DMA Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/xilinx/xilinx_ps_pcie_platform.c b/drivers/dma/xilinx/xilinx_ps_pcie_platform.c
new file mode 100644
index 000000000000..17ad2cbfdeec
--- /dev/null
+++ b/drivers/dma/xilinx/xilinx_ps_pcie_platform.c
@@ -0,0 +1,3170 @@
+/*
+ * XILINX PS PCIe DMA driver
+ *
+ * Copyright (C) 2017 Xilinx, Inc. All rights reserved.
+ *
+ * Description
+ * PS PCIe DMA is memory mapped DMA used to execute PS to PL transfers
+ * on ZynqMP UltraScale+ Devices
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation
+ */
+
+#include "xilinx_ps_pcie.h"
+#include "../dmaengine.h"
+
+#define PLATFORM_DRIVER_NAME "ps_pcie_pform_dma"
+#define MAX_BARS 6
+
+#define DMA_BAR_NUMBER 0
+
+#define MIN_SW_INTR_TRANSACTIONS 2
+
+#define CHANNEL_PROPERTY_LENGTH 50
+#define WORKQ_NAME_SIZE 100
+#define INTR_HANDLR_NAME_SIZE 100
+
+#define PS_PCIE_DMA_IRQ_NOSHARE 0
+
+#define MAX_COALESCE_COUNT 255
+
+#define DMA_CHANNEL_REGS_SIZE 0x80
+
+#define DMA_SRCQPTRLO_REG_OFFSET (0x00) /* Source Q pointer Lo */
+#define DMA_SRCQPTRHI_REG_OFFSET (0x04) /* Source Q pointer Hi */
+#define DMA_SRCQSZ_REG_OFFSET (0x08) /* Source Q size */
+#define DMA_SRCQLMT_REG_OFFSET (0x0C) /* Source Q limit */
+#define DMA_DSTQPTRLO_REG_OFFSET (0x10) /* Destination Q pointer Lo */
+#define DMA_DSTQPTRHI_REG_OFFSET (0x14) /* Destination Q pointer Hi */
+#define DMA_DSTQSZ_REG_OFFSET (0x18) /* Destination Q size */
+#define DMA_DSTQLMT_REG_OFFSET (0x1C) /* Destination Q limit */
+#define DMA_SSTAQPTRLO_REG_OFFSET (0x20) /* Source Status Q pointer Lo */
+#define DMA_SSTAQPTRHI_REG_OFFSET (0x24) /* Source Status Q pointer Hi */
+#define DMA_SSTAQSZ_REG_OFFSET (0x28) /* Source Status Q size */
+#define DMA_SSTAQLMT_REG_OFFSET (0x2C) /* Source Status Q limit */
+#define DMA_DSTAQPTRLO_REG_OFFSET (0x30) /* Destination Status Q pointer Lo */
+#define DMA_DSTAQPTRHI_REG_OFFSET (0x34) /* Destination Status Q pointer Hi */
+#define DMA_DSTAQSZ_REG_OFFSET (0x38) /* Destination Status Q size */
+#define DMA_DSTAQLMT_REG_OFFSET (0x3C) /* Destination Status Q limit */
+#define DMA_SRCQNXT_REG_OFFSET (0x40) /* Source Q next */
+#define DMA_DSTQNXT_REG_OFFSET (0x44) /* Destination Q next */
+#define DMA_SSTAQNXT_REG_OFFSET (0x48) /* Source Status Q next */
+#define DMA_DSTAQNXT_REG_OFFSET (0x4C) /* Destination Status Q next */
+#define DMA_SCRATCH0_REG_OFFSET (0x50) /* Scratch pad register 0 */
+
+#define DMA_PCIE_INTR_CNTRL_REG_OFFSET (0x60) /* DMA PCIe intr control reg */
+#define DMA_PCIE_INTR_STATUS_REG_OFFSET (0x64) /* DMA PCIe intr status reg */
+#define DMA_AXI_INTR_CNTRL_REG_OFFSET (0x68) /* DMA AXI intr control reg */
+#define DMA_AXI_INTR_STATUS_REG_OFFSET (0x6C) /* DMA AXI intr status reg */
+#define DMA_PCIE_INTR_ASSRT_REG_OFFSET (0x70) /* PCIe intr assert reg */
+#define DMA_AXI_INTR_ASSRT_REG_OFFSET (0x74) /* AXI intr assert register */
+#define DMA_CNTRL_REG_OFFSET (0x78) /* DMA control register */
+#define DMA_STATUS_REG_OFFSET (0x7C) /* DMA status register */
+
+#define DMA_CNTRL_RST_BIT BIT(1)
+#define DMA_CNTRL_64BIT_STAQ_ELEMSZ_BIT BIT(2)
+#define DMA_CNTRL_ENABL_BIT BIT(0)
+#define DMA_STATUS_DMA_PRES_BIT BIT(15)
+#define DMA_STATUS_DMA_RUNNING_BIT BIT(0)
+#define DMA_QPTRLO_QLOCAXI_BIT BIT(0)
+#define DMA_QPTRLO_Q_ENABLE_BIT BIT(1)
+#define DMA_INTSTATUS_DMAERR_BIT BIT(1)
+#define DMA_INTSTATUS_SGLINTR_BIT BIT(2)
+#define DMA_INTSTATUS_SWINTR_BIT BIT(3)
+#define DMA_INTCNTRL_ENABLINTR_BIT BIT(0)
+#define DMA_INTCNTRL_DMAERRINTR_BIT BIT(1)
+#define DMA_INTCNTRL_DMASGINTR_BIT BIT(2)
+#define DMA_SW_INTR_ASSRT_BIT BIT(3)
+
+#define SOURCE_CONTROL_BD_BYTE_COUNT_MASK GENMASK(23, 0)
+#define SOURCE_CONTROL_BD_LOC_AXI BIT(24)
+#define SOURCE_CONTROL_BD_EOP_BIT BIT(25)
+#define SOURCE_CONTROL_BD_INTR_BIT BIT(26)
+#define SOURCE_CONTROL_BACK_TO_BACK_PACK_BIT BIT(25)
+#define SOURCE_CONTROL_ATTRIBUTES_MASK GENMASK(31, 28)
+#define SRC_CTL_ATTRIB_BIT_SHIFT (29)
+
+#define STA_BD_COMPLETED_BIT BIT(0)
+#define STA_BD_SOURCE_ERROR_BIT BIT(1)
+#define STA_BD_DESTINATION_ERROR_BIT BIT(2)
+#define STA_BD_INTERNAL_ERROR_BIT BIT(3)
+#define STA_BD_UPPER_STATUS_NONZERO_BIT BIT(31)
+#define STA_BD_BYTE_COUNT_MASK GENMASK(30, 4)
+
+#define STA_BD_BYTE_COUNT_SHIFT 4
+
+#define DMA_INTCNTRL_SGCOLSCCNT_BIT_SHIFT (16)
+
+#define DMA_SRC_Q_LOW_BIT_SHIFT GENMASK(5, 0)
+
+#define MAX_TRANSFER_LENGTH 0x1000000
+
+#define AXI_ATTRIBUTE 0x3
+#define PCI_ATTRIBUTE 0x2
+
+#define ROOTDMA_Q_READ_ATTRIBUTE 0x8
+
+/*
+ * User Id programmed into Source Q will be copied into Status Q of Destination
+ */
+#define DEFAULT_UID 1
+
+/*
+ * DMA channel registers
+ */
+/*
+ * Memory layout of one DMA channel's register block.  The structure is
+ * packed so field offsets match the DMA_*_REG_OFFSET values above; it
+ * is overlaid on the memory-mapped channel base address.
+ */
+struct DMA_ENGINE_REGISTERS {
+	u32 src_q_low;		/* 0x00 */
+	u32 src_q_high;		/* 0x04 */
+	u32 src_q_size;		/* 0x08 */
+	u32 src_q_limit;	/* 0x0C */
+	u32 dst_q_low;		/* 0x10 */
+	u32 dst_q_high;		/* 0x14 */
+	u32 dst_q_size;		/* 0x18 */
+	u32 dst_q_limit;	/* 0x1c */
+	u32 stas_q_low;		/* 0x20 */
+	u32 stas_q_high;	/* 0x24 */
+	u32 stas_q_size;	/* 0x28 */
+	u32 stas_q_limit;	/* 0x2C */
+	u32 stad_q_low;		/* 0x30 */
+	u32 stad_q_high;	/* 0x34 */
+	u32 stad_q_size;	/* 0x38 */
+	u32 stad_q_limit;	/* 0x3C */
+	u32 src_q_next;		/* 0x40 */
+	u32 dst_q_next;		/* 0x44 */
+	u32 stas_q_next;	/* 0x48 */
+	u32 stad_q_next;	/* 0x4C */
+	u32 scrathc0;		/* 0x50 */
+	u32 scrathc1;		/* 0x54 */
+	u32 scrathc2;		/* 0x58 */
+	u32 scrathc3;		/* 0x5C */
+	u32 pcie_intr_cntrl;	/* 0x60 */
+	u32 pcie_intr_status;	/* 0x64 */
+	u32 axi_intr_cntrl;	/* 0x68 */
+	u32 axi_intr_status;	/* 0x6C */
+	u32 pcie_intr_assert;	/* 0x70 */
+	u32 axi_intr_assert;	/* 0x74 */
+	u32 dma_channel_ctrl;	/* 0x78 */
+	u32 dma_channel_status;	/* 0x7C */
+} __attribute__((__packed__));
+
+/**
+ * struct SOURCE_DMA_DESCRIPTOR - Source Hardware Descriptor
+ * @system_address: 64 bit buffer physical address
+ * @control_byte_count: Byte count/buffer length and control flags
+ * @user_handle: User handle gets copied to status q on completion
+ * @user_id: User id gets copied to status q of destination
+ */
+struct SOURCE_DMA_DESCRIPTOR {
+ u64 system_address;
+ u32 control_byte_count;
+ u16 user_handle;
+ u16 user_id;
+} __attribute__((__packed__));
+
+/**
+ * struct DEST_DMA_DESCRIPTOR - Destination Hardware Descriptor
+ * @system_address: 64 bit buffer physical address
+ * @control_byte_count: Byte count/buffer length and control flags
+ * @user_handle: User handle gets copied to status q on completion
+ * @reserved: Reserved field
+ */
+struct DEST_DMA_DESCRIPTOR {
+ u64 system_address;
+ u32 control_byte_count;
+ u16 user_handle;
+ u16 reserved;
+} __attribute__((__packed__));
+
+/**
+ * struct STATUS_DMA_DESCRIPTOR - Status Hardware Descriptor
+ * @status_flag_byte_count: Byte count/buffer length and status flags
+ * @user_handle: User handle gets copied from src/dstq on completion
+ * @user_id: User id gets copied from srcq
+ */
+struct STATUS_DMA_DESCRIPTOR {
+ u32 status_flag_byte_count;
+ u16 user_handle;
+ u16 user_id;
+} __attribute__((__packed__));
+
+enum PACKET_CONTEXT_AVAILABILITY {
+ FREE = 0, /*Packet transfer Parameter context is free.*/
+ IN_USE /*Packet transfer Parameter context is in use.*/
+};
+
+/**
+ * struct ps_pcie_transfer_elements - One src/dst address pair of a transfer
+ * @node: Entry in the owning segment's transfer_nodes list
+ * @src_pa: Source physical address
+ * @dst_pa: Destination physical address
+ * @transfer_bytes: Number of bytes moved by this element
+ */
+struct ps_pcie_transfer_elements {
+	struct list_head node;
+	dma_addr_t src_pa;
+	dma_addr_t dst_pa;
+	u32 transfer_bytes;
+};
+
+/**
+ * struct ps_pcie_tx_segment - One dmaengine DMA transaction
+ * @node: Entry in the channel's transaction lists
+ * @async_tx: dmaengine descriptor handed back to the client
+ * @transfer_nodes: List of ps_pcie_transfer_elements for this transaction
+ * @src_elements: Number of source elements
+ * @dst_elements: Number of destination elements
+ * @total_transfer_bytes: Total byte count of the transaction
+ */
+struct ps_pcie_tx_segment {
+	struct list_head node;
+	struct dma_async_tx_descriptor async_tx;
+	struct list_head transfer_nodes;
+	u32 src_elements;
+	u32 dst_elements;
+	u32 total_transfer_bytes;
+};
+
+/**
+ * struct ps_pcie_intr_segment - Software interrupt transaction
+ * @node: Entry in the channel's interrupt transaction lists
+ * @async_intr_tx: dmaengine descriptor for the interrupt transaction
+ */
+struct ps_pcie_intr_segment {
+	struct list_head node;
+	struct dma_async_tx_descriptor async_intr_tx;
+};
+
+/*
+ * The context structure stored for each DMA transaction
+ * This structure is maintained separately for Src Q and Destination Q
+ * @availability_status: Indicates whether packet context is available
+ * @idx_sop: Indicates starting index of buffer descriptor for a transfer
+ * @idx_eop: Indicates ending index of buffer descriptor for a transfer
+ * @sgl: Indicates either src or dst sglist for the transaction
+ */
+struct PACKET_TRANSFER_PARAMS {
+ enum PACKET_CONTEXT_AVAILABILITY availability_status;
+ u16 idx_sop;
+ u16 idx_eop;
+ struct ps_pcie_tx_segment *seg;
+};
+
+enum CHANNEL_STATE {
+ CHANNEL_RESOURCE_UNALLOCATED = 0, /* Channel resources not allocated */
+ CHANNEL_UNAVIALBLE, /* Channel inactive */
+ CHANNEL_AVAILABLE, /* Channel available for transfers */
+ CHANNEL_ERROR /* Channel encountered errors */
+};
+
+enum BUFFER_LOCATION {
+ BUFFER_LOC_PCI = 0,
+ BUFFER_LOC_AXI,
+ BUFFER_LOC_INVALID
+};
+
+enum dev_channel_properties {
+ DMA_CHANNEL_DIRECTION = 0,
+ NUM_DESCRIPTORS,
+ NUM_QUEUES,
+ COALESE_COUNT,
+ POLL_TIMER_FREQUENCY
+};
+
+/*
+ * struct ps_pcie_dma_chan - Driver specific DMA channel structure
+ * @xdev: Driver specific device structure
+ * @dev: The dma device
+ * @common: DMA common channel
+ * @chan_base: Pointer to Channel registers
+ * @channel_number: DMA channel number in the device
+ * @num_queues: Number of queues per channel.
+ * It should be four for memory mapped case and
+ * two for Streaming case
+ * @direction: Transfer direction
+ * @state: Indicates channel state
+ * @channel_lock: Spin lock to be used before changing channel state
+ * @cookie_lock: Spin lock to be used before assigning cookie for a transaction
+ * @coalesce_count: Indicates number of packet transfers before interrupts
+ * @poll_timer_freq:Indicates frequency of polling for completed transactions
+ * @poll_timer: Timer to poll dma buffer descriptors if coalesce count is > 0
+ * @src_avail_descriptors: Available sgl source descriptors
+ * @src_desc_lock: Lock for synchronizing src_avail_descriptors
+ * @dst_avail_descriptors: Available sgl destination descriptors
+ * @dst_desc_lock: Lock for synchronizing
+ * dst_avail_descriptors
+ * @src_sgl_bd_pa: Physical address of Source SGL buffer Descriptors
+ * @psrc_sgl_bd: Virtual address of Source SGL buffer Descriptors
+ * @src_sgl_freeidx: Holds index of Source SGL buffer descriptor to be filled
+ * @sglDestinationQLock:Lock to serialize Destination Q updates
+ * @dst_sgl_bd_pa: Physical address of Dst SGL buffer Descriptors
+ * @pdst_sgl_bd: Virtual address of Dst SGL buffer Descriptors
+ * @dst_sgl_freeidx: Holds index of Destination SGL
+ * @src_sta_bd_pa: Physical address of StatusQ buffer Descriptors
+ * @psrc_sta_bd: Virtual address of Src StatusQ buffer Descriptors
+ * @src_staprobe_idx: Holds index of Status Q to be examined for SrcQ updates
+ * @src_sta_hw_probe_idx: Holds index of maximum limit of Status Q for hardware
+ * @dst_sta_bd_pa: Physical address of Dst StatusQ buffer Descriptor
+ * @pdst_sta_bd: Virtual address of Dst Status Q buffer Descriptors
+ * @dst_staprobe_idx: Holds index of Status Q to be examined for updates
+ * @dst_sta_hw_probe_idx: Holds index of max limit of Dst Status Q for hardware
+ * @read_attribute: Describes the attributes of buffer in srcq
+ * @write_attribute: Describes the attributes of buffer in dstq
+ * @intr_status_offset: Register offset to be checked on receiving interrupt
+ * @intr_control_offset: Register offset to be used to control interrupts
+ * @ppkt_ctx_srcq: Virtual address of packet context to Src Q updates
+ * @idx_ctx_srcq_head: Holds index of packet context to be filled for Source Q
+ * @idx_ctx_srcq_tail: Holds index of packet context to be examined for Source Q
+ * @ppkt_ctx_dstq: Virtual address of packet context to Dst Q updates
+ * @idx_ctx_dstq_head: Holds index of packet context to be filled for Dst Q
+ * @idx_ctx_dstq_tail: Holds index of packet context to be examined for Dst Q
+ * @pending_list_lock: Lock to be taken before updating pending transfers list
+ * @pending_list: List of transactions submitted to channel
+ * @active_list_lock: Lock to be taken before transferring transactions from
+ * pending list to active list which will be subsequently
+ * submitted to hardware
+ * @active_list: List of transactions that will be submitted to hardware
+ * @pending_interrupts_lock: Lock to be taken before updating pending Intr list
+ * @pending_interrupts_list: List of interrupt transactions submitted to channel
+ * @active_interrupts_lock: Lock to be taken before transferring transactions
+ * from pending interrupt list to active interrupt list
+ * @active_interrupts_list: List of interrupt transactions that are active
+ * @transactions_pool: Mem pool to allocate dma transactions quickly
+ * @intr_transactions_pool: Mem pool to allocate interrupt transactions quickly
+ * @sw_intrs_wrkq: Work Q which performs handling of software intrs
+ * @handle_sw_intrs:Work function handling software interrupts
+ * @maintenance_workq: Work Q to perform maintenance tasks during stop or error
+ * @handle_chan_reset: Work that invokes channel reset function
+ * @handle_chan_shutdown: Work that invokes channel shutdown function
+ * @handle_chan_terminate: Work that invokes channel transactions termination
+ * @chan_shutdown_complt: Completion variable which says shutdown is done
+ * @chan_terminate_complete: Completion variable which says terminate is done
+ * @primary_desc_cleanup: Work Q which performs work related to sgl handling
+ * @handle_primary_desc_cleanup: Work that invokes src Q, dst Q cleanup
+ * and programming
+ * @chan_programming: Work Q which performs work related to channel programming
+ * @handle_chan_programming: Work that invokes channel programming function
+ * @srcq_desc_cleanup: Work Q which performs src Q descriptor cleanup
+ * @handle_srcq_desc_cleanup: Work function handling Src Q completions
+ * @dstq_desc_cleanup: Work Q which performs dst Q descriptor cleanup
+ * @handle_dstq_desc_cleanup: Work function handling Dst Q completions
+ * @srcq_work_complete: Src Q Work completion variable for primary work
+ * @dstq_work_complete: Dst Q Work completion variable for primary work
+ */
+struct ps_pcie_dma_chan {
+ struct xlnx_pcie_dma_device *xdev;
+ struct device *dev;
+
+ struct dma_chan common;
+
+ struct DMA_ENGINE_REGISTERS *chan_base;
+ u16 channel_number;
+
+ u32 num_queues;
+ enum dma_data_direction direction;
+ enum BUFFER_LOCATION srcq_buffer_location;
+ enum BUFFER_LOCATION dstq_buffer_location;
+
+ u32 total_descriptors;
+
+ enum CHANNEL_STATE state;
+ spinlock_t channel_lock; /* For changing channel state */
+
+ spinlock_t cookie_lock; /* For acquiring cookie from dma framework*/
+
+ u32 coalesce_count;
+ u32 poll_timer_freq;
+
+ struct timer_list poll_timer;
+
+ u32 src_avail_descriptors;
+ spinlock_t src_desc_lock; /* For handling srcq available descriptors */
+
+ u32 dst_avail_descriptors;
+ spinlock_t dst_desc_lock; /* For handling dstq available descriptors */
+
+ dma_addr_t src_sgl_bd_pa;
+ struct SOURCE_DMA_DESCRIPTOR *psrc_sgl_bd;
+ u32 src_sgl_freeidx;
+
+ dma_addr_t dst_sgl_bd_pa;
+ struct DEST_DMA_DESCRIPTOR *pdst_sgl_bd;
+ u32 dst_sgl_freeidx;
+
+ dma_addr_t src_sta_bd_pa;
+ struct STATUS_DMA_DESCRIPTOR *psrc_sta_bd;
+ u32 src_staprobe_idx;
+ u32 src_sta_hw_probe_idx;
+
+ dma_addr_t dst_sta_bd_pa;
+ struct STATUS_DMA_DESCRIPTOR *pdst_sta_bd;
+ u32 dst_staprobe_idx;
+ u32 dst_sta_hw_probe_idx;
+
+ u32 read_attribute;
+ u32 write_attribute;
+
+ u32 intr_status_offset;
+ u32 intr_control_offset;
+
+ struct PACKET_TRANSFER_PARAMS *ppkt_ctx_srcq;
+ u16 idx_ctx_srcq_head;
+ u16 idx_ctx_srcq_tail;
+
+ struct PACKET_TRANSFER_PARAMS *ppkt_ctx_dstq;
+ u16 idx_ctx_dstq_head;
+ u16 idx_ctx_dstq_tail;
+
+ spinlock_t pending_list_lock; /* For handling dma pending_list */
+ struct list_head pending_list;
+ spinlock_t active_list_lock; /* For handling dma active_list */
+ struct list_head active_list;
+
+ spinlock_t pending_interrupts_lock; /* For dma pending interrupts list*/
+ struct list_head pending_interrupts_list;
+ spinlock_t active_interrupts_lock; /* For dma active interrupts list*/
+ struct list_head active_interrupts_list;
+
+ mempool_t *transactions_pool;
+ mempool_t *tx_elements_pool;
+ mempool_t *intr_transactions_pool;
+
+ struct workqueue_struct *sw_intrs_wrkq;
+ struct work_struct handle_sw_intrs;
+
+ struct workqueue_struct *maintenance_workq;
+ struct work_struct handle_chan_reset;
+ struct work_struct handle_chan_shutdown;
+ struct work_struct handle_chan_terminate;
+
+ struct completion chan_shutdown_complt;
+ struct completion chan_terminate_complete;
+
+ struct workqueue_struct *primary_desc_cleanup;
+ struct work_struct handle_primary_desc_cleanup;
+
+ struct workqueue_struct *chan_programming;
+ struct work_struct handle_chan_programming;
+
+ struct workqueue_struct *srcq_desc_cleanup;
+ struct work_struct handle_srcq_desc_cleanup;
+ struct completion srcq_work_complete;
+
+ struct workqueue_struct *dstq_desc_cleanup;
+ struct work_struct handle_dstq_desc_cleanup;
+ struct completion dstq_work_complete;
+};
+
+/*
+ * struct xlnx_pcie_dma_device - Driver specific platform device structure
+ * @is_rootdma: Indicates whether the dma instance is root port dma
+ * @dma_buf_ext_addr: Indicates whether target system is 32 bit or 64 bit
+ * @bar_mask: Indicates available pcie bars
+ * @board_number: Count value of platform device
+ * @dev: Device structure pointer for pcie device
+ * @channels: Pointer to device DMA channels structure
+ * @common: DMA device structure
+ * @num_channels: Number of channels active for the device
+ * @reg_base: Base address of first DMA channel of the device
+ * @irq_vecs: Number of irq vectors allocated to pci device
+ * @pci_dev: Parent pci device which created this platform device
+ * @bar_info: PCIe bar related information
+ * @platform_irq_vec: Platform irq vector number for root dma
+ * @rootdma_vendor: PCI Vendor id for root dma
+ * @rootdma_device: PCI Device id for root dma
+ */
+struct xlnx_pcie_dma_device {
+ bool is_rootdma;
+ bool dma_buf_ext_addr;
+ u32 bar_mask;
+ u16 board_number;
+ struct device *dev;
+ struct ps_pcie_dma_chan *channels;
+ struct dma_device common;
+ int num_channels;
+ int irq_vecs;
+ void __iomem *reg_base;
+ struct pci_dev *pci_dev;
+ struct BAR_PARAMS bar_info[MAX_BARS];
+ int platform_irq_vec;
+ u16 rootdma_vendor;
+ u16 rootdma_device;
+};
+
+#define to_xilinx_chan(chan) \
+ container_of(chan, struct ps_pcie_dma_chan, common)
+#define to_ps_pcie_dma_tx_descriptor(tx) \
+ container_of(tx, struct ps_pcie_tx_segment, async_tx)
+#define to_ps_pcie_dma_tx_intr_descriptor(tx) \
+ container_of(tx, struct ps_pcie_intr_segment, async_intr_tx)
+
+/* Function Protypes */
+static u32 ps_pcie_dma_read(struct ps_pcie_dma_chan *chan, u32 reg);
+static void ps_pcie_dma_write(struct ps_pcie_dma_chan *chan, u32 reg,
+ u32 value);
+static void ps_pcie_dma_clr_mask(struct ps_pcie_dma_chan *chan, u32 reg,
+ u32 mask);
+static void ps_pcie_dma_set_mask(struct ps_pcie_dma_chan *chan, u32 reg,
+ u32 mask);
+static int irq_setup(struct xlnx_pcie_dma_device *xdev);
+static int platform_irq_setup(struct xlnx_pcie_dma_device *xdev);
+static int chan_intr_setup(struct xlnx_pcie_dma_device *xdev);
+static int device_intr_setup(struct xlnx_pcie_dma_device *xdev);
+static int irq_probe(struct xlnx_pcie_dma_device *xdev);
+static int ps_pcie_check_intr_status(struct ps_pcie_dma_chan *chan);
+static irqreturn_t ps_pcie_dma_dev_intr_handler(int irq, void *data);
+static irqreturn_t ps_pcie_dma_chan_intr_handler(int irq, void *data);
+static int init_hw_components(struct ps_pcie_dma_chan *chan);
+static int init_sw_components(struct ps_pcie_dma_chan *chan);
+static void update_channel_read_attribute(struct ps_pcie_dma_chan *chan);
+static void update_channel_write_attribute(struct ps_pcie_dma_chan *chan);
+static void ps_pcie_chan_reset(struct ps_pcie_dma_chan *chan);
+static void poll_completed_transactions(struct timer_list *t);
+static bool check_descriptors_for_two_queues(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg);
+static bool check_descriptors_for_all_queues(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg);
+static bool check_descriptor_availability(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg);
+static void handle_error(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_update_srcq(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg);
+static void xlnx_ps_pcie_update_dstq(struct ps_pcie_dma_chan *chan,
+ struct ps_pcie_tx_segment *seg);
+static void ps_pcie_chan_program_work(struct work_struct *work);
+static void dst_cleanup_work(struct work_struct *work);
+static void src_cleanup_work(struct work_struct *work);
+static void ps_pcie_chan_primary_work(struct work_struct *work);
+static int probe_channel_properties(struct platform_device *platform_dev,
+ struct xlnx_pcie_dma_device *xdev,
+ u16 channel_number);
+static void xlnx_ps_pcie_destroy_mempool(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_free_worker_queues(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_free_pkt_ctxts(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_free_descriptors(struct ps_pcie_dma_chan *chan);
+static int xlnx_ps_pcie_channel_activate(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_channel_quiesce(struct ps_pcie_dma_chan *chan);
+static void ivk_cbk_for_pending(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_reset_channel(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_free_poll_timer(struct ps_pcie_dma_chan *chan);
+static int xlnx_ps_pcie_alloc_poll_timer(struct ps_pcie_dma_chan *chan);
+static void terminate_transactions_work(struct work_struct *work);
+static void chan_shutdown_work(struct work_struct *work);
+static void chan_reset_work(struct work_struct *work);
+static int xlnx_ps_pcie_alloc_worker_threads(struct ps_pcie_dma_chan *chan);
+static int xlnx_ps_pcie_alloc_mempool(struct ps_pcie_dma_chan *chan);
+static int xlnx_ps_pcie_alloc_pkt_contexts(struct ps_pcie_dma_chan *chan);
+static int dma_alloc_descriptors_two_queues(struct ps_pcie_dma_chan *chan);
+static int dma_alloc_decriptors_all_queues(struct ps_pcie_dma_chan *chan);
+static void xlnx_ps_pcie_dma_free_chan_resources(struct dma_chan *dchan);
+static int xlnx_ps_pcie_dma_alloc_chan_resources(struct dma_chan *dchan);
+static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx);
+static dma_cookie_t xilinx_intr_tx_submit(struct dma_async_tx_descriptor *tx);
+static struct dma_async_tx_descriptor *
+xlnx_ps_pcie_dma_prep_memcpy(struct dma_chan *channel, dma_addr_t dma_dst,
+ dma_addr_t dma_src, size_t len,
+ unsigned long flags);
+static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_slave_sg(
+ struct dma_chan *channel, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context);
+static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_interrupt(
+ struct dma_chan *channel, unsigned long flags);
+static void xlnx_ps_pcie_dma_issue_pending(struct dma_chan *channel);
+static int xlnx_ps_pcie_dma_terminate_all(struct dma_chan *channel);
+static int read_rootdma_config(struct platform_device *platform_dev,
+ struct xlnx_pcie_dma_device *xdev);
+static int read_epdma_config(struct platform_device *platform_dev,
+ struct xlnx_pcie_dma_device *xdev);
+static int xlnx_pcie_dma_driver_probe(struct platform_device *platform_dev);
+static int xlnx_pcie_dma_driver_remove(struct platform_device *platform_dev);
+
+/* IO accessors */
+/* Read a 32-bit channel register located @reg bytes past the channel base. */
+static inline u32 ps_pcie_dma_read(struct ps_pcie_dma_chan *chan, u32 reg)
+{
+	void __iomem *addr = (void __iomem *)((char *)(chan->chan_base) + reg);
+
+	return ioread32(addr);
+}
+
+/* Write @value to the 32-bit channel register @reg bytes past the base. */
+static inline void ps_pcie_dma_write(struct ps_pcie_dma_chan *chan, u32 reg,
+				     u32 value)
+{
+	void __iomem *addr = (void __iomem *)((char *)(chan->chan_base) + reg);
+
+	iowrite32(value, addr);
+}
+
+/* Clear the bits in @mask in channel register @reg (read-modify-write). */
+static inline void ps_pcie_dma_clr_mask(struct ps_pcie_dma_chan *chan, u32 reg,
+					u32 mask)
+{
+	u32 val = ps_pcie_dma_read(chan, reg);
+
+	ps_pcie_dma_write(chan, reg, val & ~mask);
+}
+
+/* Set the bits in @mask in channel register @reg (read-modify-write). */
+static inline void ps_pcie_dma_set_mask(struct ps_pcie_dma_chan *chan, u32 reg,
+					u32 mask)
+{
+	u32 val = ps_pcie_dma_read(chan, reg);
+
+	ps_pcie_dma_write(chan, reg, val | mask);
+}
+
+/**
+ * ps_pcie_dma_dev_intr_handler - This will be invoked for MSI/Legacy interrupts
+ *
+ * @irq: IRQ number
+ * @data: Pointer to the PS PCIe DMA device structure
+ *
+ * Polls every channel of the device, since a shared/MSI line does not
+ * identify which channel raised the interrupt.
+ *
+ * Return: IRQ_HANDLED if at least one channel had a pending interrupt,
+ * IRQ_NONE otherwise
+ */
+static irqreturn_t ps_pcie_dma_dev_intr_handler(int irq, void *data)
+{
+	struct xlnx_pcie_dma_device *xdev = data;
+	bool handled = false;
+	int i;
+
+	for (i = 0; i < xdev->num_channels; i++) {
+		if (ps_pcie_check_intr_status(&xdev->channels[i]) == 0)
+			handled = true;
+	}
+
+	return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/**
+ * ps_pcie_dma_chan_intr_handler - This will be invoked for MSI-X interrupts
+ *
+ * @irq: IRQ number
+ * @data: Pointer to the PS PCIe DMA channel structure
+ *
+ * With MSI-X each channel owns a dedicated vector, so only the one
+ * channel passed in @data needs to be checked.
+ *
+ * Return: IRQ_HANDLED
+ */
+static irqreturn_t ps_pcie_dma_chan_intr_handler(int irq, void *data)
+{
+	ps_pcie_check_intr_status((struct ps_pcie_dma_chan *)data);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * chan_intr_setup - Requests Interrupt handler for individual channels
+ *
+ * @xdev: Driver specific data for device
+ *
+ * Return: 0 on success and non zero value on failure.
+ */
+static int chan_intr_setup(struct xlnx_pcie_dma_device *xdev)
+{
+	int err = 0;
+	int i;
+
+	for (i = 0; i < xdev->num_channels; i++) {
+		struct ps_pcie_dma_chan *chan = &xdev->channels[i];
+
+		err = devm_request_irq(xdev->dev,
+				       pci_irq_vector(xdev->pci_dev, i),
+				       ps_pcie_dma_chan_intr_handler,
+				       PS_PCIE_DMA_IRQ_NOSHARE,
+				       "PS PCIe DMA Chan Intr handler", chan);
+		if (err) {
+			dev_err(xdev->dev,
+				"Irq %d for chan %d error %d\n",
+				pci_irq_vector(xdev->pci_dev, i),
+				chan->channel_number, err);
+			/* Roll back vectors requested so far */
+			while (--i >= 0)
+				devm_free_irq(xdev->dev,
+					      pci_irq_vector(xdev->pci_dev, i),
+					      &xdev->channels[i]);
+			break;
+		}
+	}
+
+	return err;
+}
+
+/**
+ * device_intr_setup - Requests interrupt handler for DMA device
+ *
+ * @xdev: Driver specific data for device
+ *
+ * Return: 0 on success and non zero value on failure.
+ */
+static int device_intr_setup(struct xlnx_pcie_dma_device *xdev)
+{
+	unsigned long intr_flags;
+	int err;
+
+	/* Legacy line interrupts may be shared; MSI/MSI-X are exclusive */
+	intr_flags = (xdev->pci_dev->msix_enabled ||
+		      xdev->pci_dev->msi_enabled) ?
+		     PS_PCIE_DMA_IRQ_NOSHARE : IRQF_SHARED;
+
+	err = devm_request_irq(xdev->dev,
+			       pci_irq_vector(xdev->pci_dev, 0),
+			       ps_pcie_dma_dev_intr_handler,
+			       intr_flags,
+			       "PS PCIe DMA Intr Handler", xdev);
+	if (err)
+		dev_err(xdev->dev, "Couldn't request irq %d\n",
+			pci_irq_vector(xdev->pci_dev, 0));
+
+	return err;
+}
+
+/**
+ * irq_setup - Requests interrupts based on the interrupt type detected
+ *
+ * @xdev: Driver specific data for device
+ *
+ * One vector per channel means per-channel handlers can be installed;
+ * otherwise a single device-level handler polls all channels.
+ *
+ * Return: 0 on success and non zero value on failure.
+ */
+static int irq_setup(struct xlnx_pcie_dma_device *xdev)
+{
+	return (xdev->irq_vecs == xdev->num_channels) ?
+		chan_intr_setup(xdev) : device_intr_setup(xdev);
+}
+
+/* Request the shared platform IRQ used by the Root DMA variant. */
+static int platform_irq_setup(struct xlnx_pcie_dma_device *xdev)
+{
+	int ret;
+
+	ret = devm_request_irq(xdev->dev, xdev->platform_irq_vec,
+			       ps_pcie_dma_dev_intr_handler, IRQF_SHARED,
+			       "PS PCIe Root DMA Handler", xdev);
+	if (ret)
+		dev_err(xdev->dev, "Couldn't request irq %d\n",
+			xdev->platform_irq_vec);
+
+	return ret;
+}
+
+/**
+ * irq_probe - Checks which interrupt types can be serviced by hardware
+ *
+ * @xdev: Driver specific data for device
+ *
+ * Return: Number of interrupt vectors when successful or -ENOSPC on failure
+ */
+static int irq_probe(struct xlnx_pcie_dma_device *xdev)
+{
+	struct pci_dev *pdev = xdev->pci_dev;
+
+	/* Try MSI-X/MSI first, falling back to legacy (PCI_IRQ_ALL_TYPES) */
+	xdev->irq_vecs = pci_alloc_irq_vectors(pdev, 1, xdev->num_channels,
+					       PCI_IRQ_ALL_TYPES);
+	return xdev->irq_vecs;
+}
+
+/**
+ * ps_pcie_check_intr_status - Checks channel interrupt status
+ *
+ * @chan: Pointer to the PS PCIe DMA channel structure
+ *
+ * Reads the channel interrupt status register, queues the matching
+ * cleanup work for SGL/software interrupts, and acknowledges each
+ * serviced condition by writing its status bit back (the bits are
+ * persistent until cleared). A DMA error additionally dumps the queue
+ * registers and escalates via handle_error().
+ *
+ * Return: 0 if interrupt is pending on channel
+ *	   -1 if no interrupt is pending on channel
+ */
+static int ps_pcie_check_intr_status(struct ps_pcie_dma_chan *chan)
+{
+	int err = -1;
+	u32 status;
+
+	/* Quiesced/errored channels must not service interrupts */
+	if (chan->state != CHANNEL_AVAILABLE)
+		return err;
+
+	status = ps_pcie_dma_read(chan, chan->intr_status_offset);
+
+	if (status & DMA_INTSTATUS_SGLINTR_BIT) {
+		/* Descriptor completion: defer processing to workqueue */
+		if (chan->primary_desc_cleanup) {
+			queue_work(chan->primary_desc_cleanup,
+				   &chan->handle_primary_desc_cleanup);
+		}
+		/* Clearing Persistent bit */
+		ps_pcie_dma_set_mask(chan, chan->intr_status_offset,
+				     DMA_INTSTATUS_SGLINTR_BIT);
+		err = 0;
+	}
+
+	if (status & DMA_INTSTATUS_SWINTR_BIT) {
+		/* Software-triggered interrupt: defer to its own workqueue */
+		if (chan->sw_intrs_wrkq)
+			queue_work(chan->sw_intrs_wrkq, &chan->handle_sw_intrs);
+		/* Clearing Persistent bit */
+		ps_pcie_dma_set_mask(chan, chan->intr_status_offset,
+				     DMA_INTSTATUS_SWINTR_BIT);
+		err = 0;
+	}
+
+	if (status & DMA_INTSTATUS_DMAERR_BIT) {
+		/* Dump all four queue register sets to aid debugging */
+		dev_err(chan->dev,
+			"DMA Channel %d ControlStatus Reg: 0x%x",
+			chan->channel_number, status);
+		dev_err(chan->dev,
+			"Chn %d SrcQLmt = %d SrcQSz = %d SrcQNxt = %d",
+			chan->channel_number,
+			chan->chan_base->src_q_limit,
+			chan->chan_base->src_q_size,
+			chan->chan_base->src_q_next);
+		dev_err(chan->dev,
+			"Chn %d SrcStaLmt = %d SrcStaSz = %d SrcStaNxt = %d",
+			chan->channel_number,
+			chan->chan_base->stas_q_limit,
+			chan->chan_base->stas_q_size,
+			chan->chan_base->stas_q_next);
+		dev_err(chan->dev,
+			"Chn %d DstQLmt = %d DstQSz = %d DstQNxt = %d",
+			chan->channel_number,
+			chan->chan_base->dst_q_limit,
+			chan->chan_base->dst_q_size,
+			chan->chan_base->dst_q_next);
+		dev_err(chan->dev,
+			"Chan %d DstStaLmt = %d DstStaSz = %d DstStaNxt = %d",
+			chan->channel_number,
+			chan->chan_base->stad_q_limit,
+			chan->chan_base->stad_q_size,
+			chan->chan_base->stad_q_next);
+		/* Clearing Persistent bit */
+		ps_pcie_dma_set_mask(chan, chan->intr_status_offset,
+				     DMA_INTSTATUS_DMAERR_BIT);
+
+		/* Marks channel errored and schedules a channel reset */
+		handle_error(chan);
+
+		err = 0;
+	}
+
+	return err;
+}
+
+/*
+ * init_hw_components - Program the channel's queue registers.
+ *
+ * Writes the base address (split into high/low words), size and initial
+ * limit/next indices for the source SGL + status queues and for the
+ * destination SGL + status queues, for whichever queues this channel has
+ * allocated. The low word also carries location/enable flags: Root DMA
+ * queues live on the AXI side and get ROOTDMA_Q_READ_ATTRIBUTE |
+ * DMA_QPTRLO_QLOCAXI_BIT in addition to DMA_QPTRLO_Q_ENABLE_BIT.
+ *
+ * Note the asymmetry: SGL queue limits start at 0 (nothing submitted yet)
+ * while status queue limits start at total_descriptors - 1 (hardware may
+ * fill the whole status ring).
+ *
+ * Always returns 0.
+ */
+static int init_hw_components(struct ps_pcie_dma_chan *chan)
+{
+	if (chan->psrc_sgl_bd && chan->psrc_sta_bd) {
+		/* Programming SourceQ and StatusQ bd addresses */
+		chan->chan_base->src_q_next = 0;
+		chan->chan_base->src_q_high =
+			upper_32_bits(chan->src_sgl_bd_pa);
+		chan->chan_base->src_q_size = chan->total_descriptors;
+		chan->chan_base->src_q_limit = 0;
+		if (chan->xdev->is_rootdma) {
+			chan->chan_base->src_q_low = ROOTDMA_Q_READ_ATTRIBUTE
+				| DMA_QPTRLO_QLOCAXI_BIT;
+		} else {
+			chan->chan_base->src_q_low = 0;
+		}
+		/* Low word: address bits plus queue-enable flag */
+		chan->chan_base->src_q_low |=
+			(lower_32_bits((chan->src_sgl_bd_pa))
+			 & ~(DMA_SRC_Q_LOW_BIT_SHIFT))
+			| DMA_QPTRLO_Q_ENABLE_BIT;
+
+		chan->chan_base->stas_q_next = 0;
+		chan->chan_base->stas_q_high =
+			upper_32_bits(chan->src_sta_bd_pa);
+		chan->chan_base->stas_q_size = chan->total_descriptors;
+		chan->chan_base->stas_q_limit = chan->total_descriptors - 1;
+		if (chan->xdev->is_rootdma) {
+			chan->chan_base->stas_q_low = ROOTDMA_Q_READ_ATTRIBUTE
+				| DMA_QPTRLO_QLOCAXI_BIT;
+		} else {
+			chan->chan_base->stas_q_low = 0;
+		}
+		chan->chan_base->stas_q_low |=
+			(lower_32_bits(chan->src_sta_bd_pa)
+			 & ~(DMA_SRC_Q_LOW_BIT_SHIFT))
+			| DMA_QPTRLO_Q_ENABLE_BIT;
+	}
+
+	if (chan->pdst_sgl_bd && chan->pdst_sta_bd) {
+		/* Programming DestinationQ and StatusQ buffer descriptors */
+		chan->chan_base->dst_q_next = 0;
+		chan->chan_base->dst_q_high =
+			upper_32_bits(chan->dst_sgl_bd_pa);
+		chan->chan_base->dst_q_size = chan->total_descriptors;
+		chan->chan_base->dst_q_limit = 0;
+		if (chan->xdev->is_rootdma) {
+			chan->chan_base->dst_q_low = ROOTDMA_Q_READ_ATTRIBUTE
+				| DMA_QPTRLO_QLOCAXI_BIT;
+		} else {
+			chan->chan_base->dst_q_low = 0;
+		}
+		chan->chan_base->dst_q_low |=
+			(lower_32_bits(chan->dst_sgl_bd_pa)
+			 & ~(DMA_SRC_Q_LOW_BIT_SHIFT))
+			| DMA_QPTRLO_Q_ENABLE_BIT;
+
+		chan->chan_base->stad_q_next = 0;
+		chan->chan_base->stad_q_high =
+			upper_32_bits(chan->dst_sta_bd_pa);
+		chan->chan_base->stad_q_size = chan->total_descriptors;
+		chan->chan_base->stad_q_limit = chan->total_descriptors - 1;
+		if (chan->xdev->is_rootdma) {
+			chan->chan_base->stad_q_low = ROOTDMA_Q_READ_ATTRIBUTE
+				| DMA_QPTRLO_QLOCAXI_BIT;
+		} else {
+			chan->chan_base->stad_q_low = 0;
+		}
+		chan->chan_base->stad_q_low |=
+			(lower_32_bits(chan->dst_sta_bd_pa)
+			 & ~(DMA_SRC_Q_LOW_BIT_SHIFT))
+			| DMA_QPTRLO_Q_ENABLE_BIT;
+	}
+
+	return 0;
+}
+
+/*
+ * Select the source-queue read attribute for this channel based on the
+ * DMA flavour (root vs endpoint) and where the source buffers live.
+ */
+static void update_channel_read_attribute(struct ps_pcie_dma_chan *chan)
+{
+	if (chan->xdev->is_rootdma) {
+		/* For Root DMA, Host Memory and Buffer Descriptors
+		 * will be on AXI side
+		 */
+		if (chan->srcq_buffer_location == BUFFER_LOC_PCI)
+			chan->read_attribute =
+				(AXI_ATTRIBUTE << SRC_CTL_ATTRIB_BIT_SHIFT) |
+				SOURCE_CONTROL_BD_LOC_AXI;
+		else if (chan->srcq_buffer_location == BUFFER_LOC_AXI)
+			chan->read_attribute =
+				AXI_ATTRIBUTE << SRC_CTL_ATTRIB_BIT_SHIFT;
+		return;
+	}
+
+	if (chan->srcq_buffer_location == BUFFER_LOC_PCI)
+		chan->read_attribute =
+			PCI_ATTRIBUTE << SRC_CTL_ATTRIB_BIT_SHIFT;
+	else if (chan->srcq_buffer_location == BUFFER_LOC_AXI)
+		chan->read_attribute =
+			(AXI_ATTRIBUTE << SRC_CTL_ATTRIB_BIT_SHIFT) |
+			SOURCE_CONTROL_BD_LOC_AXI;
+}
+
+/*
+ * Select the destination-queue write attribute for this channel based on
+ * the DMA flavour (root vs endpoint) and where the destination buffers
+ * live, then enable back-to-back packing.
+ *
+ * Bug fix: the rootdma BUFFER_LOC_AXI branch previously tested
+ * chan->srcq_buffer_location (copy/paste from the read variant) instead
+ * of chan->dstq_buffer_location, so the write attribute could be left
+ * unset for AXI-resident destination buffers on Root DMA.
+ */
+static void update_channel_write_attribute(struct ps_pcie_dma_chan *chan)
+{
+	if (chan->xdev->is_rootdma) {
+		/* For Root DMA, Host Memory and Buffer Descriptors
+		 * will be on AXI side
+		 */
+		if (chan->dstq_buffer_location == BUFFER_LOC_PCI) {
+			chan->write_attribute = (AXI_ATTRIBUTE <<
+						SRC_CTL_ATTRIB_BIT_SHIFT) |
+						SOURCE_CONTROL_BD_LOC_AXI;
+		} else if (chan->dstq_buffer_location == BUFFER_LOC_AXI) {
+			chan->write_attribute = AXI_ATTRIBUTE <<
+						SRC_CTL_ATTRIB_BIT_SHIFT;
+		}
+	} else {
+		if (chan->dstq_buffer_location == BUFFER_LOC_PCI) {
+			chan->write_attribute = PCI_ATTRIBUTE <<
+						SRC_CTL_ATTRIB_BIT_SHIFT;
+		} else if (chan->dstq_buffer_location == BUFFER_LOC_AXI) {
+			chan->write_attribute = (AXI_ATTRIBUTE <<
+						SRC_CTL_ATTRIB_BIT_SHIFT) |
+						SOURCE_CONTROL_BD_LOC_AXI;
+		}
+	}
+	chan->write_attribute |= SOURCE_CONTROL_BACK_TO_BACK_PACK_BIT;
+}
+
+/*
+ * init_sw_components - Reset the driver-side bookkeeping for a channel.
+ *
+ * Zeroes the packet contexts, SGL descriptors and status descriptors for
+ * whichever of the source/destination queues are allocated, and rewinds
+ * every ring index to its initial position. Always returns 0.
+ */
+static int init_sw_components(struct ps_pcie_dma_chan *chan)
+{
+	u32 num = chan->total_descriptors;
+
+	if (chan->ppkt_ctx_srcq && chan->psrc_sgl_bd && chan->psrc_sta_bd) {
+		memset(chan->ppkt_ctx_srcq, 0,
+		       num * sizeof(struct PACKET_TRANSFER_PARAMS));
+		memset(chan->psrc_sgl_bd, 0,
+		       num * sizeof(struct SOURCE_DMA_DESCRIPTOR));
+		memset(chan->psrc_sta_bd, 0,
+		       num * sizeof(struct STATUS_DMA_DESCRIPTOR));
+
+		chan->src_avail_descriptors = num;
+		chan->src_sgl_freeidx = 0;
+		chan->src_staprobe_idx = 0;
+		/* HW may fill the whole status ring, so the probe limit
+		 * starts at the last slot
+		 */
+		chan->src_sta_hw_probe_idx = num - 1;
+		chan->idx_ctx_srcq_head = 0;
+		chan->idx_ctx_srcq_tail = 0;
+	}
+
+	if (chan->ppkt_ctx_dstq && chan->pdst_sgl_bd && chan->pdst_sta_bd) {
+		memset(chan->ppkt_ctx_dstq, 0,
+		       num * sizeof(struct PACKET_TRANSFER_PARAMS));
+		memset(chan->pdst_sgl_bd, 0,
+		       num * sizeof(struct DEST_DMA_DESCRIPTOR));
+		memset(chan->pdst_sta_bd, 0,
+		       num * sizeof(struct STATUS_DMA_DESCRIPTOR));
+
+		chan->dst_avail_descriptors = num;
+		chan->dst_sgl_freeidx = 0;
+		chan->dst_staprobe_idx = 0;
+		chan->dst_sta_hw_probe_idx = num - 1;
+		chan->idx_ctx_dstq_head = 0;
+		chan->idx_ctx_dstq_tail = 0;
+	}
+
+	return 0;
+}
+
+/**
+ * ps_pcie_chan_reset - Resets channel, by programming relevant registers
+ *
+ * @chan: PS PCIe DMA channel information holder
+ * Return: void
+ */
+static void ps_pcie_chan_reset(struct ps_pcie_dma_chan *chan)
+{
+	/* Enable channel reset */
+	ps_pcie_dma_set_mask(chan, DMA_CNTRL_REG_OFFSET, DMA_CNTRL_RST_BIT);
+
+	/* Hold reset asserted for 10 ms before releasing it */
+	mdelay(10);
+
+	/* Disable channel reset */
+	ps_pcie_dma_clr_mask(chan, DMA_CNTRL_REG_OFFSET, DMA_CNTRL_RST_BIT);
+}
+
+/**
+ * poll_completed_transactions - Function invoked by poll timer
+ *
+ * @t: Pointer to timer triggering this callback
+ *
+ * Queues the primary descriptor-cleanup work for an active channel and
+ * re-arms the timer for the next polling interval.
+ *
+ * Return: void
+ */
+static void poll_completed_transactions(struct timer_list *t)
+{
+	struct ps_pcie_dma_chan *chan = from_timer(chan, t, poll_timer);
+
+	if (chan->state == CHANNEL_AVAILABLE)
+		queue_work(chan->primary_desc_cleanup,
+			   &chan->handle_primary_desc_cleanup);
+
+	mod_timer(&chan->poll_timer, jiffies + chan->poll_timer_freq);
+}
+
+/*
+ * For channels with only two queues: check that the single active
+ * direction (source or destination) has enough free descriptors for @seg.
+ */
+static bool check_descriptors_for_two_queues(struct ps_pcie_dma_chan *chan,
+					     struct ps_pcie_tx_segment *seg)
+{
+	if (seg->src_elements)
+		return chan->src_avail_descriptors >= seg->src_elements;
+
+	if (seg->dst_elements)
+		return chan->dst_avail_descriptors >= seg->dst_elements;
+
+	return false;
+}
+
+/*
+ * For channels with all four queues: both directions must have enough
+ * free descriptors for @seg.
+ */
+static bool check_descriptors_for_all_queues(struct ps_pcie_dma_chan *chan,
+					     struct ps_pcie_tx_segment *seg)
+{
+	return chan->src_avail_descriptors >= seg->src_elements &&
+	       chan->dst_avail_descriptors >= seg->dst_elements;
+}
+
+/* Dispatch the descriptor-availability check by channel queue layout. */
+static bool check_descriptor_availability(struct ps_pcie_dma_chan *chan,
+					  struct ps_pcie_tx_segment *seg)
+{
+	return (chan->num_queues == DEFAULT_DMA_QUEUES) ?
+		check_descriptors_for_all_queues(chan, seg) :
+		check_descriptors_for_two_queues(chan, seg);
+}
+
+/*
+ * handle_error - Move an available channel into the error state and
+ * schedule the channel-reset maintenance work. No-op if the channel is
+ * already unavailable.
+ */
+static void handle_error(struct ps_pcie_dma_chan *chan)
+{
+	if (chan->state != CHANNEL_AVAILABLE)
+		return;
+
+	spin_lock(&chan->channel_lock);
+	chan->state = CHANNEL_ERROR;
+	spin_unlock(&chan->channel_lock);
+
+	if (chan->maintenance_workq)
+		queue_work(chan->maintenance_workq, &chan->handle_chan_reset);
+}
+
+/*
+ * xlnx_ps_pcie_update_srcq - Fill source-queue descriptors for a segment.
+ *
+ * Claims the next source packet context, writes one source SGL
+ * descriptor per transfer element (address, byte count, read attribute,
+ * context handle), marks EOP (and optionally the interrupt bit) on the
+ * last element, then publishes the work to hardware by advancing
+ * src_q_limit. On context exhaustion the channel is put into error state
+ * via handle_error().
+ */
+static void xlnx_ps_pcie_update_srcq(struct ps_pcie_dma_chan *chan,
+				     struct ps_pcie_tx_segment *seg)
+{
+	struct SOURCE_DMA_DESCRIPTOR *pdesc;
+	struct PACKET_TRANSFER_PARAMS *pkt_ctx = NULL;
+	struct ps_pcie_transfer_elements *ele = NULL;
+	u32 i = 0;
+
+	pkt_ctx = chan->ppkt_ctx_srcq + chan->idx_ctx_srcq_head;
+	if (pkt_ctx->availability_status == IN_USE) {
+		dev_err(chan->dev,
+			"src pkt context not avail for channel %d\n",
+			chan->channel_number);
+		handle_error(chan);
+		return;
+	}
+
+	pkt_ctx->availability_status = IN_USE;
+
+	/* Completion callback is invoked from the PCI-side queue only */
+	if (chan->srcq_buffer_location == BUFFER_LOC_PCI)
+		pkt_ctx->seg = seg;
+
+	/* Get the address of the next available DMA Descriptor */
+	pdesc = chan->psrc_sgl_bd + chan->src_sgl_freeidx;
+	pkt_ctx->idx_sop = chan->src_sgl_freeidx;
+
+	/* Build transactions using information in the scatter gather list */
+	list_for_each_entry(ele, &seg->transfer_nodes, node) {
+		/* 64-bit addressing only when the HW supports it */
+		if (chan->xdev->dma_buf_ext_addr) {
+			pdesc->system_address =
+				(u64)ele->src_pa;
+		} else {
+			pdesc->system_address =
+				(u32)ele->src_pa;
+		}
+
+		pdesc->control_byte_count = (ele->transfer_bytes &
+					     SOURCE_CONTROL_BD_BYTE_COUNT_MASK) |
+					    chan->read_attribute;
+
+		/* user_handle links the completion back to this context */
+		pdesc->user_handle = chan->idx_ctx_srcq_head;
+		pdesc->user_id = DEFAULT_UID;
+		/* Check if this is last descriptor */
+		if (i == (seg->src_elements - 1)) {
+			pkt_ctx->idx_eop = chan->src_sgl_freeidx;
+			pdesc->control_byte_count |= SOURCE_CONTROL_BD_EOP_BIT;
+			if ((seg->async_tx.flags & DMA_PREP_INTERRUPT) ==
+			    DMA_PREP_INTERRUPT) {
+				pdesc->control_byte_count |=
+					SOURCE_CONTROL_BD_INTR_BIT;
+			}
+		}
+		/* Advance the ring index with wrap-around */
+		chan->src_sgl_freeidx++;
+		if (chan->src_sgl_freeidx == chan->total_descriptors)
+			chan->src_sgl_freeidx = 0;
+		pdesc = chan->psrc_sgl_bd + chan->src_sgl_freeidx;
+		spin_lock(&chan->src_desc_lock);
+		chan->src_avail_descriptors--;
+		spin_unlock(&chan->src_desc_lock);
+		i++;
+	}
+
+	/* Publishing the new limit hands the descriptors to hardware */
+	chan->chan_base->src_q_limit = chan->src_sgl_freeidx;
+	chan->idx_ctx_srcq_head++;
+	if (chan->idx_ctx_srcq_head == chan->total_descriptors)
+		chan->idx_ctx_srcq_head = 0;
+}
+
+/*
+ * xlnx_ps_pcie_update_dstq - Fill destination-queue descriptors for a
+ * segment. Mirror image of xlnx_ps_pcie_update_srcq for the write side:
+ * claims a destination packet context, writes one destination SGL
+ * descriptor per transfer element, records the EOP index, then publishes
+ * the work to hardware by advancing dst_q_limit.
+ */
+static void xlnx_ps_pcie_update_dstq(struct ps_pcie_dma_chan *chan,
+				     struct ps_pcie_tx_segment *seg)
+{
+	struct DEST_DMA_DESCRIPTOR *pdesc;
+	struct PACKET_TRANSFER_PARAMS *pkt_ctx = NULL;
+	struct ps_pcie_transfer_elements *ele = NULL;
+	u32 i = 0;
+
+	pkt_ctx = chan->ppkt_ctx_dstq + chan->idx_ctx_dstq_head;
+	if (pkt_ctx->availability_status == IN_USE) {
+		dev_err(chan->dev,
+			"dst pkt context not avail for channel %d\n",
+			chan->channel_number);
+		handle_error(chan);
+
+		return;
+	}
+
+	pkt_ctx->availability_status = IN_USE;
+
+	/* Completion callback is invoked from the PCI-side queue only */
+	if (chan->dstq_buffer_location == BUFFER_LOC_PCI)
+		pkt_ctx->seg = seg;
+
+	pdesc = chan->pdst_sgl_bd + chan->dst_sgl_freeidx;
+	pkt_ctx->idx_sop = chan->dst_sgl_freeidx;
+
+	/* Build transactions using information in the scatter gather list */
+	list_for_each_entry(ele, &seg->transfer_nodes, node) {
+		/* 64-bit addressing only when the HW supports it */
+		if (chan->xdev->dma_buf_ext_addr) {
+			pdesc->system_address =
+				(u64)ele->dst_pa;
+		} else {
+			pdesc->system_address =
+				(u32)ele->dst_pa;
+		}
+		pdesc->control_byte_count = (ele->transfer_bytes &
+					     SOURCE_CONTROL_BD_BYTE_COUNT_MASK) |
+					    chan->write_attribute;
+
+		/* user_handle links the completion back to this context */
+		pdesc->user_handle = chan->idx_ctx_dstq_head;
+		/* Check if this is last descriptor */
+		if (i == (seg->dst_elements - 1))
+			pkt_ctx->idx_eop = chan->dst_sgl_freeidx;
+		chan->dst_sgl_freeidx++;
+		if (chan->dst_sgl_freeidx == chan->total_descriptors)
+			chan->dst_sgl_freeidx = 0;
+		pdesc = chan->pdst_sgl_bd + chan->dst_sgl_freeidx;
+		spin_lock(&chan->dst_desc_lock);
+		chan->dst_avail_descriptors--;
+		spin_unlock(&chan->dst_desc_lock);
+		i++;
+	}
+
+	/* Publishing the new limit hands the descriptors to hardware */
+	chan->chan_base->dst_q_limit = chan->dst_sgl_freeidx;
+	chan->idx_ctx_dstq_head++;
+	if (chan->idx_ctx_dstq_head == chan->total_descriptors)
+		chan->idx_ctx_dstq_head = 0;
+}
+
+/*
+ * ps_pcie_chan_program_work - Drain the active list into the hardware
+ * queues. Keeps taking the oldest segment as long as the channel stays
+ * available and enough descriptors are free; stops as soon as either
+ * condition fails.
+ */
+static void ps_pcie_chan_program_work(struct work_struct *work)
+{
+	struct ps_pcie_dma_chan *chan =
+		container_of(work, struct ps_pcie_dma_chan,
+			     handle_chan_programming);
+
+	for (;;) {
+		struct ps_pcie_tx_segment *seg;
+
+		if (chan->state != CHANNEL_AVAILABLE)
+			break;
+
+		spin_lock(&chan->active_list_lock);
+		seg = list_first_entry_or_null(&chan->active_list,
+					       struct ps_pcie_tx_segment,
+					       node);
+		spin_unlock(&chan->active_list_lock);
+
+		if (!seg || !check_descriptor_availability(chan, seg))
+			break;
+
+		spin_lock(&chan->active_list_lock);
+		list_del(&seg->node);
+		spin_unlock(&chan->active_list_lock);
+
+		if (seg->src_elements)
+			xlnx_ps_pcie_update_srcq(chan, seg);
+
+		if (seg->dst_elements)
+			xlnx_ps_pcie_update_dstq(chan, seg);
+	}
+}
+
+/**
+ * dst_cleanup_work - Goes through all completed elements in status Q
+ * and invokes callbacks for the concerned DMA transaction.
+ *
+ * @work: Work associated with the task
+ *
+ * Walks the destination status ring while the COMPLETED bit is set: on
+ * any error bit the channel is escalated via handle_error() and the walk
+ * stops. For each completed entry it recycles the status descriptor and
+ * all SGL descriptors between idx_sop and idx_eop, updates the hardware
+ * status-queue limit, completes the cookie and invokes the dmaengine
+ * callback with the residue, then frees the transfer elements and the
+ * segment back to their mempools. Signals dstq_work_complete when done.
+ *
+ * Return: void
+ */
+static void dst_cleanup_work(struct work_struct *work)
+{
+	struct ps_pcie_dma_chan *chan =
+		(struct ps_pcie_dma_chan *)container_of(work,
+			struct ps_pcie_dma_chan, handle_dstq_desc_cleanup);
+
+	struct STATUS_DMA_DESCRIPTOR *psta_bd;
+	struct DEST_DMA_DESCRIPTOR *pdst_bd;
+	struct PACKET_TRANSFER_PARAMS *ppkt_ctx;
+	struct dmaengine_result rslt;
+	u32 completed_bytes;
+	u32 dstq_desc_idx;
+	struct ps_pcie_transfer_elements *ele, *ele_nxt;
+
+	psta_bd = chan->pdst_sta_bd + chan->dst_staprobe_idx;
+
+	while (psta_bd->status_flag_byte_count & STA_BD_COMPLETED_BIT) {
+		if (psta_bd->status_flag_byte_count &
+		    STA_BD_DESTINATION_ERROR_BIT) {
+			dev_err(chan->dev,
+				"Dst Sts Elmnt %d chan %d has Destination Err",
+				chan->dst_staprobe_idx + 1,
+				chan->channel_number);
+			handle_error(chan);
+			break;
+		}
+		if (psta_bd->status_flag_byte_count & STA_BD_SOURCE_ERROR_BIT) {
+			dev_err(chan->dev,
+				"Dst Sts Elmnt %d chan %d has Source Error",
+				chan->dst_staprobe_idx + 1,
+				chan->channel_number);
+			handle_error(chan);
+			break;
+		}
+		if (psta_bd->status_flag_byte_count &
+		    STA_BD_INTERNAL_ERROR_BIT) {
+			dev_err(chan->dev,
+				"Dst Sts Elmnt %d chan %d has Internal Error",
+				chan->dst_staprobe_idx + 1,
+				chan->channel_number);
+			handle_error(chan);
+			break;
+		}
+		/* we are using 64 bit USER field. */
+		if ((psta_bd->status_flag_byte_count &
+		     STA_BD_UPPER_STATUS_NONZERO_BIT) == 0) {
+			dev_err(chan->dev,
+				"Dst Sts Elmnt %d for chan %d has NON ZERO",
+				chan->dst_staprobe_idx + 1,
+				chan->channel_number);
+			handle_error(chan);
+			break;
+		}
+
+		/* user_handle written at submit time maps back to the ctx */
+		chan->idx_ctx_dstq_tail = psta_bd->user_handle;
+		ppkt_ctx = chan->ppkt_ctx_dstq + chan->idx_ctx_dstq_tail;
+		completed_bytes = (psta_bd->status_flag_byte_count &
+				   STA_BD_BYTE_COUNT_MASK) >>
+				  STA_BD_BYTE_COUNT_SHIFT;
+
+		memset(psta_bd, 0, sizeof(struct STATUS_DMA_DESCRIPTOR));
+
+		chan->dst_staprobe_idx++;
+
+		if (chan->dst_staprobe_idx == chan->total_descriptors)
+			chan->dst_staprobe_idx = 0;
+
+		chan->dst_sta_hw_probe_idx++;
+
+		if (chan->dst_sta_hw_probe_idx == chan->total_descriptors)
+			chan->dst_sta_hw_probe_idx = 0;
+
+		/* Return the recycled status slot to hardware */
+		chan->chan_base->stad_q_limit = chan->dst_sta_hw_probe_idx;
+
+		psta_bd = chan->pdst_sta_bd + chan->dst_staprobe_idx;
+
+		dstq_desc_idx = ppkt_ctx->idx_sop;
+
+		/* Recycle every SGL descriptor of this packet (sop..eop) */
+		do {
+			pdst_bd = chan->pdst_sgl_bd + dstq_desc_idx;
+			memset(pdst_bd, 0,
+			       sizeof(struct DEST_DMA_DESCRIPTOR));
+
+			spin_lock(&chan->dst_desc_lock);
+			chan->dst_avail_descriptors++;
+			spin_unlock(&chan->dst_desc_lock);
+
+			if (dstq_desc_idx == ppkt_ctx->idx_eop)
+				break;
+
+			dstq_desc_idx++;
+
+			if (dstq_desc_idx == chan->total_descriptors)
+				dstq_desc_idx = 0;
+
+		} while (1);
+
+		/* Invoking callback */
+		if (ppkt_ctx->seg) {
+			spin_lock(&chan->cookie_lock);
+			dma_cookie_complete(&ppkt_ctx->seg->async_tx);
+			spin_unlock(&chan->cookie_lock);
+			rslt.result = DMA_TRANS_NOERROR;
+			rslt.residue = ppkt_ctx->seg->total_transfer_bytes -
+					completed_bytes;
+			dmaengine_desc_get_callback_invoke(&ppkt_ctx->seg->async_tx,
+							   &rslt);
+			list_for_each_entry_safe(ele, ele_nxt,
+						 &ppkt_ctx->seg->transfer_nodes,
+						 node) {
+				list_del(&ele->node);
+				mempool_free(ele, chan->tx_elements_pool);
+			}
+			mempool_free(ppkt_ctx->seg, chan->transactions_pool);
+		}
+		memset(ppkt_ctx, 0, sizeof(struct PACKET_TRANSFER_PARAMS));
+	}
+
+	/* Wake ps_pcie_chan_primary_work, which waits on this completion */
+	complete(&chan->dstq_work_complete);
+}
+
+/**
+ * src_cleanup_work - Goes through all completed elements in status Q and
+ * invokes callbacks for the concerned DMA transaction.
+ *
+ * @work: Work associated with the task
+ *
+ * Source-queue counterpart of dst_cleanup_work: drains the source status
+ * ring, recycles status and SGL descriptors, updates the hardware
+ * status-queue limit, and completes/frees each finished segment. Signals
+ * srcq_work_complete when done.
+ *
+ * Return: void
+ */
+static void src_cleanup_work(struct work_struct *work)
+{
+	struct ps_pcie_dma_chan *chan =
+		(struct ps_pcie_dma_chan *)container_of(
+			work, struct ps_pcie_dma_chan, handle_srcq_desc_cleanup);
+
+	struct STATUS_DMA_DESCRIPTOR *psta_bd;
+	struct SOURCE_DMA_DESCRIPTOR *psrc_bd;
+	struct PACKET_TRANSFER_PARAMS *ppkt_ctx;
+	struct dmaengine_result rslt;
+	u32 completed_bytes;
+	u32 srcq_desc_idx;
+	struct ps_pcie_transfer_elements *ele, *ele_nxt;
+
+	psta_bd = chan->psrc_sta_bd + chan->src_staprobe_idx;
+
+	while (psta_bd->status_flag_byte_count & STA_BD_COMPLETED_BIT) {
+		if (psta_bd->status_flag_byte_count &
+		    STA_BD_DESTINATION_ERROR_BIT) {
+			dev_err(chan->dev,
+				"Src Sts Elmnt %d chan %d has Dst Error",
+				chan->src_staprobe_idx + 1,
+				chan->channel_number);
+			handle_error(chan);
+			break;
+		}
+		if (psta_bd->status_flag_byte_count & STA_BD_SOURCE_ERROR_BIT) {
+			dev_err(chan->dev,
+				"Src Sts Elmnt %d chan %d has Source Error",
+				chan->src_staprobe_idx + 1,
+				chan->channel_number);
+			handle_error(chan);
+			break;
+		}
+		if (psta_bd->status_flag_byte_count &
+		    STA_BD_INTERNAL_ERROR_BIT) {
+			dev_err(chan->dev,
+				"Src Sts Elmnt %d chan %d has Internal Error",
+				chan->src_staprobe_idx + 1,
+				chan->channel_number);
+			handle_error(chan);
+			break;
+		}
+		/* we are using 64 bit USER field. */
+		if ((psta_bd->status_flag_byte_count
+		     & STA_BD_UPPER_STATUS_NONZERO_BIT) == 0) {
+			dev_err(chan->dev,
+				"Src Sts Elmnt %d chan %d has NonZero",
+				chan->src_staprobe_idx + 1,
+				chan->channel_number);
+			handle_error(chan);
+			break;
+		}
+		/* user_handle written at submit time maps back to the ctx */
+		chan->idx_ctx_srcq_tail = psta_bd->user_handle;
+		ppkt_ctx = chan->ppkt_ctx_srcq + chan->idx_ctx_srcq_tail;
+		completed_bytes = (psta_bd->status_flag_byte_count
+				   & STA_BD_BYTE_COUNT_MASK) >>
+				  STA_BD_BYTE_COUNT_SHIFT;
+
+		memset(psta_bd, 0, sizeof(struct STATUS_DMA_DESCRIPTOR));
+
+		chan->src_staprobe_idx++;
+
+		if (chan->src_staprobe_idx == chan->total_descriptors)
+			chan->src_staprobe_idx = 0;
+
+		chan->src_sta_hw_probe_idx++;
+
+		if (chan->src_sta_hw_probe_idx == chan->total_descriptors)
+			chan->src_sta_hw_probe_idx = 0;
+
+		/* Return the recycled status slot to hardware */
+		chan->chan_base->stas_q_limit = chan->src_sta_hw_probe_idx;
+
+		psta_bd = chan->psrc_sta_bd + chan->src_staprobe_idx;
+
+		srcq_desc_idx = ppkt_ctx->idx_sop;
+
+		/* Recycle every SGL descriptor of this packet (sop..eop) */
+		do {
+			psrc_bd = chan->psrc_sgl_bd + srcq_desc_idx;
+			memset(psrc_bd, 0,
+			       sizeof(struct SOURCE_DMA_DESCRIPTOR));
+
+			spin_lock(&chan->src_desc_lock);
+			chan->src_avail_descriptors++;
+			spin_unlock(&chan->src_desc_lock);
+
+			if (srcq_desc_idx == ppkt_ctx->idx_eop)
+				break;
+			srcq_desc_idx++;
+
+			if (srcq_desc_idx == chan->total_descriptors)
+				srcq_desc_idx = 0;
+
+		} while (1);
+
+		/* Invoking callback */
+		if (ppkt_ctx->seg) {
+			spin_lock(&chan->cookie_lock);
+			dma_cookie_complete(&ppkt_ctx->seg->async_tx);
+			spin_unlock(&chan->cookie_lock);
+			rslt.result = DMA_TRANS_NOERROR;
+			rslt.residue = ppkt_ctx->seg->total_transfer_bytes -
+					completed_bytes;
+			dmaengine_desc_get_callback_invoke(&ppkt_ctx->seg->async_tx,
+							   &rslt);
+			list_for_each_entry_safe(ele, ele_nxt,
+						 &ppkt_ctx->seg->transfer_nodes,
+						 node) {
+				list_del(&ele->node);
+				mempool_free(ele, chan->tx_elements_pool);
+			}
+			mempool_free(ppkt_ctx->seg, chan->transactions_pool);
+		}
+		memset(ppkt_ctx, 0, sizeof(struct PACKET_TRANSFER_PARAMS));
+	}
+
+	/* Wake ps_pcie_chan_primary_work, which waits on this completion */
+	complete(&chan->srcq_work_complete);
+}
+
+/**
+ * ps_pcie_chan_primary_work - Masks out interrupts, invokes source Q and
+ * destination Q processing. Waits for source Q and destination Q processing
+ * and re enables interrupts. Same work is invoked by timer if coalesce count
+ * is greater than zero and interrupts are not invoked before the timeout period
+ *
+ * @work: Work associated with the task
+ *
+ * Return: void
+ */
+static void ps_pcie_chan_primary_work(struct work_struct *work)
+{
+	struct ps_pcie_dma_chan *chan =
+		(struct ps_pcie_dma_chan *)container_of(
+			work, struct ps_pcie_dma_chan,
+			handle_primary_desc_cleanup);
+
+	/* Disable interrupts for Channel */
+	ps_pcie_dma_clr_mask(chan, chan->intr_control_offset,
+			     DMA_INTCNTRL_ENABLINTR_BIT);
+
+	/* Kick the per-queue cleanup workers for whichever queues exist */
+	if (chan->psrc_sgl_bd) {
+		reinit_completion(&chan->srcq_work_complete);
+		if (chan->srcq_desc_cleanup)
+			queue_work(chan->srcq_desc_cleanup,
+				   &chan->handle_srcq_desc_cleanup);
+	}
+	if (chan->pdst_sgl_bd) {
+		reinit_completion(&chan->dstq_work_complete);
+		if (chan->dstq_desc_cleanup)
+			queue_work(chan->dstq_desc_cleanup,
+				   &chan->handle_dstq_desc_cleanup);
+	}
+
+	/* Block until both cleanup workers have signalled completion */
+	if (chan->psrc_sgl_bd)
+		wait_for_completion_interruptible(&chan->srcq_work_complete);
+	if (chan->pdst_sgl_bd)
+		wait_for_completion_interruptible(&chan->dstq_work_complete);
+
+	/* Enable interrupts for channel */
+	ps_pcie_dma_set_mask(chan, chan->intr_control_offset,
+			     DMA_INTCNTRL_ENABLINTR_BIT);
+
+	/* Freed descriptors may allow more pending segments to be issued */
+	if (chan->chan_programming) {
+		queue_work(chan->chan_programming,
+			   &chan->handle_chan_programming);
+	}
+
+	/* Re-arm the poll timer when interrupt coalescing is in use */
+	if (chan->coalesce_count > 0 && chan->poll_timer.function)
+		mod_timer(&chan->poll_timer, jiffies + chan->poll_timer_freq);
+}
+
+/*
+ * read_rootdma_config - Gather platform resources for the Root DMA variant.
+ *
+ * Sets the streaming and coherent DMA masks (64-bit with 32-bit
+ * fallback), maps the "ps_pcie_regbase" register window, fetches the
+ * "ps_pcie_rootdma_intr" interrupt, and reads the dma_vendorid /
+ * dma_deviceid properties used to identify the Root DMA.
+ *
+ * Bug fix: platform_get_resource_byname() returns NULL (not an
+ * ERR_PTR) on failure, so the old `return PTR_ERR(r)` evaluated to 0 and
+ * reported success with no register base. Return -ENODEV instead.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int read_rootdma_config(struct platform_device *platform_dev,
+			       struct xlnx_pcie_dma_device *xdev)
+{
+	int err;
+	struct resource *r;
+
+	err = dma_set_mask(&platform_dev->dev, DMA_BIT_MASK(64));
+	if (err) {
+		dev_info(&platform_dev->dev, "Cannot set 64 bit DMA mask\n");
+		err = dma_set_mask(&platform_dev->dev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&platform_dev->dev, "DMA mask set error\n");
+			return err;
+		}
+	}
+
+	err = dma_set_coherent_mask(&platform_dev->dev, DMA_BIT_MASK(64));
+	if (err) {
+		dev_info(&platform_dev->dev, "Cannot set 64 bit consistent DMA mask\n");
+		err = dma_set_coherent_mask(&platform_dev->dev,
+					    DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&platform_dev->dev, "Cannot set consistent DMA mask\n");
+			return err;
+		}
+	}
+
+	r = platform_get_resource_byname(platform_dev, IORESOURCE_MEM,
+					 "ps_pcie_regbase");
+	if (!r) {
+		dev_err(&platform_dev->dev,
+			"Unable to find memory resource for root dma\n");
+		/* NULL is not an ERR_PTR; PTR_ERR(NULL) would be 0 */
+		return -ENODEV;
+	}
+
+	xdev->reg_base = devm_ioremap_resource(&platform_dev->dev, r);
+	if (IS_ERR(xdev->reg_base)) {
+		dev_err(&platform_dev->dev, "ioresource error for root dma\n");
+		return PTR_ERR(xdev->reg_base);
+	}
+
+	xdev->platform_irq_vec =
+		platform_get_irq_byname(platform_dev,
+					"ps_pcie_rootdma_intr");
+	if (xdev->platform_irq_vec < 0) {
+		dev_err(&platform_dev->dev,
+			"Unable to get interrupt number for root dma\n");
+		return xdev->platform_irq_vec;
+	}
+
+	err = device_property_read_u16(&platform_dev->dev, "dma_vendorid",
+				       &xdev->rootdma_vendor);
+	if (err) {
+		dev_err(&platform_dev->dev,
+			"Unable to find RootDMA PCI Vendor Id\n");
+		return err;
+	}
+
+	err = device_property_read_u16(&platform_dev->dev, "dma_deviceid",
+				       &xdev->rootdma_device);
+	if (err) {
+		dev_err(&platform_dev->dev,
+			"Unable to find RootDMA PCI Device Id\n");
+		return err;
+	}
+
+	xdev->common.dev = xdev->dev;
+
+	return 0;
+}
+
+/*
+ * read_epdma_config - Gather PCI resources for the endpoint DMA variant.
+ *
+ * Retrieves the struct pci_dev stashed in platform_data, builds a mask
+ * of populated BARs, maps them with the managed pcim_* helpers, records
+ * each BAR's length/physical/virtual address, takes the DMA register
+ * base from DMA_BAR_NUMBER, and probes the available interrupt vectors.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int read_epdma_config(struct platform_device *platform_dev,
+			     struct xlnx_pcie_dma_device *xdev)
+{
+	int err;
+	struct pci_dev *pdev;
+	u16 i;
+	void __iomem * const *pci_iomap;
+	unsigned long pci_bar_length;
+
+	/* The PCI probe hands us the pci_dev through platform_data */
+	pdev = *((struct pci_dev **)(platform_dev->dev.platform_data));
+	xdev->pci_dev = pdev;
+
+	/* Build a bitmask of BARs that actually decode memory */
+	for (i = 0; i < MAX_BARS; i++) {
+		if (pci_resource_len(pdev, i) == 0)
+			continue;
+		xdev->bar_mask = xdev->bar_mask | (1 << (i));
+	}
+
+	err = pcim_iomap_regions(pdev, xdev->bar_mask, PLATFORM_DRIVER_NAME);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot request PCI regions, aborting\n");
+		return err;
+	}
+
+	pci_iomap = pcim_iomap_table(pdev);
+	if (!pci_iomap) {
+		err = -ENOMEM;
+		return err;
+	}
+
+	/* Record per-BAR length and addresses for later consumers */
+	for (i = 0; i < MAX_BARS; i++) {
+		pci_bar_length = pci_resource_len(pdev, i);
+		if (pci_bar_length == 0) {
+			xdev->bar_info[i].BAR_LENGTH = 0;
+			xdev->bar_info[i].BAR_PHYS_ADDR = 0;
+			xdev->bar_info[i].BAR_VIRT_ADDR = NULL;
+		} else {
+			xdev->bar_info[i].BAR_LENGTH =
+				pci_bar_length;
+			xdev->bar_info[i].BAR_PHYS_ADDR =
+				pci_resource_start(pdev, i);
+			xdev->bar_info[i].BAR_VIRT_ADDR =
+				(void *)pci_iomap[i];
+		}
+	}
+
+	xdev->reg_base = pci_iomap[DMA_BAR_NUMBER];
+
+	err = irq_probe(xdev);
+	if (err < 0) {
+		dev_err(&pdev->dev, "Cannot probe irq lines for device %d\n",
+			platform_dev->id);
+		return err;
+	}
+
+	xdev->common.dev = &pdev->dev;
+
+	return 0;
+}
+
+/*
+ * probe_channel_properties - parse per-channel firmware properties and set up
+ * one DMA channel's software state.
+ * @platform_dev: platform device carrying the "ps_pcie_channel<N>" property
+ * @xdev: owning DMA device context
+ * @channel_number: index of the channel to initialize
+ *
+ * Reads the channel property array (direction, descriptor count, queue count,
+ * coalesce count, poll timer frequency — order matters), clamps out-of-range
+ * values, initializes the channel's locks/lists/completions, derives the
+ * src/dst queue buffer locations, verifies the hardware reports the channel
+ * present, and registers the channel with the dmaengine channel list.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int probe_channel_properties(struct platform_device *platform_dev,
+ struct xlnx_pcie_dma_device *xdev,
+ u16 channel_number)
+{
+ int i;
+ char propertyname[CHANNEL_PROPERTY_LENGTH];
+ int numvals, ret;
+ u32 *val;
+ struct ps_pcie_dma_chan *channel;
+ struct ps_pcie_dma_channel_match *xlnx_match;
+
+ snprintf(propertyname, CHANNEL_PROPERTY_LENGTH,
+ "ps_pcie_channel%d", channel_number);
+
+ channel = &xdev->channels[channel_number];
+
+ spin_lock_init(&channel->channel_lock);
+ spin_lock_init(&channel->cookie_lock);
+
+ INIT_LIST_HEAD(&channel->pending_list);
+ spin_lock_init(&channel->pending_list_lock);
+
+ INIT_LIST_HEAD(&channel->active_list);
+ spin_lock_init(&channel->active_list_lock);
+
+ spin_lock_init(&channel->src_desc_lock);
+ spin_lock_init(&channel->dst_desc_lock);
+
+ INIT_LIST_HEAD(&channel->pending_interrupts_list);
+ spin_lock_init(&channel->pending_interrupts_lock);
+
+ INIT_LIST_HEAD(&channel->active_interrupts_list);
+ spin_lock_init(&channel->active_interrupts_lock);
+
+ init_completion(&channel->srcq_work_complete);
+ init_completion(&channel->dstq_work_complete);
+ init_completion(&channel->chan_shutdown_complt);
+ init_completion(&channel->chan_terminate_complete);
+
+ if (device_property_present(&platform_dev->dev, propertyname)) {
+ /* First call with NULL buffer returns the element count */
+ numvals = device_property_read_u32_array(&platform_dev->dev,
+ propertyname, NULL, 0);
+
+ if (numvals < 0)
+ return numvals;
+
+ /* devm_kcalloc checks numvals * sizeof(u32) for overflow */
+ val = devm_kcalloc(&platform_dev->dev, numvals, sizeof(u32),
+ GFP_KERNEL);
+
+ if (!val)
+ return -ENOMEM;
+
+ ret = device_property_read_u32_array(&platform_dev->dev,
+ propertyname, val,
+ numvals);
+ if (ret < 0) {
+ dev_err(&platform_dev->dev,
+ "Unable to read property %s\n", propertyname);
+ return ret;
+ }
+
+ /* Property values are positional; clamp out-of-range entries */
+ for (i = 0; i < numvals; i++) {
+ switch (i) {
+ case DMA_CHANNEL_DIRECTION:
+ channel->direction =
+ (val[DMA_CHANNEL_DIRECTION] ==
+ PCIE_AXI_DIRECTION) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ break;
+ case NUM_DESCRIPTORS:
+ channel->total_descriptors =
+ val[NUM_DESCRIPTORS];
+ if (channel->total_descriptors >
+ MAX_DESCRIPTORS) {
+ dev_info(&platform_dev->dev,
+ "Descriptors > allowed max\n");
+ channel->total_descriptors =
+ MAX_DESCRIPTORS;
+ }
+ break;
+ case NUM_QUEUES:
+ channel->num_queues = val[NUM_QUEUES];
+ switch (channel->num_queues) {
+ case DEFAULT_DMA_QUEUES:
+ break;
+ case TWO_DMA_QUEUES:
+ break;
+ default:
+ dev_info(&platform_dev->dev,
+ "Incorrect Q number for dma chan\n");
+ channel->num_queues = DEFAULT_DMA_QUEUES;
+ }
+ break;
+ case COALESE_COUNT:
+ channel->coalesce_count = val[COALESE_COUNT];
+
+ if (channel->coalesce_count >
+ MAX_COALESCE_COUNT) {
+ dev_info(&platform_dev->dev,
+ "Invalid coalesce Count\n");
+ channel->coalesce_count =
+ MAX_COALESCE_COUNT;
+ }
+ break;
+ case POLL_TIMER_FREQUENCY:
+ channel->poll_timer_freq =
+ val[POLL_TIMER_FREQUENCY];
+ break;
+ default:
+ dev_err(&platform_dev->dev,
+ "Check order of channel properties!\n");
+ }
+ }
+ } else {
+ dev_err(&platform_dev->dev,
+ "Property %s not present. Invalid configuration!\n",
+ propertyname);
+ return -ENOTSUPP;
+ }
+
+ /*
+ * Derive queue buffer locations: with all four queues the "far" side
+ * lives on AXI; with two queues only the PCI-side queue exists.
+ */
+ if (channel->direction == DMA_TO_DEVICE) {
+ if (channel->num_queues == DEFAULT_DMA_QUEUES) {
+ channel->srcq_buffer_location = BUFFER_LOC_PCI;
+ channel->dstq_buffer_location = BUFFER_LOC_AXI;
+ } else {
+ channel->srcq_buffer_location = BUFFER_LOC_PCI;
+ channel->dstq_buffer_location = BUFFER_LOC_INVALID;
+ }
+ } else {
+ if (channel->num_queues == DEFAULT_DMA_QUEUES) {
+ channel->srcq_buffer_location = BUFFER_LOC_AXI;
+ channel->dstq_buffer_location = BUFFER_LOC_PCI;
+ } else {
+ channel->srcq_buffer_location = BUFFER_LOC_INVALID;
+ channel->dstq_buffer_location = BUFFER_LOC_PCI;
+ }
+ }
+
+ channel->xdev = xdev;
+ channel->channel_number = channel_number;
+
+ /* Root DMA and endpoint DMA expose interrupt registers at different offsets */
+ if (xdev->is_rootdma) {
+ channel->dev = xdev->dev;
+ channel->intr_status_offset = DMA_AXI_INTR_STATUS_REG_OFFSET;
+ channel->intr_control_offset = DMA_AXI_INTR_CNTRL_REG_OFFSET;
+ } else {
+ channel->dev = &xdev->pci_dev->dev;
+ channel->intr_status_offset = DMA_PCIE_INTR_STATUS_REG_OFFSET;
+ channel->intr_control_offset = DMA_PCIE_INTR_CNTRL_REG_OFFSET;
+ }
+
+ channel->chan_base =
+ (struct DMA_ENGINE_REGISTERS *)((__force char *)(xdev->reg_base) +
+ (channel_number * DMA_CHANNEL_REGS_SIZE));
+
+ /* Bail out if the hardware says this channel is not implemented */
+ if ((channel->chan_base->dma_channel_status &
+ DMA_STATUS_DMA_PRES_BIT) == 0) {
+ dev_err(&platform_dev->dev,
+ "Hardware reports channel not present\n");
+ return -ENOTSUPP;
+ }
+
+ update_channel_read_attribute(channel);
+ update_channel_write_attribute(channel);
+
+ xlnx_match = devm_kzalloc(&platform_dev->dev,
+ sizeof(struct ps_pcie_dma_channel_match),
+ GFP_KERNEL);
+
+ if (!xlnx_match)
+ return -ENOMEM;
+
+ if (xdev->is_rootdma) {
+ xlnx_match->pci_vendorid = xdev->rootdma_vendor;
+ xlnx_match->pci_deviceid = xdev->rootdma_device;
+ } else {
+ xlnx_match->pci_vendorid = xdev->pci_dev->vendor;
+ xlnx_match->pci_deviceid = xdev->pci_dev->device;
+ xlnx_match->bar_params = xdev->bar_info;
+ }
+
+ xlnx_match->board_number = xdev->board_number;
+ xlnx_match->channel_number = channel_number;
+ xlnx_match->direction = xdev->channels[channel_number].direction;
+
+ /* Match data is used by dma_request_channel() filter functions */
+ channel->common.private = (void *)xlnx_match;
+
+ channel->common.device = &xdev->common;
+ list_add_tail(&channel->common.device_node, &xdev->common.channels);
+
+ return 0;
+}
+
+/*
+ * Release the three mempools owned by @chan.  The pools are independent
+ * and mempool_destroy() tolerates NULL, so no guards are needed.
+ */
+static void xlnx_ps_pcie_destroy_mempool(struct ps_pcie_dma_chan *chan)
+{
+ mempool_destroy(chan->intr_transactions_pool);
+ mempool_destroy(chan->tx_elements_pool);
+ mempool_destroy(chan->transactions_pool);
+}
+
+/*
+ * Destroy every per-channel workqueue that was created.  The queues are
+ * mutually independent, so they are torn down in reverse allocation
+ * order; destroy_workqueue() must not be called with NULL, hence the
+ * guards for queues that were never created.
+ */
+static void xlnx_ps_pcie_free_worker_queues(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->primary_desc_cleanup)
+ destroy_workqueue(chan->primary_desc_cleanup);
+
+ if (chan->chan_programming)
+ destroy_workqueue(chan->chan_programming);
+
+ if (chan->dstq_desc_cleanup)
+ destroy_workqueue(chan->dstq_desc_cleanup);
+
+ if (chan->srcq_desc_cleanup)
+ destroy_workqueue(chan->srcq_desc_cleanup);
+
+ if (chan->sw_intrs_wrkq)
+ destroy_workqueue(chan->sw_intrs_wrkq);
+
+ if (chan->maintenance_workq)
+ destroy_workqueue(chan->maintenance_workq);
+}
+
+/*
+ * Free the source- and destination-queue packet context arrays.
+ * kfree(NULL) is a no-op, so unallocated contexts are handled for free.
+ */
+static void xlnx_ps_pcie_free_pkt_ctxts(struct ps_pcie_dma_chan *chan)
+{
+ kfree(chan->ppkt_ctx_dstq);
+ kfree(chan->ppkt_ctx_srcq);
+}
+
+/*
+ * xlnx_ps_pcie_free_descriptors - release all coherent descriptor rings.
+ * @chan: channel whose src/dst scatter-gather and status rings are freed
+ *
+ * Each ring is freed only if its virtual pointer is set; sizes are
+ * recomputed from total_descriptors to mirror the allocation sizes.
+ */
+static void xlnx_ps_pcie_free_descriptors(struct ps_pcie_dma_chan *chan)
+{
+ ssize_t size;
+
+ if (chan->psrc_sgl_bd) {
+ size = chan->total_descriptors *
+ sizeof(struct SOURCE_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->psrc_sgl_bd,
+ chan->src_sgl_bd_pa);
+ }
+
+ if (chan->pdst_sgl_bd) {
+ size = chan->total_descriptors *
+ sizeof(struct DEST_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->pdst_sgl_bd,
+ chan->dst_sgl_bd_pa);
+ }
+
+ if (chan->psrc_sta_bd) {
+ size = chan->total_descriptors *
+ sizeof(struct STATUS_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->psrc_sta_bd,
+ chan->src_sta_bd_pa);
+ }
+
+ if (chan->pdst_sta_bd) {
+ size = chan->total_descriptors *
+ sizeof(struct STATUS_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->pdst_sta_bd,
+ chan->dst_sta_bd_pa);
+ }
+}
+
+/*
+ * xlnx_ps_pcie_channel_activate - enable interrupts and the DMA engine
+ * for @chan and mark the channel available.
+ *
+ * Programs the coalesce count into the interrupt control register, turns
+ * on the engine, and starts the poll timer when coalescing is in use and
+ * no timer exists yet.  Always returns 0.
+ */
+static int xlnx_ps_pcie_channel_activate(struct ps_pcie_dma_chan *chan)
+{
+ u32 reg = chan->coalesce_count;
+
+ reg = reg << DMA_INTCNTRL_SGCOLSCCNT_BIT_SHIFT;
+
+ /* Enable Interrupts for channel */
+ ps_pcie_dma_set_mask(chan, chan->intr_control_offset,
+ reg | DMA_INTCNTRL_ENABLINTR_BIT |
+ DMA_INTCNTRL_DMAERRINTR_BIT |
+ DMA_INTCNTRL_DMASGINTR_BIT);
+
+ /* Enable DMA */
+ ps_pcie_dma_set_mask(chan, DMA_CNTRL_REG_OFFSET,
+ DMA_CNTRL_ENABL_BIT |
+ DMA_CNTRL_64BIT_STAQ_ELEMSZ_BIT);
+
+ spin_lock(&chan->channel_lock);
+ chan->state = CHANNEL_AVAILABLE;
+ spin_unlock(&chan->channel_lock);
+
+ /* Activate timer if required */
+ if (chan->coalesce_count > 0 && !chan->poll_timer.function)
+ xlnx_ps_pcie_alloc_poll_timer(chan);
+
+ return 0;
+}
+
+/*
+ * xlnx_ps_pcie_channel_quiesce - stop a channel: mask interrupts, delete
+ * the poll timer, drain the work queues, clear sticky status bits,
+ * disable the engine and mark the channel unavailable.
+ */
+static void xlnx_ps_pcie_channel_quiesce(struct ps_pcie_dma_chan *chan)
+{
+ /* Disable interrupts for Channel */
+ ps_pcie_dma_clr_mask(chan, chan->intr_control_offset,
+ DMA_INTCNTRL_ENABLINTR_BIT);
+
+ /*
+ * Delete timer if it is created.  The previous check used
+ * !chan->poll_timer.function, which is false whenever a timer
+ * exists, so the free call could never run and a created poll
+ * timer survived quiesce (possible callback after shutdown).
+ */
+ if (chan->coalesce_count > 0 && chan->poll_timer.function)
+ xlnx_ps_pcie_free_poll_timer(chan);
+
+ /* Flush descriptor cleaning work queues */
+ if (chan->primary_desc_cleanup)
+ flush_workqueue(chan->primary_desc_cleanup);
+
+ /* Flush channel programming work queue */
+ if (chan->chan_programming)
+ flush_workqueue(chan->chan_programming);
+
+ /* Clear the persistent bits */
+ ps_pcie_dma_set_mask(chan, chan->intr_status_offset,
+ DMA_INTSTATUS_DMAERR_BIT |
+ DMA_INTSTATUS_SGLINTR_BIT |
+ DMA_INTSTATUS_SWINTR_BIT);
+
+ /* Disable DMA channel */
+ ps_pcie_dma_clr_mask(chan, DMA_CNTRL_REG_OFFSET, DMA_CNTRL_ENABL_BIT);
+
+ spin_lock(&chan->channel_lock);
+ chan->state = CHANNEL_UNAVIALBLE;
+ spin_unlock(&chan->channel_lock);
+}
+
+/*
+ * ivk_cbk_intr_seg - complete an interrupt-transaction descriptor.
+ * @intr_seg: software interrupt segment to complete
+ * @chan: owning channel (provides the cookie lock)
+ * @result: dmaengine result to report (residue is always 0)
+ *
+ * Marks the cookie complete under cookie_lock, then invokes the client
+ * callback with the given result.
+ */
+static void ivk_cbk_intr_seg(struct ps_pcie_intr_segment *intr_seg,
+ struct ps_pcie_dma_chan *chan,
+ enum dmaengine_tx_result result)
+{
+ struct dmaengine_result rslt;
+
+ rslt.result = result;
+ rslt.residue = 0;
+
+ spin_lock(&chan->cookie_lock);
+ dma_cookie_complete(&intr_seg->async_intr_tx);
+ spin_unlock(&chan->cookie_lock);
+
+ dmaengine_desc_get_callback_invoke(&intr_seg->async_intr_tx, &rslt);
+}
+
+/*
+ * ivk_cbk_seg - complete a transfer segment and invoke its callback.
+ * @seg: transfer segment whose cookie is completed
+ * @chan: owning channel
+ * @result: dmaengine result to report
+ *
+ * A residue (full transfer size) is reported only when the PCI-side queue
+ * carried the data; otherwise the callback receives a NULL result pointer.
+ */
+static void ivk_cbk_seg(struct ps_pcie_tx_segment *seg,
+ struct ps_pcie_dma_chan *chan,
+ enum dmaengine_tx_result result)
+{
+ struct dmaengine_result rslt, *prslt;
+
+ spin_lock(&chan->cookie_lock);
+ dma_cookie_complete(&seg->async_tx);
+ spin_unlock(&chan->cookie_lock);
+
+ rslt.result = result;
+ if (seg->src_elements &&
+ chan->srcq_buffer_location == BUFFER_LOC_PCI) {
+ rslt.residue = seg->total_transfer_bytes;
+ prslt = &rslt;
+ } else if (seg->dst_elements &&
+ chan->dstq_buffer_location == BUFFER_LOC_PCI) {
+ rslt.residue = seg->total_transfer_bytes;
+ prslt = &rslt;
+ } else {
+ prslt = NULL;
+ }
+
+ dmaengine_desc_get_callback_invoke(&seg->async_tx, prslt);
+}
+
+/*
+ * Complete the segment attached to an in-flight packet context and
+ * return it to the transactions pool.  Contexts that are not in use,
+ * or carry no segment, are ignored.
+ */
+static void ivk_cbk_ctx(struct PACKET_TRANSFER_PARAMS *ppkt_ctxt,
+ struct ps_pcie_dma_chan *chan,
+ enum dmaengine_tx_result result)
+{
+ if (ppkt_ctxt->availability_status != IN_USE || !ppkt_ctxt->seg)
+ return;
+
+ ivk_cbk_seg(ppkt_ctxt->seg, chan, result);
+ mempool_free(ppkt_ctxt->seg, chan->transactions_pool);
+}
+
+/*
+ * ivk_cbk_for_pending - abort every outstanding transaction on @chan.
+ *
+ * Walks four sources of in-flight work and reports each back to the
+ * client with a failure/abort result:
+ *  1. src-queue packet contexts between tail and head (READ_FAILED)
+ *  2. dst-queue packet contexts between tail and head (WRITE_FAILED)
+ *  3. active and pending transfer segments (ABORTED), freeing their
+ *     transfer elements and segments back to the mempools
+ *  4. active and pending software-interrupt segments (ABORTED)
+ *
+ * NOTE(review): the list walks take the per-list locks only around
+ * list_del(); this relies on the channel being quiesced so no other
+ * context mutates the lists concurrently — confirm callers always
+ * quiesce first.
+ */
+static void ivk_cbk_for_pending(struct ps_pcie_dma_chan *chan)
+{
+ int i;
+ struct PACKET_TRANSFER_PARAMS *ppkt_ctxt;
+ struct ps_pcie_tx_segment *seg, *seg_nxt;
+ struct ps_pcie_intr_segment *intr_seg, *intr_seg_next;
+ struct ps_pcie_transfer_elements *ele, *ele_nxt;
+
+ if (chan->ppkt_ctx_srcq) {
+ if (chan->idx_ctx_srcq_tail != chan->idx_ctx_srcq_head) {
+ i = chan->idx_ctx_srcq_tail;
+ while (i != chan->idx_ctx_srcq_head) {
+ ppkt_ctxt = chan->ppkt_ctx_srcq + i;
+ ivk_cbk_ctx(ppkt_ctxt, chan,
+ DMA_TRANS_READ_FAILED);
+ memset(ppkt_ctxt, 0,
+ sizeof(struct PACKET_TRANSFER_PARAMS));
+ i++;
+ /* ring buffer: wrap index at the descriptor count */
+ if (i == chan->total_descriptors)
+ i = 0;
+ }
+ }
+ }
+
+ if (chan->ppkt_ctx_dstq) {
+ if (chan->idx_ctx_dstq_tail != chan->idx_ctx_dstq_head) {
+ i = chan->idx_ctx_dstq_tail;
+ while (i != chan->idx_ctx_dstq_head) {
+ ppkt_ctxt = chan->ppkt_ctx_dstq + i;
+ ivk_cbk_ctx(ppkt_ctxt, chan,
+ DMA_TRANS_WRITE_FAILED);
+ memset(ppkt_ctxt, 0,
+ sizeof(struct PACKET_TRANSFER_PARAMS));
+ i++;
+ if (i == chan->total_descriptors)
+ i = 0;
+ }
+ }
+ }
+
+ list_for_each_entry_safe(seg, seg_nxt, &chan->active_list, node) {
+ ivk_cbk_seg(seg, chan, DMA_TRANS_ABORTED);
+ spin_lock(&chan->active_list_lock);
+ list_del(&seg->node);
+ spin_unlock(&chan->active_list_lock);
+ list_for_each_entry_safe(ele, ele_nxt,
+ &seg->transfer_nodes, node) {
+ list_del(&ele->node);
+ mempool_free(ele, chan->tx_elements_pool);
+ }
+ mempool_free(seg, chan->transactions_pool);
+ }
+
+ list_for_each_entry_safe(seg, seg_nxt, &chan->pending_list, node) {
+ ivk_cbk_seg(seg, chan, DMA_TRANS_ABORTED);
+ spin_lock(&chan->pending_list_lock);
+ list_del(&seg->node);
+ spin_unlock(&chan->pending_list_lock);
+ list_for_each_entry_safe(ele, ele_nxt,
+ &seg->transfer_nodes, node) {
+ list_del(&ele->node);
+ mempool_free(ele, chan->tx_elements_pool);
+ }
+ mempool_free(seg, chan->transactions_pool);
+ }
+
+ list_for_each_entry_safe(intr_seg, intr_seg_next,
+ &chan->active_interrupts_list, node) {
+ ivk_cbk_intr_seg(intr_seg, chan, DMA_TRANS_ABORTED);
+ spin_lock(&chan->active_interrupts_lock);
+ list_del(&intr_seg->node);
+ spin_unlock(&chan->active_interrupts_lock);
+ mempool_free(intr_seg, chan->intr_transactions_pool);
+ }
+
+ list_for_each_entry_safe(intr_seg, intr_seg_next,
+ &chan->pending_interrupts_list, node) {
+ ivk_cbk_intr_seg(intr_seg, chan, DMA_TRANS_ABORTED);
+ spin_lock(&chan->pending_interrupts_lock);
+ list_del(&intr_seg->node);
+ spin_unlock(&chan->pending_interrupts_lock);
+ mempool_free(intr_seg, chan->intr_transactions_pool);
+ }
+}
+
+/*
+ * xlnx_ps_pcie_reset_channel - full channel recovery sequence.
+ *
+ * Quiesce the channel, abort all outstanding work back to clients,
+ * reset the engine, re-initialize software and hardware state, then
+ * re-activate the channel.
+ */
+static void xlnx_ps_pcie_reset_channel(struct ps_pcie_dma_chan *chan)
+{
+ xlnx_ps_pcie_channel_quiesce(chan);
+
+ ivk_cbk_for_pending(chan);
+
+ ps_pcie_chan_reset(chan);
+
+ init_sw_components(chan);
+ init_hw_components(chan);
+
+ xlnx_ps_pcie_channel_activate(chan);
+}
+
+/*
+ * xlnx_ps_pcie_free_poll_timer - delete the poll timer if one exists.
+ *
+ * del_timer_sync() waits for a running handler; .function is NULLed so
+ * the "timer exists" checks elsewhere in the driver see it as gone.
+ */
+static void xlnx_ps_pcie_free_poll_timer(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->poll_timer.function) {
+ del_timer_sync(&chan->poll_timer);
+ chan->poll_timer.function = NULL;
+ }
+}
+
+/*
+ * xlnx_ps_pcie_alloc_poll_timer - arm the completion-poll timer.
+ *
+ * The timer fires poll_completed_transactions() after poll_timer_freq
+ * jiffies.  Always returns 0.
+ */
+static int xlnx_ps_pcie_alloc_poll_timer(struct ps_pcie_dma_chan *chan)
+{
+ timer_setup(&chan->poll_timer, poll_completed_transactions, 0);
+ chan->poll_timer.expires = jiffies + chan->poll_timer_freq;
+
+ add_timer(&chan->poll_timer);
+
+ return 0;
+}
+
+/*
+ * terminate_transactions_work - maintenance-workqueue handler for
+ * dmaengine terminate_all: quiesce, abort outstanding work, reactivate,
+ * then signal the waiter via chan_terminate_complete.
+ */
+static void terminate_transactions_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(work,
+ struct ps_pcie_dma_chan, handle_chan_terminate);
+
+ xlnx_ps_pcie_channel_quiesce(chan);
+ ivk_cbk_for_pending(chan);
+ xlnx_ps_pcie_channel_activate(chan);
+
+ complete(&chan->chan_terminate_complete);
+}
+
+/*
+ * chan_shutdown_work - maintenance-workqueue handler that quiesces the
+ * channel and signals chan_shutdown_complt (used by free_chan_resources).
+ */
+static void chan_shutdown_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(work,
+ struct ps_pcie_dma_chan, handle_chan_shutdown);
+
+ xlnx_ps_pcie_channel_quiesce(chan);
+
+ complete(&chan->chan_shutdown_complt);
+}
+
+/*
+ * chan_reset_work - maintenance-workqueue handler that performs the
+ * full channel reset sequence.
+ */
+static void chan_reset_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(work,
+ struct ps_pcie_dma_chan, handle_chan_reset);
+
+ xlnx_ps_pcie_reset_channel(chan);
+}
+
+/*
+ * sw_intr_work - workqueue handler for software interrupts: complete
+ * every segment on the active-interrupts list and invoke its callback,
+ * then unlink it.  The segment itself stays in the mempool until the
+ * owner frees it.
+ */
+static void sw_intr_work(struct work_struct *work)
+{
+ struct ps_pcie_dma_chan *chan =
+ (struct ps_pcie_dma_chan *)container_of(work,
+ struct ps_pcie_dma_chan, handle_sw_intrs);
+ struct ps_pcie_intr_segment *intr_seg, *intr_seg_next;
+
+ list_for_each_entry_safe(intr_seg, intr_seg_next,
+ &chan->active_interrupts_list, node) {
+ spin_lock(&chan->cookie_lock);
+ dma_cookie_complete(&intr_seg->async_intr_tx);
+ spin_unlock(&chan->cookie_lock);
+ dmaengine_desc_get_callback_invoke(&intr_seg->async_intr_tx,
+ NULL);
+ spin_lock(&chan->active_interrupts_lock);
+ list_del(&intr_seg->node);
+ spin_unlock(&chan->active_interrupts_lock);
+ }
+}
+
+/*
+ * xlnx_ps_pcie_alloc_worker_threads - create the per-channel workqueues.
+ *
+ * Creates single-threaded workqueues for: descriptor programming,
+ * primary descriptor cleanup, maintenance (reset/shutdown/terminate),
+ * software interrupts, and — only when the corresponding descriptor
+ * ring exists — src-queue and dst-queue completion handling.  Each
+ * queue's work items are bound immediately after creation.
+ *
+ * On any failure the queues created so far are destroyed via the
+ * reverse-order goto chain.
+ *
+ * Return: 0 on success, -ENOMEM if any workqueue cannot be created.
+ */
+static int xlnx_ps_pcie_alloc_worker_threads(struct ps_pcie_dma_chan *chan)
+{
+ char wq_name[WORKQ_NAME_SIZE];
+
+ snprintf(wq_name, WORKQ_NAME_SIZE,
+ "PS PCIe channel %d descriptor programming wq",
+ chan->channel_number);
+ chan->chan_programming =
+ create_singlethread_workqueue((const char *)wq_name);
+ if (!chan->chan_programming) {
+ dev_err(chan->dev,
+ "Unable to create programming wq for chan %d",
+ chan->channel_number);
+ goto err_no_desc_program_wq;
+ } else {
+ INIT_WORK(&chan->handle_chan_programming,
+ ps_pcie_chan_program_work);
+ }
+ memset(wq_name, 0, WORKQ_NAME_SIZE);
+
+ snprintf(wq_name, WORKQ_NAME_SIZE,
+ "PS PCIe channel %d primary cleanup wq", chan->channel_number);
+ chan->primary_desc_cleanup =
+ create_singlethread_workqueue((const char *)wq_name);
+ if (!chan->primary_desc_cleanup) {
+ dev_err(chan->dev,
+ "Unable to create primary cleanup wq for channel %d",
+ chan->channel_number);
+ goto err_no_primary_clean_wq;
+ } else {
+ INIT_WORK(&chan->handle_primary_desc_cleanup,
+ ps_pcie_chan_primary_work);
+ }
+ memset(wq_name, 0, WORKQ_NAME_SIZE);
+
+ snprintf(wq_name, WORKQ_NAME_SIZE,
+ "PS PCIe channel %d maintenance works wq",
+ chan->channel_number);
+ chan->maintenance_workq =
+ create_singlethread_workqueue((const char *)wq_name);
+ if (!chan->maintenance_workq) {
+ dev_err(chan->dev,
+ "Unable to create maintenance wq for channel %d",
+ chan->channel_number);
+ goto err_no_maintenance_wq;
+ } else {
+ INIT_WORK(&chan->handle_chan_reset, chan_reset_work);
+ INIT_WORK(&chan->handle_chan_shutdown, chan_shutdown_work);
+ INIT_WORK(&chan->handle_chan_terminate,
+ terminate_transactions_work);
+ }
+ memset(wq_name, 0, WORKQ_NAME_SIZE);
+
+ snprintf(wq_name, WORKQ_NAME_SIZE,
+ "PS PCIe channel %d software Interrupts wq",
+ chan->channel_number);
+ chan->sw_intrs_wrkq =
+ create_singlethread_workqueue((const char *)wq_name);
+ if (!chan->sw_intrs_wrkq) {
+ dev_err(chan->dev,
+ "Unable to create sw interrupts wq for channel %d",
+ chan->channel_number);
+ goto err_no_sw_intrs_wq;
+ } else {
+ INIT_WORK(&chan->handle_sw_intrs, sw_intr_work);
+ }
+ memset(wq_name, 0, WORKQ_NAME_SIZE);
+
+ /* src-queue cleanup queue is needed only when a src ring exists */
+ if (chan->psrc_sgl_bd) {
+ snprintf(wq_name, WORKQ_NAME_SIZE,
+ "PS PCIe channel %d srcq handling wq",
+ chan->channel_number);
+ chan->srcq_desc_cleanup =
+ create_singlethread_workqueue((const char *)wq_name);
+ if (!chan->srcq_desc_cleanup) {
+ dev_err(chan->dev,
+ "Unable to create src q completion wq chan %d",
+ chan->channel_number);
+ goto err_no_src_q_completion_wq;
+ } else {
+ INIT_WORK(&chan->handle_srcq_desc_cleanup,
+ src_cleanup_work);
+ }
+ memset(wq_name, 0, WORKQ_NAME_SIZE);
+ }
+
+ /* dst-queue cleanup queue is needed only when a dst ring exists */
+ if (chan->pdst_sgl_bd) {
+ snprintf(wq_name, WORKQ_NAME_SIZE,
+ "PS PCIe channel %d dstq handling wq",
+ chan->channel_number);
+ chan->dstq_desc_cleanup =
+ create_singlethread_workqueue((const char *)wq_name);
+ if (!chan->dstq_desc_cleanup) {
+ dev_err(chan->dev,
+ "Unable to create dst q completion wq chan %d",
+ chan->channel_number);
+ goto err_no_dst_q_completion_wq;
+ } else {
+ INIT_WORK(&chan->handle_dstq_desc_cleanup,
+ dst_cleanup_work);
+ }
+ memset(wq_name, 0, WORKQ_NAME_SIZE);
+ }
+
+ return 0;
+err_no_dst_q_completion_wq:
+ if (chan->srcq_desc_cleanup)
+ destroy_workqueue(chan->srcq_desc_cleanup);
+err_no_src_q_completion_wq:
+ if (chan->sw_intrs_wrkq)
+ destroy_workqueue(chan->sw_intrs_wrkq);
+err_no_sw_intrs_wq:
+ if (chan->maintenance_workq)
+ destroy_workqueue(chan->maintenance_workq);
+err_no_maintenance_wq:
+ if (chan->primary_desc_cleanup)
+ destroy_workqueue(chan->primary_desc_cleanup);
+err_no_primary_clean_wq:
+ if (chan->chan_programming)
+ destroy_workqueue(chan->chan_programming);
+err_no_desc_program_wq:
+ return -ENOMEM;
+}
+
+/*
+ * xlnx_ps_pcie_alloc_mempool - create the channel's three mempools:
+ * transfer segments and transfer elements (sized by the descriptor
+ * count) and software-interrupt segments (MIN_SW_INTR_TRANSACTIONS).
+ *
+ * On failure the pools created so far are destroyed in reverse order.
+ *
+ * Return: 0 on success, -ENOMEM on failure.
+ */
+static int xlnx_ps_pcie_alloc_mempool(struct ps_pcie_dma_chan *chan)
+{
+ chan->transactions_pool =
+ mempool_create_kmalloc_pool(chan->total_descriptors,
+ sizeof(struct ps_pcie_tx_segment));
+
+ if (!chan->transactions_pool)
+ goto no_transactions_pool;
+
+ chan->tx_elements_pool =
+ mempool_create_kmalloc_pool(chan->total_descriptors,
+ sizeof(struct ps_pcie_transfer_elements));
+
+ if (!chan->tx_elements_pool)
+ goto no_tx_elements_pool;
+
+ chan->intr_transactions_pool =
+ mempool_create_kmalloc_pool(MIN_SW_INTR_TRANSACTIONS,
+ sizeof(struct ps_pcie_intr_segment));
+
+ if (!chan->intr_transactions_pool)
+ goto no_intr_transactions_pool;
+
+ return 0;
+
+no_intr_transactions_pool:
+ mempool_destroy(chan->tx_elements_pool);
+no_tx_elements_pool:
+ mempool_destroy(chan->transactions_pool);
+no_transactions_pool:
+ return -ENOMEM;
+}
+
+/*
+ * xlnx_ps_pcie_alloc_pkt_contexts - allocate per-descriptor packet
+ * context arrays for whichever queues (src/dst) the channel uses.
+ *
+ * Return: 0 on success, -ENOMEM on failure (the src array is freed if
+ * the dst allocation fails; kfree(NULL) is safe on the early path).
+ */
+static int xlnx_ps_pcie_alloc_pkt_contexts(struct ps_pcie_dma_chan *chan)
+{
+ if (chan->psrc_sgl_bd) {
+ chan->ppkt_ctx_srcq =
+ kcalloc(chan->total_descriptors,
+ sizeof(struct PACKET_TRANSFER_PARAMS),
+ GFP_KERNEL);
+ if (!chan->ppkt_ctx_srcq) {
+ dev_err(chan->dev,
+ "Src pkt cxt allocation for chan %d failed\n",
+ chan->channel_number);
+ goto err_no_src_pkt_ctx;
+ }
+ }
+
+ if (chan->pdst_sgl_bd) {
+ chan->ppkt_ctx_dstq =
+ kcalloc(chan->total_descriptors,
+ sizeof(struct PACKET_TRANSFER_PARAMS),
+ GFP_KERNEL);
+ if (!chan->ppkt_ctx_dstq) {
+ dev_err(chan->dev,
+ "Dst pkt cxt for chan %d failed\n",
+ chan->channel_number);
+ goto err_no_dst_pkt_ctx;
+ }
+ }
+
+ return 0;
+
+err_no_dst_pkt_ctx:
+ kfree(chan->ppkt_ctx_srcq);
+
+err_no_src_pkt_ctx:
+ return -ENOMEM;
+}
+
+/*
+ * dma_alloc_descriptors_two_queues - allocate descriptor rings for a
+ * channel operating in two-queue mode.
+ *
+ * Allocates one scatter-gather ring and one status ring, then assigns
+ * them to either the src or dst side depending on channel direction
+ * (the unused side is explicitly NULLed).  An unsupported direction
+ * unwinds both allocations.
+ *
+ * NOTE(review): the SGL ring is sized with sizeof(struct
+ * SOURCE_DMA_DESCRIPTOR) even when assigned to the dst side — this
+ * assumes SOURCE and DEST descriptors are the same size; confirm.
+ *
+ * Return: 0 on success, -ENOMEM on failure.
+ */
+static int dma_alloc_descriptors_two_queues(struct ps_pcie_dma_chan *chan)
+{
+ size_t size;
+
+ void *sgl_base;
+ void *sta_base;
+ dma_addr_t phy_addr_sglbase;
+ dma_addr_t phy_addr_stabase;
+
+ size = chan->total_descriptors *
+ sizeof(struct SOURCE_DMA_DESCRIPTOR);
+
+ sgl_base = dma_alloc_coherent(chan->dev, size, &phy_addr_sglbase,
+ GFP_KERNEL);
+
+ if (!sgl_base) {
+ dev_err(chan->dev,
+ "Sgl bds in two channel mode for chan %d failed\n",
+ chan->channel_number);
+ goto err_no_sgl_bds;
+ }
+
+ size = chan->total_descriptors * sizeof(struct STATUS_DMA_DESCRIPTOR);
+ sta_base = dma_alloc_coherent(chan->dev, size, &phy_addr_stabase,
+ GFP_KERNEL);
+
+ if (!sta_base) {
+ dev_err(chan->dev,
+ "Sta bds in two channel mode for chan %d failed\n",
+ chan->channel_number);
+ goto err_no_sta_bds;
+ }
+
+ if (chan->direction == DMA_TO_DEVICE) {
+ chan->psrc_sgl_bd = sgl_base;
+ chan->src_sgl_bd_pa = phy_addr_sglbase;
+
+ chan->psrc_sta_bd = sta_base;
+ chan->src_sta_bd_pa = phy_addr_stabase;
+
+ chan->pdst_sgl_bd = NULL;
+ chan->dst_sgl_bd_pa = 0;
+
+ chan->pdst_sta_bd = NULL;
+ chan->dst_sta_bd_pa = 0;
+
+ } else if (chan->direction == DMA_FROM_DEVICE) {
+ chan->psrc_sgl_bd = NULL;
+ chan->src_sgl_bd_pa = 0;
+
+ chan->psrc_sta_bd = NULL;
+ chan->src_sta_bd_pa = 0;
+
+ chan->pdst_sgl_bd = sgl_base;
+ chan->dst_sgl_bd_pa = phy_addr_sglbase;
+
+ chan->pdst_sta_bd = sta_base;
+ chan->dst_sta_bd_pa = phy_addr_stabase;
+
+ } else {
+ dev_err(chan->dev,
+ "%d %s() Unsupported channel direction\n",
+ __LINE__, __func__);
+ goto unsupported_channel_direction;
+ }
+
+ return 0;
+
+unsupported_channel_direction:
+ size = chan->total_descriptors *
+ sizeof(struct STATUS_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, sta_base, phy_addr_stabase);
+err_no_sta_bds:
+ size = chan->total_descriptors *
+ sizeof(struct SOURCE_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, sgl_base, phy_addr_sglbase);
+err_no_sgl_bds:
+
+ return -ENOMEM;
+}
+
+/*
+ * dma_alloc_decriptors_all_queues - allocate all four descriptor rings
+ * (src SGL, dst SGL, src status, dst status) for default four-queue mode.
+ *
+ * On failure the rings allocated so far are freed in reverse order via
+ * the goto chain.  (Function name typo "decriptors" is kept — callers
+ * in this file reference it.)
+ *
+ * Return: 0 on success, -ENOMEM on failure.
+ */
+static int dma_alloc_decriptors_all_queues(struct ps_pcie_dma_chan *chan)
+{
+ size_t size;
+
+ size = chan->total_descriptors *
+ sizeof(struct SOURCE_DMA_DESCRIPTOR);
+ chan->psrc_sgl_bd =
+ dma_alloc_coherent(chan->dev, size, &chan->src_sgl_bd_pa,
+ GFP_KERNEL);
+
+ if (!chan->psrc_sgl_bd) {
+ dev_err(chan->dev,
+ "Alloc fail src q buffer descriptors for chan %d\n",
+ chan->channel_number);
+ goto err_no_src_sgl_descriptors;
+ }
+
+ size = chan->total_descriptors * sizeof(struct DEST_DMA_DESCRIPTOR);
+ chan->pdst_sgl_bd =
+ dma_alloc_coherent(chan->dev, size, &chan->dst_sgl_bd_pa,
+ GFP_KERNEL);
+
+ if (!chan->pdst_sgl_bd) {
+ dev_err(chan->dev,
+ "Alloc fail dst q buffer descriptors for chan %d\n",
+ chan->channel_number);
+ goto err_no_dst_sgl_descriptors;
+ }
+
+ size = chan->total_descriptors * sizeof(struct STATUS_DMA_DESCRIPTOR);
+ chan->psrc_sta_bd =
+ dma_alloc_coherent(chan->dev, size, &chan->src_sta_bd_pa,
+ GFP_KERNEL);
+
+ if (!chan->psrc_sta_bd) {
+ dev_err(chan->dev,
+ "Unable to allocate src q status bds for chan %d\n",
+ chan->channel_number);
+ goto err_no_src_sta_descriptors;
+ }
+
+ /* size still holds the STATUS ring size from the previous allocation */
+ chan->pdst_sta_bd =
+ dma_alloc_coherent(chan->dev, size, &chan->dst_sta_bd_pa,
+ GFP_KERNEL);
+
+ if (!chan->pdst_sta_bd) {
+ dev_err(chan->dev,
+ "Unable to allocate Dst q status bds for chan %d\n",
+ chan->channel_number);
+ goto err_no_dst_sta_descriptors;
+ }
+
+ return 0;
+
+err_no_dst_sta_descriptors:
+ size = chan->total_descriptors *
+ sizeof(struct STATUS_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->psrc_sta_bd,
+ chan->src_sta_bd_pa);
+err_no_src_sta_descriptors:
+ size = chan->total_descriptors *
+ sizeof(struct DEST_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->pdst_sgl_bd,
+ chan->dst_sgl_bd_pa);
+err_no_dst_sgl_descriptors:
+ size = chan->total_descriptors *
+ sizeof(struct SOURCE_DMA_DESCRIPTOR);
+ dma_free_coherent(chan->dev, size, chan->psrc_sgl_bd,
+ chan->src_sgl_bd_pa);
+
+err_no_src_sgl_descriptors:
+ return -ENOMEM;
+}
+
+/*
+ * xlnx_ps_pcie_dma_free_chan_resources - dmaengine .device_free_chan_resources.
+ *
+ * Queues a shutdown work item on the maintenance workqueue, waits for it
+ * to quiesce the channel, then tears down workqueues, packet contexts,
+ * mempools and descriptor rings and marks the channel unallocated.
+ *
+ * NOTE(review): the return value of wait_for_completion_interruptible()
+ * is ignored — if the wait is interrupted, teardown proceeds while the
+ * shutdown work may still be running; confirm this is intended.
+ */
+static void xlnx_ps_pcie_dma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct ps_pcie_dma_chan *chan;
+
+ if (!dchan)
+ return;
+
+ chan = to_xilinx_chan(dchan);
+
+ if (chan->state == CHANNEL_RESOURCE_UNALLOCATED)
+ return;
+
+ if (chan->maintenance_workq) {
+ if (completion_done(&chan->chan_shutdown_complt))
+ reinit_completion(&chan->chan_shutdown_complt);
+ queue_work(chan->maintenance_workq,
+ &chan->handle_chan_shutdown);
+ wait_for_completion_interruptible(&chan->chan_shutdown_complt);
+
+ xlnx_ps_pcie_free_worker_queues(chan);
+ xlnx_ps_pcie_free_pkt_ctxts(chan);
+ xlnx_ps_pcie_destroy_mempool(chan);
+ xlnx_ps_pcie_free_descriptors(chan);
+
+ spin_lock(&chan->channel_lock);
+ chan->state = CHANNEL_RESOURCE_UNALLOCATED;
+ spin_unlock(&chan->channel_lock);
+ }
+}
+
+/*
+ * xlnx_ps_pcie_dma_alloc_chan_resources - dmaengine .device_alloc_chan_resources.
+ *
+ * Allocates descriptor rings (per queue mode), mempools, packet contexts
+ * and worker queues, then resets/activates the channel and initializes
+ * the dmaengine cookie.  A channel already allocated returns 0.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int xlnx_ps_pcie_dma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct ps_pcie_dma_chan *chan;
+
+ /*
+ * The old code returned PTR_ERR(dchan) here, but PTR_ERR(NULL) is
+ * 0 — a NULL channel was reported as success.  Return a real error.
+ */
+ if (!dchan)
+ return -EINVAL;
+
+ chan = to_xilinx_chan(dchan);
+
+ if (chan->state != CHANNEL_RESOURCE_UNALLOCATED)
+ return 0;
+
+ if (chan->num_queues == DEFAULT_DMA_QUEUES) {
+ if (dma_alloc_decriptors_all_queues(chan) != 0) {
+ dev_err(chan->dev,
+ "Alloc fail bds for channel %d\n",
+ chan->channel_number);
+ goto err_no_descriptors;
+ }
+ } else if (chan->num_queues == TWO_DMA_QUEUES) {
+ if (dma_alloc_descriptors_two_queues(chan) != 0) {
+ dev_err(chan->dev,
+ "Alloc fail bds for two queues of channel %d\n",
+ chan->channel_number);
+ goto err_no_descriptors;
+ }
+ }
+
+ if (xlnx_ps_pcie_alloc_mempool(chan) != 0) {
+ dev_err(chan->dev,
+ "Unable to allocate memory pool for channel %d\n",
+ chan->channel_number);
+ goto err_no_mempools;
+ }
+
+ if (xlnx_ps_pcie_alloc_pkt_contexts(chan) != 0) {
+ dev_err(chan->dev,
+ "Unable to allocate packet contexts for channel %d\n",
+ chan->channel_number);
+ goto err_no_pkt_ctxts;
+ }
+
+ if (xlnx_ps_pcie_alloc_worker_threads(chan) != 0) {
+ dev_err(chan->dev,
+ "Unable to allocate worker queues for channel %d\n",
+ chan->channel_number);
+ goto err_no_worker_queues;
+ }
+
+ xlnx_ps_pcie_reset_channel(chan);
+
+ dma_cookie_init(dchan);
+
+ return 0;
+
+err_no_worker_queues:
+ xlnx_ps_pcie_free_pkt_ctxts(chan);
+err_no_pkt_ctxts:
+ xlnx_ps_pcie_destroy_mempool(chan);
+err_no_mempools:
+ xlnx_ps_pcie_free_descriptors(chan);
+err_no_descriptors:
+ return -ENOMEM;
+}
+
+/*
+ * xilinx_intr_tx_submit - tx_submit hook for software-interrupt
+ * descriptors: assign a cookie under cookie_lock and queue the segment
+ * on the pending-interrupts list.
+ *
+ * Return: the assigned cookie, or -EINVAL if the channel is not available.
+ */
+static dma_cookie_t xilinx_intr_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct ps_pcie_intr_segment *intr_seg =
+ to_ps_pcie_dma_tx_intr_descriptor(tx);
+ struct ps_pcie_dma_chan *chan = to_xilinx_chan(tx->chan);
+ dma_cookie_t cookie;
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return -EINVAL;
+
+ spin_lock(&chan->cookie_lock);
+ cookie = dma_cookie_assign(tx);
+ spin_unlock(&chan->cookie_lock);
+
+ spin_lock(&chan->pending_interrupts_lock);
+ list_add_tail(&intr_seg->node, &chan->pending_interrupts_list);
+ spin_unlock(&chan->pending_interrupts_lock);
+
+ return cookie;
+}
+
+/*
+ * xilinx_dma_tx_submit - tx_submit hook for transfer descriptors:
+ * assign a cookie under cookie_lock and queue the segment on the
+ * pending-transfers list (issued later by issue_pending).
+ *
+ * Return: the assigned cookie, or -EINVAL if the channel is not available.
+ */
+static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct ps_pcie_tx_segment *seg = to_ps_pcie_dma_tx_descriptor(tx);
+ struct ps_pcie_dma_chan *chan = to_xilinx_chan(tx->chan);
+ dma_cookie_t cookie;
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return -EINVAL;
+
+ spin_lock(&chan->cookie_lock);
+ cookie = dma_cookie_assign(tx);
+ spin_unlock(&chan->cookie_lock);
+
+ spin_lock(&chan->pending_list_lock);
+ list_add_tail(&seg->node, &chan->pending_list);
+ spin_unlock(&chan->pending_list_lock);
+
+ return cookie;
+}
+
+/**
+ * xlnx_ps_pcie_dma_prep_memcpy - prepare descriptors for a memcpy transaction
+ * @channel: DMA channel
+ * @dma_dst: destination address
+ * @dma_src: source address
+ * @len: transfer length
+ * @flags: transfer ack flags
+ *
+ * Splits the copy into MAX_TRANSFER_LENGTH-sized elements plus one
+ * remainder element, all attached to a single tx segment.  Only valid
+ * for channels in default (four-queue) mode; two-queue channels must
+ * use prep_slave_sg.
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+xlnx_ps_pcie_dma_prep_memcpy(struct dma_chan *channel, dma_addr_t dma_dst,
+ dma_addr_t dma_src, size_t len,
+ unsigned long flags)
+{
+ struct ps_pcie_dma_chan *chan = to_xilinx_chan(channel);
+ struct ps_pcie_tx_segment *seg = NULL;
+ struct ps_pcie_transfer_elements *ele = NULL;
+ struct ps_pcie_transfer_elements *ele_nxt = NULL;
+ u32 i;
+
+ if (chan->state != CHANNEL_AVAILABLE)
+ return NULL;
+
+ if (chan->num_queues != DEFAULT_DMA_QUEUES) {
+ dev_err(chan->dev, "Only prep_slave_sg for channel %d\n",
+ chan->channel_number);
+ return NULL;
+ }
+
+ seg = mempool_alloc(chan->transactions_pool, GFP_ATOMIC);
+ if (!seg) {
+ dev_err(chan->dev, "Tx segment alloc for channel %d\n",
+ chan->channel_number);
+ return NULL;
+ }
+
+ memset(seg, 0, sizeof(*seg));
+ INIT_LIST_HEAD(&seg->transfer_nodes);
+
+ /* Full-size chunks of MAX_TRANSFER_LENGTH bytes each */
+ for (i = 0; i < len / MAX_TRANSFER_LENGTH; i++) {
+ ele = mempool_alloc(chan->tx_elements_pool, GFP_ATOMIC);
+ if (!ele) {
+ dev_err(chan->dev, "Tx element %d for channel %d\n",
+ i, chan->channel_number);
+ goto err_elements_prep_memcpy;
+ }
+ ele->src_pa = dma_src + (i * MAX_TRANSFER_LENGTH);
+ ele->dst_pa = dma_dst + (i * MAX_TRANSFER_LENGTH);
+ ele->transfer_bytes = MAX_TRANSFER_LENGTH;
+ list_add_tail(&ele->node, &seg->transfer_nodes);
+ seg->src_elements++;
+ seg->dst_elements++;
+ seg->total_transfer_bytes += ele->transfer_bytes;
+ ele = NULL;
+ }
+
+ /* Trailing partial chunk; 'i' still indexes one past the last full chunk */
+ if (len % MAX_TRANSFER_LENGTH) {
+ ele = mempool_alloc(chan->tx_elements_pool, GFP_ATOMIC);
+ if (!ele) {
+ dev_err(chan->dev, "Tx element %d for channel %d\n",
+ i, chan->channel_number);
+ goto err_elements_prep_memcpy;
+ }
+ ele->src_pa = dma_src + (i * MAX_TRANSFER_LENGTH);
+ ele->dst_pa = dma_dst + (i * MAX_TRANSFER_LENGTH);
+ ele->transfer_bytes = len % MAX_TRANSFER_LENGTH;
+ list_add_tail(&ele->node, &seg->transfer_nodes);
+ seg->src_elements++;
+ seg->dst_elements++;
+ seg->total_transfer_bytes += ele->transfer_bytes;
+ }
+
+ /* The whole segment must fit in the descriptor ring */
+ if (seg->src_elements > chan->total_descriptors) {
+ dev_err(chan->dev, "Insufficient descriptors in channel %d for dma transaction\n",
+ chan->channel_number);
+ goto err_elements_prep_memcpy;
+ }
+
+ dma_async_tx_descriptor_init(&seg->async_tx, &chan->common);
+ seg->async_tx.flags = flags;
+ async_tx_ack(&seg->async_tx);
+ seg->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+ return &seg->async_tx;
+
+err_elements_prep_memcpy:
+ list_for_each_entry_safe(ele, ele_nxt, &seg->transfer_nodes, node) {
+ list_del(&ele->node);
+ mempool_free(ele, chan->tx_elements_pool);
+ }
+ mempool_free(seg, chan->transactions_pool);
+ return NULL;
+}
+
+/*
+ * xlnx_ps_pcie_dma_prep_slave_sg - prepare a slave scatter-gather transfer
+ * @channel: generic dmaengine channel handle
+ * @sgl: scatter-gather list describing the memory side of the transfer
+ * @sg_len: number of entries in @sgl
+ * @direction: DMA_MEM_TO_DEV or DMA_DEV_TO_MEM
+ * @flags: dmaengine descriptor flags
+ * @context: unused
+ *
+ * Splits each SG entry into hardware-sized chunks of at most
+ * MAX_TRANSFER_LENGTH bytes and queues them on a tx segment.
+ *
+ * Return: async tx descriptor on success, NULL on failure.
+ */
+static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_slave_sg(
+	struct dma_chan *channel, struct scatterlist *sgl,
+	unsigned int sg_len, enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct ps_pcie_dma_chan *chan = to_xilinx_chan(channel);
+	struct ps_pcie_tx_segment *seg = NULL;
+	struct scatterlist *sgl_ptr;
+	struct ps_pcie_transfer_elements *ele = NULL;
+	struct ps_pcie_transfer_elements *ele_nxt = NULL;
+	u32 i, j;
+
+	if (chan->state != CHANNEL_AVAILABLE)
+		return NULL;
+
+	if (!(is_slave_direction(direction)))
+		return NULL;
+
+	if (!sgl || sg_len == 0)
+		return NULL;
+
+	if (chan->num_queues != TWO_DMA_QUEUES) {
+		dev_err(chan->dev, "Only prep_dma_memcpy is supported channel %d\n",
+			chan->channel_number);
+		return NULL;
+	}
+
+	seg = mempool_alloc(chan->transactions_pool, GFP_ATOMIC);
+	if (!seg) {
+		dev_err(chan->dev, "Unable to allocate tx segment channel %d\n",
+			chan->channel_number);
+		return NULL;
+	}
+
+	memset(seg, 0, sizeof(*seg));
+	/* The list head must be initialized before any list_add_tail();
+	 * prep_dma_memcpy does this but it was missing here, leaving the
+	 * head zeroed and corrupting the list (and the error-unwind walk).
+	 */
+	INIT_LIST_HEAD(&seg->transfer_nodes);
+
+	for_each_sg(sgl, sgl_ptr, sg_len, j) {
+		/* full MAX_TRANSFER_LENGTH sized chunks first */
+		for (i = 0; i < sg_dma_len(sgl_ptr) / MAX_TRANSFER_LENGTH; i++) {
+			ele = mempool_alloc(chan->tx_elements_pool, GFP_ATOMIC);
+			if (!ele) {
+				dev_err(chan->dev, "Tx element %d for channel %d\n",
+					i, chan->channel_number);
+				goto err_elements_prep_slave_sg;
+			}
+			if (chan->direction == DMA_TO_DEVICE) {
+				ele->src_pa = sg_dma_address(sgl_ptr) +
+					(i * MAX_TRANSFER_LENGTH);
+				seg->src_elements++;
+			} else {
+				ele->dst_pa = sg_dma_address(sgl_ptr) +
+					(i * MAX_TRANSFER_LENGTH);
+				seg->dst_elements++;
+			}
+			ele->transfer_bytes = MAX_TRANSFER_LENGTH;
+			list_add_tail(&ele->node, &seg->transfer_nodes);
+			seg->total_transfer_bytes += ele->transfer_bytes;
+			ele = NULL;
+		}
+		/* then the remainder of this SG entry, if any */
+		if (sg_dma_len(sgl_ptr) % MAX_TRANSFER_LENGTH) {
+			ele = mempool_alloc(chan->tx_elements_pool, GFP_ATOMIC);
+			if (!ele) {
+				dev_err(chan->dev, "Tx element %d for channel %d\n",
+					i, chan->channel_number);
+				goto err_elements_prep_slave_sg;
+			}
+			if (chan->direction == DMA_TO_DEVICE) {
+				ele->src_pa = sg_dma_address(sgl_ptr) +
+					(i * MAX_TRANSFER_LENGTH);
+				seg->src_elements++;
+			} else {
+				ele->dst_pa = sg_dma_address(sgl_ptr) +
+					(i * MAX_TRANSFER_LENGTH);
+				seg->dst_elements++;
+			}
+			ele->transfer_bytes = sg_dma_len(sgl_ptr) %
+				MAX_TRANSFER_LENGTH;
+			list_add_tail(&ele->node, &seg->transfer_nodes);
+			seg->total_transfer_bytes += ele->transfer_bytes;
+		}
+	}
+
+	if (max(seg->src_elements, seg->dst_elements) >
+	    chan->total_descriptors) {
+		dev_err(chan->dev, "Insufficient descriptors in channel %d for dma transaction\n",
+			chan->channel_number);
+		goto err_elements_prep_slave_sg;
+	}
+
+	dma_async_tx_descriptor_init(&seg->async_tx, &chan->common);
+	seg->async_tx.flags = flags;
+	async_tx_ack(&seg->async_tx);
+	seg->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+	return &seg->async_tx;
+
+err_elements_prep_slave_sg:
+	/* release any elements already linked, then the segment itself */
+	list_for_each_entry_safe(ele, ele_nxt, &seg->transfer_nodes, node) {
+		list_del(&ele->node);
+		mempool_free(ele, chan->tx_elements_pool);
+	}
+	mempool_free(seg, chan->transactions_pool);
+	return NULL;
+}
+
+/*
+ * xlnx_ps_pcie_dma_issue_pending - move queued work to the active lists and
+ * kick the channel-programming worker
+ * @channel: generic dmaengine channel handle
+ *
+ * Splices the pending data-transfer and interrupt descriptor lists onto
+ * their active counterparts, then schedules the workqueue item that
+ * programs the hardware.  Lock nesting is pending-lock outer, active-lock
+ * inner for both list pairs; the initial list_empty() checks are unlocked
+ * fast-path tests only.
+ */
+static void xlnx_ps_pcie_dma_issue_pending(struct dma_chan *channel)
+{
+	struct ps_pcie_dma_chan *chan;
+
+	if (!channel)
+		return;
+
+	chan = to_xilinx_chan(channel);
+
+	if (!list_empty(&chan->pending_list)) {
+		spin_lock(&chan->pending_list_lock);
+		spin_lock(&chan->active_list_lock);
+		list_splice_tail_init(&chan->pending_list,
+				      &chan->active_list);
+		spin_unlock(&chan->active_list_lock);
+		spin_unlock(&chan->pending_list_lock);
+	}
+
+	if (!list_empty(&chan->pending_interrupts_list)) {
+		spin_lock(&chan->pending_interrupts_lock);
+		spin_lock(&chan->active_interrupts_lock);
+		list_splice_tail_init(&chan->pending_interrupts_list,
+				      &chan->active_interrupts_list);
+		spin_unlock(&chan->active_interrupts_lock);
+		spin_unlock(&chan->pending_interrupts_lock);
+	}
+
+	/* worker performs the actual descriptor programming */
+	if (chan->chan_programming)
+		queue_work(chan->chan_programming,
+			   &chan->handle_chan_programming);
+}
+
+/*
+ * xlnx_ps_pcie_dma_terminate_all - abort all outstanding transactions
+ * @channel: generic dmaengine channel handle
+ *
+ * Queues the channel-terminate maintenance work and waits (interruptibly)
+ * for it to complete.
+ *
+ * Return: 0 on success, -EINVAL for a NULL or unavailable channel.
+ */
+static int xlnx_ps_pcie_dma_terminate_all(struct dma_chan *channel)
+{
+	struct ps_pcie_dma_chan *chan;
+
+	/* PTR_ERR(NULL) evaluates to 0, so the previous code reported
+	 * success for a NULL channel; return a real error instead.
+	 */
+	if (!channel)
+		return -EINVAL;
+
+	chan = to_xilinx_chan(channel);
+
+	if (chan->state != CHANNEL_AVAILABLE)
+		return -EINVAL;
+
+	if (chan->maintenance_workq) {
+		if (completion_done(&chan->chan_terminate_complete))
+			reinit_completion(&chan->chan_terminate_complete);
+		queue_work(chan->maintenance_workq,
+			   &chan->handle_chan_terminate);
+		wait_for_completion_interruptible(
+			&chan->chan_terminate_complete);
+	}
+
+	return 0;
+}
+
+/*
+ * xlnx_ps_pcie_dma_prep_interrupt - prepare an interrupt-only descriptor
+ * @channel: generic dmaengine channel handle
+ * @flags: dmaengine descriptor flags
+ *
+ * Return: async tx descriptor on success, NULL on failure.
+ */
+static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_interrupt(
+		struct dma_chan *channel, unsigned long flags)
+{
+	struct ps_pcie_dma_chan *chan;
+	struct ps_pcie_intr_segment *intr_segment = NULL;
+
+	if (!channel)
+		return NULL;
+
+	chan = to_xilinx_chan(channel);
+
+	if (chan->state != CHANNEL_AVAILABLE)
+		return NULL;
+
+	intr_segment = mempool_alloc(chan->intr_transactions_pool, GFP_ATOMIC);
+	/* GFP_ATOMIC allocation can fail; the result was previously
+	 * dereferenced unchecked, a potential NULL-pointer dereference.
+	 */
+	if (!intr_segment) {
+		dev_err(chan->dev,
+			"Unable to allocate intr segment for channel %d\n",
+			chan->channel_number);
+		return NULL;
+	}
+
+	memset(intr_segment, 0, sizeof(*intr_segment));
+
+	dma_async_tx_descriptor_init(&intr_segment->async_intr_tx,
+				     &chan->common);
+	intr_segment->async_intr_tx.flags = flags;
+	async_tx_ack(&intr_segment->async_intr_tx);
+	intr_segment->async_intr_tx.tx_submit = xilinx_intr_tx_submit;
+
+	return &intr_segment->async_intr_tx;
+}
+
+/*
+ * xlnx_pcie_dma_driver_probe - bind the PS PCIe DMA platform device
+ * @platform_dev: platform device to probe
+ *
+ * Reads channel configuration from device properties, sets up the
+ * dmaengine capabilities and callbacks, installs interrupt handling and
+ * registers the device with the DMA framework.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int xlnx_pcie_dma_driver_probe(struct platform_device *platform_dev)
+{
+	int err, i;
+	struct xlnx_pcie_dma_device *xdev;
+	static u16 board_number;	/* monotonically increasing instance id */
+
+	xdev = devm_kzalloc(&platform_dev->dev,
+			    sizeof(struct xlnx_pcie_dma_device), GFP_KERNEL);
+
+	if (!xdev)
+		return -ENOMEM;
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	xdev->dma_buf_ext_addr = true;
+#else
+	xdev->dma_buf_ext_addr = false;
+#endif
+
+	xdev->is_rootdma = device_property_read_bool(&platform_dev->dev,
+						     "rootdma");
+
+	xdev->dev = &platform_dev->dev;
+	xdev->board_number = board_number;
+
+	err = device_property_read_u32(&platform_dev->dev, "numchannels",
+				       &xdev->num_channels);
+	if (err) {
+		dev_err(&platform_dev->dev,
+			"Unable to find numchannels property\n");
+		goto platform_driver_probe_return;
+	}
+
+	/* clamp an out-of-range property value instead of failing */
+	if (xdev->num_channels == 0 || xdev->num_channels >
+	    MAX_ALLOWED_CHANNELS_IN_HW) {
+		dev_warn(&platform_dev->dev,
+			 "Invalid xlnx-num_channels property value\n");
+		xdev->num_channels = MAX_ALLOWED_CHANNELS_IN_HW;
+	}
+
+	/* devm_kcalloc() checks the count * size product for overflow;
+	 * the explicit cast on the devm_kzalloc() return was redundant.
+	 */
+	xdev->channels = devm_kcalloc(&platform_dev->dev, xdev->num_channels,
+				      sizeof(*xdev->channels), GFP_KERNEL);
+	if (!xdev->channels) {
+		err = -ENOMEM;
+		goto platform_driver_probe_return;
+	}
+
+	if (xdev->is_rootdma)
+		err = read_rootdma_config(platform_dev, xdev);
+	else
+		err = read_epdma_config(platform_dev, xdev);
+
+	if (err) {
+		dev_err(&platform_dev->dev,
+			"Unable to initialize dma configuration\n");
+		goto platform_driver_probe_return;
+	}
+
+	/* Initialize the DMA engine */
+	INIT_LIST_HEAD(&xdev->common.channels);
+
+	dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
+	dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
+	dma_cap_set(DMA_INTERRUPT, xdev->common.cap_mask);
+	dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
+
+	xdev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+	xdev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+	xdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	xdev->common.device_alloc_chan_resources =
+		xlnx_ps_pcie_dma_alloc_chan_resources;
+	xdev->common.device_free_chan_resources =
+		xlnx_ps_pcie_dma_free_chan_resources;
+	xdev->common.device_terminate_all = xlnx_ps_pcie_dma_terminate_all;
+	xdev->common.device_tx_status = dma_cookie_status;
+	xdev->common.device_issue_pending = xlnx_ps_pcie_dma_issue_pending;
+	xdev->common.device_prep_dma_interrupt =
+		xlnx_ps_pcie_dma_prep_interrupt;
+	xdev->common.device_prep_dma_memcpy = xlnx_ps_pcie_dma_prep_memcpy;
+	xdev->common.device_prep_slave_sg = xlnx_ps_pcie_dma_prep_slave_sg;
+	xdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+
+	for (i = 0; i < xdev->num_channels; i++) {
+		err = probe_channel_properties(platform_dev, xdev, i);
+
+		if (err != 0) {
+			dev_err(xdev->dev,
+				"Unable to read channel properties\n");
+			goto platform_driver_probe_return;
+		}
+	}
+
+	if (xdev->is_rootdma)
+		err = platform_irq_setup(xdev);
+	else
+		err = irq_setup(xdev);
+	if (err) {
+		dev_err(xdev->dev, "Cannot request irq lines for device %d\n",
+			xdev->board_number);
+		goto platform_driver_probe_return;
+	}
+
+	err = dma_async_device_register(&xdev->common);
+	if (err) {
+		dev_err(xdev->dev,
+			"Unable to register board %d with dma framework\n",
+			xdev->board_number);
+		goto platform_driver_probe_return;
+	}
+
+	platform_set_drvdata(platform_dev, xdev);
+
+	board_number++;
+
+	dev_info(&platform_dev->dev, "PS PCIe Platform driver probed\n");
+	return 0;
+
+platform_driver_probe_return:
+	return err;
+}
+
+/*
+ * xlnx_pcie_dma_driver_remove - unbind the PS PCIe DMA platform device
+ * @platform_dev: platform device being removed
+ *
+ * Return: Always returns 0.
+ */
+static int xlnx_pcie_dma_driver_remove(struct platform_device *platform_dev)
+{
+	struct xlnx_pcie_dma_device *xdev =
+		platform_get_drvdata(platform_dev);
+	int i;
+
+	/* Unregister from the framework first so no new transactions can
+	 * be issued while per-channel resources are being torn down.
+	 */
+	dma_async_device_unregister(&xdev->common);
+
+	for (i = 0; i < xdev->num_channels; i++)
+		xlnx_ps_pcie_dma_free_chan_resources(&xdev->channels[i].common);
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id xlnx_pcie_root_dma_of_ids[] = {
+ { .compatible = "xlnx,ps_pcie_dma-1.00.a", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, xlnx_pcie_root_dma_of_ids);
+#endif
+
+static struct platform_driver xlnx_pcie_dma_driver = {
+	.driver = {
+		.name = XLNX_PLATFORM_DRIVER_NAME,
+		.of_match_table = of_match_ptr(xlnx_pcie_root_dma_of_ids),
+		/* .owner is filled in by platform_driver_register() */
+	},
+	.probe = xlnx_pcie_dma_driver_probe,
+	.remove = xlnx_pcie_dma_driver_remove,
+};
+
+/* Registration wrappers called by the common PS PCIe DMA module code. */
+int dma_platform_driver_register(void)
+{
+	return platform_driver_register(&xlnx_pcie_dma_driver);
+}
+
+void dma_platform_driver_unregister(void)
+{
+	platform_driver_unregister(&xlnx_pcie_dma_driver);
+}
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index ff253696d183..9f1f25e8061a 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -454,6 +454,7 @@ static void zynqmp_dma_free_desc_list(struct zynqmp_dma_chan *chan,
list_for_each_entry_safe(desc, next, list, node)
zynqmp_dma_free_descriptor(chan, desc);
+ INIT_LIST_HEAD(list);
}
/**
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index fe2eb892a1bd..3f00eddbe0f8 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -334,6 +334,13 @@ config EDAC_CPC925
a companion chip to the PowerPC 970 family of
processors.
+config EDAC_PL310_L2
+	tristate "PL310 L2 Cache Controller"
+	depends on ARM
+	help
+	  Support for parity error detection on the PL310 L2 cache
+	  controller data and tag RAM memories.
+
config EDAC_HIGHBANK_MC
tristate "Highbank Memory Controller"
depends on ARCH_HIGHBANK
@@ -480,6 +487,13 @@ config EDAC_SYNOPSYS
Support for error detection and correction on the Synopsys DDR
memory controller.
+config EDAC_ZYNQMP_OCM
+ tristate "Xilinx ZynqMP OCM Controller"
+ depends on ARCH_ZYNQMP
+ help
+	  Support for error detection and correction on the Xilinx ZynqMP
+	  OCM controller.
+
config EDAC_XGENE
tristate "APM X-Gene SoC"
depends on (ARM64 || COMPILE_TEST)
@@ -487,6 +501,14 @@ config EDAC_XGENE
Support for error detection and correction on the
APM X-Gene family of SOCs.
+config EDAC_CORTEX_ARM64
+ tristate "ARM Cortex A57/A53"
+ default y if !CPU_IDLE
+ depends on !CPU_IDLE && ARM64
+ help
+ Support for error detection and correction on the
+ ARM Cortex A57 and A53.
+
config EDAC_TI
tristate "Texas Instruments DDR3 ECC Controller"
depends on ARCH_KEYSTONE || SOC_DRA7XX
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 269e15118cea..fbd60ca9f26c 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -79,10 +79,13 @@ obj-$(CONFIG_EDAC_OCTEON_PCI) += octeon_edac-pci.o
obj-$(CONFIG_EDAC_THUNDERX) += thunderx_edac.o
obj-$(CONFIG_EDAC_ALTERA) += altera_edac.o
+obj-$(CONFIG_EDAC_PL310_L2) += pl310_edac_l2.o
obj-$(CONFIG_EDAC_SIFIVE) += sifive_edac.o
obj-$(CONFIG_EDAC_ARMADA_XP) += armada_xp_edac.o
obj-$(CONFIG_EDAC_SYNOPSYS) += synopsys_edac.o
obj-$(CONFIG_EDAC_XGENE) += xgene_edac.o
+obj-$(CONFIG_EDAC_CORTEX_ARM64) += cortex_arm64_edac.o
+obj-$(CONFIG_EDAC_ZYNQMP_OCM) += zynqmp_ocm_edac.o
obj-$(CONFIG_EDAC_TI) += ti_edac.o
obj-$(CONFIG_EDAC_QCOM) += qcom_edac.o
obj-$(CONFIG_EDAC_ASPEED) += aspeed_edac.o
diff --git a/drivers/edac/cortex_arm64_edac.c b/drivers/edac/cortex_arm64_edac.c
new file mode 100644
index 000000000000..db89ee0c3cc3
--- /dev/null
+++ b/drivers/edac/cortex_arm64_edac.c
@@ -0,0 +1,470 @@
+/*
+ * Cortex A57 and A53 EDAC
+ *
+ * Copyright (c) 2015, Advanced Micro Devices
+ * Author: Brijesh Singh <brijeshkumar.singh@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <ras/ras_event.h>
+
+#include "edac_module.h"
+
+#define DRV_NAME "cortex_edac"
+
+#define CPUMERRSR_EL1_INDEX(x, y)	((x) & (y))
+#define CPUMERRSR_EL1_BANK_WAY(x, y)	(((x) >> 18) & (y))
+#define CPUMERRSR_EL1_RAMID(x)		(((x) >> 24) & 0x7f)
+/* Bit 31 must be tested with an unsigned-long constant: (1 << 31) is a
+ * negative int that sign-extends to 0xffffffff80000000 when masked
+ * against a u64, falsely matching the repeat/other/fatal fields.
+ */
+#define CPUMERRSR_EL1_VALID(x)		((x) & (1UL << 31))
+#define CPUMERRSR_EL1_REPEAT(x)		(((x) >> 32) & 0x7f)
+#define CPUMERRSR_EL1_OTHER(x)		(((x) >> 40) & 0xff)
+#define CPUMERRSR_EL1_FATAL(x)		((x) & (1UL << 63))
+#define L1_I_TAG_RAM			0x00
+#define L1_I_DATA_RAM			0x01
+#define L1_D_TAG_RAM			0x08
+#define L1_D_DATA_RAM			0x09
+#define L1_D_DIRTY_RAM			0x14
+#define TLB_RAM				0x18
+
+#define L2MERRSR_EL1_CPUID_WAY(x)	(((x) >> 18) & 0xf)
+#define L2MERRSR_EL1_RAMID(x)		(((x) >> 24) & 0x7f)
+/* same sign-extension hazard as CPUMERRSR_EL1_VALID above */
+#define L2MERRSR_EL1_VALID(x)		((x) & (1UL << 31))
+#define L2MERRSR_EL1_REPEAT(x)		(((x) >> 32) & 0xff)
+#define L2MERRSR_EL1_OTHER(x)		(((x) >> 40) & 0xff)
+#define L2MERRSR_EL1_FATAL(x)		((x) & (1UL << 63))
+#define L2_TAG_RAM			0x10
+#define L2_DATA_RAM			0x11
+#define L2_SNOOP_RAM			0x12
+#define L2_DIRTY_RAM			0x14
+#define L2_INCLUSION_PF_RAM		0x18
+
+#define L1_CACHE			0
+#define L2_CACHE			1
+
+#define EDAC_MOD_STR			DRV_NAME
+
+/* Error injection macros */
+#define L1_DCACHE_ERRINJ_ENABLE		(1 << 6)
+#define L1_DCACHE_ERRINJ_DISABLE	(~(1 << 6))
+#define L2_DCACHE_ERRINJ_ENABLE		(1 << 29)
+#define L2_DCACHE_ERRINJ_DISABLE	(~(1 << 29))
+#define L2_ECC_PROTECTION		(1 << 22)
+
+static int poll_msec = 100;
+
+struct cortex_arm64_edac {
+ struct edac_device_ctl_info *edac_ctl;
+};
+
+/* Read CPUMERRSR_EL1 (L1 memory error syndrome, encoded s3_1_c15_c2_2). */
+static inline u64 read_cpumerrsr_el1(void)
+{
+	u64 val;
+
+	asm volatile("mrs %0, s3_1_c15_c2_2" : "=r" (val));
+	return val;
+}
+
+/* Write CPUMERRSR_EL1; the callers write 0 to clear the recorded error. */
+static inline void write_cpumerrsr_el1(u64 val)
+{
+	asm volatile("msr s3_1_c15_c2_2, %0" :: "r" (val));
+}
+
+/* Read L2MERRSR_EL1 (L2 memory error syndrome, encoded s3_1_c15_c2_3). */
+static inline u64 read_l2merrsr_el1(void)
+{
+	u64 val;
+
+	asm volatile("mrs %0, s3_1_c15_c2_3" : "=r" (val));
+	return val;
+}
+
+/* Write L2MERRSR_EL1; the callers write 0 to clear the recorded error. */
+static inline void write_l2merrsr_el1(u64 val)
+{
+	asm volatile("msr s3_1_c15_c2_3, %0" :: "r" (val));
+}
+
+/* Instruction synchronization barrier after sysreg updates. */
+static inline void cortexa53_edac_busy_on_inst(void)
+{
+	asm volatile("isb sy");
+}
+
+/* Full-system data synchronization barrier. */
+static inline void cortexa53_edac_busy_on_data(void)
+{
+	asm volatile("dsb sy");
+}
+
+/* Write L2ACTLR_EL1 and synchronize; holds the L2 error-inject bit. */
+static inline void write_l2actrl_el1(u64 val)
+{
+	asm volatile("msr s3_1_c15_c0_0, %0" :: "r" (val));
+	cortexa53_edac_busy_on_inst();
+}
+
+/* Read L2ACTLR_EL1. */
+static inline u64 read_l2actrl_el1(void)
+{
+	u64 val;
+
+	asm volatile("mrs %0, s3_1_c15_c0_0" : "=r" (val));
+	return val;
+}
+
+/* Read L2CTLR_EL1; bit 22 (L2_ECC_PROTECTION) reports ECC enable. */
+static inline u64 read_l2ctlr_el1(void)
+{
+	u64 rval;
+
+	asm volatile("mrs %0, S3_1_C11_C0_2" : "=r" (rval));
+	return rval;
+
+}
+
+/* Read CPUACTLR_EL1; holds the L1 data-cache error-inject bit. */
+static inline u64 read_l1actrl_el1(void)
+{
+	u64 rval;
+
+	asm volatile("mrs %0, S3_1_C15_C2_0" : "=r" (rval));
+	return rval;
+}
+
+/* Write CPUACTLR_EL1. */
+static inline void write_l1actrl_el1(u64 val)
+{
+	asm volatile("msr S3_1_C15_C2_0, %0" :: "r" (val));
+}
+
+/* Decode and report a valid, non-fatal L1 (per-CPU) memory error recorded
+ * in CPUMERRSR_EL1, then clear the register.  Runs on the CPU being
+ * checked via smp_call_function_single().
+ */
+static void parse_cpumerrsr(void *arg)
+{
+	int cpu, partnum, way;
+	unsigned int index = 0;
+	u64 val = read_cpumerrsr_el1();
+	int repeat_err, other_err;
+
+	/* we do not support fatal error handling so far */
+	if (CPUMERRSR_EL1_FATAL(val))
+		return;
+
+	/* check if we have valid error before continuing */
+	if (!CPUMERRSR_EL1_VALID(val))
+		return;
+
+	cpu = smp_processor_id();
+	partnum = read_cpuid_part_number();
+	repeat_err = CPUMERRSR_EL1_REPEAT(val);
+	other_err = CPUMERRSR_EL1_OTHER(val);
+
+	/* way/bank and index address bit ranges are different between
+	 * A57 and A53 */
+	if (partnum == ARM_CPU_PART_CORTEX_A57) {
+		index = CPUMERRSR_EL1_INDEX(val, 0x1ffff);
+		way = CPUMERRSR_EL1_BANK_WAY(val, 0x1f);
+	} else {
+		index = CPUMERRSR_EL1_INDEX(val, 0xfff);
+		way = CPUMERRSR_EL1_BANK_WAY(val, 0x7);
+	}
+
+	edac_printk(KERN_CRIT, EDAC_MOD_STR, "CPU%d L1 error detected!\n", cpu);
+	edac_printk(KERN_CRIT, EDAC_MOD_STR, "index=%#x, RAMID=", index);
+
+	switch (CPUMERRSR_EL1_RAMID(val)) {
+	case L1_I_TAG_RAM:
+		pr_cont("'L1-I Tag RAM' (way %d)", way);
+		break;
+	case L1_I_DATA_RAM:
+		pr_cont("'L1-I Data RAM' (bank %d)", way);
+		break;
+	case L1_D_TAG_RAM:
+		pr_cont("'L1-D Tag RAM' (way %d)", way);
+		break;
+	case L1_D_DATA_RAM:
+		pr_cont("'L1-D Data RAM' (bank %d)", way);
+		break;
+	case L1_D_DIRTY_RAM:
+		pr_cont("'L1 Dirty RAM'");
+		break;
+	case TLB_RAM:
+		pr_cont("'TLB RAM'");
+		break;
+	default:
+		pr_cont("'unknown'");
+		break;
+	}
+
+	pr_cont(", repeat=%d, other=%d (CPUMERRSR_EL1=%#llx)\n", repeat_err,
+		other_err, val);
+
+	/* report as a corrected event, then write 0 to clear the syndrome */
+	trace_mc_event(HW_EVENT_ERR_CORRECTED, "L1 non-fatal error",
+		       "", repeat_err, 0, 0, 0, -1, index, 0, 0, DRV_NAME);
+	write_cpumerrsr_el1(0);
+}
+
+/* Decode the cpuid/way field of L2MERRSR_EL1 for Cortex-A57.  Tag, data
+ * and dirty RAMs intentionally share the same decode, hence the grouped
+ * cases.
+ */
+static void a57_parse_l2merrsr_way(u8 ramid, u8 val)
+{
+	switch (ramid) {
+	case L2_TAG_RAM:
+	case L2_DATA_RAM:
+	case L2_DIRTY_RAM:
+		pr_cont("(cpu%d tag, way %d)", val / 2, val % 2);
+		break;
+	case L2_SNOOP_RAM:
+		pr_cont("(cpu%d tag, way %d)", (val & 0x6) >> 1,
+			(val & 0x1));
+		break;
+	}
+}
+
+/* Decode the cpuid/way field of L2MERRSR_EL1 for Cortex-A53. */
+static void a53_parse_l2merrsr_way(u8 ramid, u8 val)
+{
+	switch (ramid) {
+	case L2_TAG_RAM:
+		pr_cont("(way %d)", val);
+		/* a missing break here previously fell through and also
+		 * printed the data-RAM "(bank %d)" text for tag errors
+		 */
+		break;
+	case L2_DATA_RAM:
+		pr_cont("(bank %d)", val);
+		break;
+	case L2_SNOOP_RAM:
+		pr_cont("(cpu%d tag, way %d)", val / 2, val % 4);
+		break;
+	}
+}
+
+/* Decode and report a valid, non-fatal L2 (per-cluster) memory error
+ * recorded in L2MERRSR_EL1, then clear the register.  Runs on one CPU of
+ * the cluster via smp_call_function_any().
+ */
+static void parse_l2merrsr(void *arg)
+{
+	int cpu, partnum;
+	unsigned int index;
+	int repeat_err, other_err;
+	u64 val = read_l2merrsr_el1();
+
+	/* we do not support fatal error handling so far */
+	if (L2MERRSR_EL1_FATAL(val))
+		return;
+
+	/* check if we have valid error before continuing */
+	if (!L2MERRSR_EL1_VALID(val))
+		return;
+
+	cpu = smp_processor_id();
+	partnum = read_cpuid_part_number();
+	repeat_err = L2MERRSR_EL1_REPEAT(val);
+	other_err = L2MERRSR_EL1_OTHER(val);
+
+	/* index address range is different between A57 and A53 */
+	if (partnum == ARM_CPU_PART_CORTEX_A57)
+		index = val & 0x1ffff;
+	else
+		index = (val >> 3) & 0x3fff;
+
+	edac_printk(KERN_CRIT, EDAC_MOD_STR, "CPU%d L2 error detected!\n", cpu);
+	edac_printk(KERN_CRIT, EDAC_MOD_STR, "index=%#x RAMID=", index);
+
+	switch (L2MERRSR_EL1_RAMID(val)) {
+	case L2_TAG_RAM:
+		pr_cont("'L2 Tag RAM'");
+		break;
+	case L2_DATA_RAM:
+		pr_cont("'L2 Data RAM'");
+		break;
+	case L2_SNOOP_RAM:
+		pr_cont("'L2 Snoop tag RAM'");
+		break;
+	case L2_DIRTY_RAM:
+		pr_cont("'L2 Dirty RAM'");
+		break;
+	case L2_INCLUSION_PF_RAM:
+		pr_cont("'L2 inclusion PF RAM'");
+		break;
+	default:
+		pr_cont("unknown");
+		break;
+	}
+
+	/* cpuid/way bit description is different between A57 and A53 */
+	if (partnum == ARM_CPU_PART_CORTEX_A57)
+		a57_parse_l2merrsr_way(L2MERRSR_EL1_RAMID(val),
+				       L2MERRSR_EL1_CPUID_WAY(val));
+	else
+		a53_parse_l2merrsr_way(L2MERRSR_EL1_RAMID(val),
+				       L2MERRSR_EL1_CPUID_WAY(val));
+
+	pr_cont(", repeat=%d, other=%d (L2MERRSR_EL1=%#llx)\n", repeat_err,
+		other_err, val);
+	trace_mc_event(HW_EVENT_ERR_CORRECTED, "L2 non-fatal error",
+		       "", repeat_err, 0, 0, 0, -1, index, 0, 0, DRV_NAME);
+	write_l2merrsr_el1(0);
+}
+
+/* EDAC poll callback: check the L1 syndrome register on every online CPU
+ * and the shared L2 syndrome register once per cluster (skipping clusters
+ * whose core mask was already visited).
+ */
+static void cortex_arm64_edac_check(struct edac_device_ctl_info *edac_ctl)
+{
+	int cpu;
+	struct cpumask cluster_mask, old_mask;
+
+	cpumask_clear(&cluster_mask);
+	cpumask_clear(&old_mask);
+
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
+		/* Check CPU L1 error */
+		smp_call_function_single(cpu, parse_cpumerrsr, NULL, 0);
+		cpumask_copy(&cluster_mask, topology_core_cpumask(cpu));
+		/* L2 is shared per cluster; visit each cluster only once */
+		if (cpumask_equal(&cluster_mask, &old_mask))
+			continue;
+		cpumask_copy(&old_mask, &cluster_mask);
+		/* Check CPU L2 error */
+		smp_call_function_any(&cluster_mask, parse_l2merrsr, NULL, 0);
+	}
+	put_online_cpus();
+}
+
+/* sysfs show: current L2ACTLR_EL1 value (contains the L2 inject bit). */
+static ssize_t cortexa53_edac_inject_L2_show(struct edac_device_ctl_info
+					     *dci, char *data)
+{
+	return sprintf(data, "L2ACTLR_EL1: [0x%llx]\n\r", read_l2actrl_el1());
+}
+
+/* sysfs store: arm L2 error injection, but only when L2 ECC protection
+ * is enabled in L2CTLR_EL1; any written value triggers the injection.
+ */
+static ssize_t cortexa53_edac_inject_L2_store(
+	struct edac_device_ctl_info *dci, const char *data,
+	size_t count)
+{
+	u64 l2actrl, l2ecc;
+
+	if (!data)
+		return -EFAULT;
+
+	l2ecc = read_l2ctlr_el1();
+	if ((l2ecc & L2_ECC_PROTECTION)) {
+		l2actrl = read_l2actrl_el1();
+		l2actrl = l2actrl | L2_DCACHE_ERRINJ_ENABLE;
+		write_l2actrl_el1(l2actrl);
+		cortexa53_edac_busy_on_inst();
+	} else {
+		edac_printk(KERN_CRIT, EDAC_MOD_STR, "L2 ECC not enabled\n");
+	}
+
+	return count;
+}
+
+/* sysfs show: current CPUACTLR_EL1 value (contains the L1 inject bit). */
+static ssize_t cortexa53_edac_inject_L1_show(struct edac_device_ctl_info
+					     *dci, char *data)
+{
+	return sprintf(data, "L1CTLR_EL1: [0x%llx]\n\r", read_l1actrl_el1());
+}
+
+/* sysfs store: arm L1 data-cache error injection; any written value
+ * triggers the injection.
+ */
+static ssize_t cortexa53_edac_inject_L1_store(
+	struct edac_device_ctl_info *dci, const char *data,
+	size_t count)
+{
+	u64 l1actrl;
+
+	if (!data)
+		return -EFAULT;
+
+	l1actrl = read_l1actrl_el1();
+	l1actrl |= L1_DCACHE_ERRINJ_ENABLE;
+	write_l1actrl_el1(l1actrl);
+	cortexa53_edac_busy_on_inst();
+
+	return count;
+}
+
+/* Error-injection controls exposed under the EDAC device in sysfs. */
+static struct edac_dev_sysfs_attribute cortexa53_edac_sysfs_attributes[] = {
+	{
+		.attr = {
+			.name = "inject_L2_Cache_Error",
+			.mode = (S_IRUGO | S_IWUSR)
+		},
+		.show = cortexa53_edac_inject_L2_show,
+		.store = cortexa53_edac_inject_L2_store},
+	{
+		.attr = {
+			.name = "inject_L1_Cache_Error",
+			.mode = (S_IRUGO | S_IWUSR)
+		},
+		.show = cortexa53_edac_inject_L1_show,
+		.store = cortexa53_edac_inject_L1_store},
+
+	/* End of list */
+	{
+		.attr = {.name = NULL}
+	}
+};
+
+/* Attach the injection attributes before edac_device_add_device(). */
+static void cortexa53_set_edac_sysfs_attributes(struct edac_device_ctl_info
+						*edac_dev)
+{
+	edac_dev->sysfs_attributes = cortexa53_edac_sysfs_attributes;
+}
+
+/* Allocate and register the polled EDAC device for the CPU caches.
+ * Return: 0 on success, negative error code on failure.
+ */
+static int cortex_arm64_edac_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct cortex_arm64_edac *drv;
+	struct device *dev = &pdev->dev;
+
+	drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
+	if (!drv)
+		return -ENOMEM;
+
+	/* Only POLL mode is supported */
+	edac_op_state = EDAC_OPSTATE_POLL;
+
+	drv->edac_ctl = edac_device_alloc_ctl_info(0, "cpu_cache", 1, "L", 2,
+						   0, NULL, 0,
+						   edac_device_alloc_index());
+	/* edac_device_alloc_ctl_info() returns NULL on failure, not an
+	 * ERR_PTR, so the former IS_ERR() check could never fire.
+	 */
+	if (!drv->edac_ctl)
+		return -ENOMEM;
+
+	drv->edac_ctl->poll_msec = poll_msec;
+	drv->edac_ctl->edac_check = cortex_arm64_edac_check;
+	drv->edac_ctl->dev = dev;
+	drv->edac_ctl->mod_name = dev_name(dev);
+	drv->edac_ctl->dev_name = dev_name(dev);
+	drv->edac_ctl->ctl_name = "cache_err";
+	platform_set_drvdata(pdev, drv);
+
+	cortexa53_set_edac_sysfs_attributes(drv->edac_ctl);
+
+	rc = edac_device_add_device(drv->edac_ctl);
+	if (rc)
+		edac_device_free_ctl_info(drv->edac_ctl);
+
+	return rc;
+}
+
+/* Tear down the EDAC device registered by probe(). */
+static int cortex_arm64_edac_remove(struct platform_device *pdev)
+{
+	struct cortex_arm64_edac *drv = dev_get_drvdata(&pdev->dev);
+	struct edac_device_ctl_info *edac_ctl = drv->edac_ctl;
+
+	/* edac_ctl->dev was set to &pdev->dev in probe() */
+	edac_device_del_device(edac_ctl->dev);
+	edac_device_free_ctl_info(edac_ctl);
+
+	return 0;
+}
+
+/* Matches both the A57 and A53 EDAC device-tree nodes. */
+static const struct of_device_id cortex_arm64_edac_of_match[] = {
+	{ .compatible = "arm,cortex-a57-edac" },
+	{ .compatible = "arm,cortex-a53-edac" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, cortex_arm64_edac_of_match);
+
+static struct platform_driver cortex_arm64_edac_driver = {
+	.probe = cortex_arm64_edac_probe,
+	.remove = cortex_arm64_edac_remove,
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table = cortex_arm64_edac_of_match,
+	},
+};
+module_platform_driver(cortex_arm64_edac_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Brijesh Singh <brijeshkumar.singh@amd.com>");
+MODULE_DESCRIPTION("Cortex A57 and A53 EDAC driver");
+/* read-only module parameter; applied to edac_ctl->poll_msec in probe() */
+module_param(poll_msec, int, 0444);
+MODULE_PARM_DESC(poll_msec, "EDAC monitor poll interval in msec");
diff --git a/drivers/edac/pl310_edac_l2.c b/drivers/edac/pl310_edac_l2.c
new file mode 100644
index 000000000000..57f2f5b022d8
--- /dev/null
+++ b/drivers/edac/pl310_edac_l2.c
@@ -0,0 +1,233 @@
+/*
+ * Pl310 L2 Cache EDAC Driver
+ *
+ * Copyright (C) 2013-2014 Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/edac.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <asm/hardware/cache-l2x0.h>
+#include "edac_module.h"
+
+/* Auxiliary control register definitions */
+#define L2X0_AUX_CTRL_PARITY_MASK BIT(21)
+
+/* Interrupt imask/status/clear register definitions */
+#define L2X0_INTR_PARRD_MASK 0x4
+#define L2X0_INTR_PARRT_MASK 0x2
+
+/**
+ * struct pl310_edac_l2_priv - Zynq L2 cache controller private instance data
+ * @base: Base address of the controller
+ * @irq: Interrupt number
+ */
+struct pl310_edac_l2_priv {
+ void __iomem *base;
+ int irq;
+};
+
+/**
+ * pl310_edac_l2_parityerr_check - Check controller status for parity errors
+ * @dci: Pointer to the edac device controller instance
+ *
+ * Reads the raw interrupt status, clears any latched parity events and
+ * reports data parity as correctable and tag parity as uncorrectable.
+ */
+static void pl310_edac_l2_parityerr_check(struct edac_device_ctl_info *dci)
+{
+	struct pl310_edac_l2_priv *priv = dci->pvt_info;
+	u32 regval;
+
+	regval = readl(priv->base + L2X0_RAW_INTR_STAT);
+	if (regval & L2X0_INTR_PARRD_MASK) {
+		/* Data parity error will be reported as correctable error */
+		writel(L2X0_INTR_PARRD_MASK, priv->base + L2X0_INTR_CLEAR);
+		edac_device_handle_ce(dci, 0, 0, dci->ctl_name);
+	}
+	if (regval & L2X0_INTR_PARRT_MASK) {
+		/* tag parity error will be reported as uncorrectable error */
+		writel(L2X0_INTR_PARRT_MASK, priv->base + L2X0_INTR_CLEAR);
+		edac_device_handle_ue(dci, 0, 0, dci->ctl_name);
+	}
+}
+
+/**
+ * pl310_edac_l2_int_handler - ISR function for the L2 cache controller
+ * @irq: Irq Number
+ * @device: Pointer to the edac device controller instance
+ *
+ * This routine is triggered whenever there is parity error detected
+ *
+ * Return: Always returns IRQ_HANDLED
+ */
+static irqreturn_t pl310_edac_l2_int_handler(int irq, void *device)
+{
+	pl310_edac_l2_parityerr_check((struct edac_device_ctl_info *)device);
+	return IRQ_HANDLED;
+}
+
+/**
+ * pl310_edac_l2_poll_handler - Poll the status reg for parity errors
+ * @dci: Pointer to the edac device controller instance
+ *
+ * This routine is used to check and post parity errors and is called by
+ * the EDAC polling thread (fallback when no IRQ could be requested).
+ */
+static void pl310_edac_l2_poll_handler(struct edac_device_ctl_info *dci)
+{
+	pl310_edac_l2_parityerr_check(dci);
+}
+
+/**
+ * pl310_edac_l2_get_paritystate - report whether parity checking is enabled
+ * @base: Pointer to the controller base address
+ *
+ * Return: true if the parity enable bit is set in the auxiliary control
+ * register, false otherwise.
+ */
+static bool pl310_edac_l2_get_paritystate(void __iomem *base)
+{
+	u32 aux_ctrl = readl(base + L2X0_AUX_CTRL);
+
+	return (aux_ctrl & L2X0_AUX_CTRL_PARITY_MASK) != 0;
+}
+
+/**
+ * pl310_edac_l2_probe - Check controller and bind driver
+ * @pdev: Pointer to the platform_device struct
+ *
+ * This routine probes a specific arm,pl310-cache instance for binding
+ * with the driver.
+ *
+ * Return: 0 if the controller instance was successfully bound to the
+ * driver; otherwise, < 0 on error.
+ */
+static int pl310_edac_l2_probe(struct platform_device *pdev)
+{
+	struct edac_device_ctl_info *dci;
+	struct pl310_edac_l2_priv *priv;
+	int rc;
+	struct resource *res;
+	void __iomem *baseaddr;
+	u32 regval;
+
+	/* Get the data from the platform device */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	baseaddr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(baseaddr))
+		return PTR_ERR(baseaddr);
+
+	/* Check for the parity enable status */
+	if (pl310_edac_l2_get_paritystate(baseaddr) == false) {
+		dev_err(&pdev->dev, "parity check not enabled\n");
+		return -ENXIO;
+	}
+
+	dci = edac_device_alloc_ctl_info(sizeof(*priv), "l2cache",
+					 1, "L", 1, 1, NULL, 0,
+					 edac_device_alloc_index());
+	/* edac_device_alloc_ctl_info() returns NULL on failure, not an
+	 * ERR_PTR, so the former IS_ERR() check could never fire.
+	 */
+	if (!dci)
+		return -ENOMEM;
+
+	priv = dci->pvt_info;
+	priv->base = baseaddr;
+	dci->dev = &pdev->dev;
+	dci->mod_name = "pl310_edac_l2";
+	dci->ctl_name = "pl310_l2_controller";
+	dci->dev_name = dev_name(&pdev->dev);
+
+	/* pl310_edac_l2_remove() fetches the instance via
+	 * platform_get_drvdata(); this was never set before.
+	 */
+	platform_set_drvdata(pdev, dci);
+
+	priv->irq = platform_get_irq(pdev, 0);
+	rc = devm_request_irq(&pdev->dev, priv->irq,
+			      pl310_edac_l2_int_handler,
+			      0, dev_name(&pdev->dev), (void *)dci);
+	if (rc < 0) {
+		/* no usable interrupt - fall back to polled operation */
+		dci->edac_check = pl310_edac_l2_poll_handler;
+		edac_op_state = EDAC_OPSTATE_POLL;
+	}
+
+	rc = edac_device_add_device(dci);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to register with EDAC core\n");
+		goto free_edac_device;
+	}
+
+	if (edac_op_state != EDAC_OPSTATE_POLL) {
+		regval = readl(priv->base+L2X0_INTR_MASK);
+		regval |= (L2X0_INTR_PARRD_MASK | L2X0_INTR_PARRT_MASK);
+		writel(regval, priv->base+L2X0_INTR_MASK);
+	}
+
+	return rc;
+
+free_edac_device:
+	/* The device was never added; calling edac_device_del_device()
+	 * here (as the old error path did) would remove a device that
+	 * was never registered.  Only the control info needs freeing.
+	 */
+	edac_device_free_ctl_info(dci);
+
+	return rc;
+}
+
+/**
+ * pl310_edac_l2_remove - Unbind driver from controller
+ * @pdev: Pointer to the platform_device struct
+ *
+ * This routine unbinds the EDAC device controller instance associated
+ * with the specified arm,pl310-cache controller described by the
+ * OpenFirmware device tree node passed as a parameter.
+ *
+ * Return: Always returns 0
+ */
+static int pl310_edac_l2_remove(struct platform_device *pdev)
+{
+	/* NOTE(review): relies on probe() having called
+	 * platform_set_drvdata(); verify drvdata is set before this
+	 * dereference.
+	 */
+	struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
+	struct pl310_edac_l2_priv *priv = dci->pvt_info;
+	u32 regval;
+
+	if (edac_op_state != EDAC_OPSTATE_POLL) {
+		/* mask parity interrupts before tearing the device down */
+		regval = readl(priv->base+L2X0_INTR_MASK);
+		regval &= ~(L2X0_INTR_PARRD_MASK | L2X0_INTR_PARRT_MASK);
+		writel(regval, priv->base+L2X0_INTR_MASK);
+	}
+
+	edac_device_del_device(&pdev->dev);
+	edac_device_free_ctl_info(dci);
+
+	return 0;
+}
+
+/* Device tree node type and compatible tuples this driver can match on */
+static const struct of_device_id pl310_edac_l2_match[] = {
+	{ .compatible = "arm,pl310-cache", },
+	{ /* end of table */ }
+};
+
+MODULE_DEVICE_TABLE(of, pl310_edac_l2_match);
+
+/* Platform driver glue; registered via module_platform_driver() below. */
+static struct platform_driver pl310_edac_l2_driver = {
+	.driver = {
+		.name = "pl310-edac-l2",
+		.of_match_table = pl310_edac_l2_match,
+	},
+	.probe = pl310_edac_l2_probe,
+	.remove = pl310_edac_l2_remove,
+};
+
+module_platform_driver(pl310_edac_l2_driver);
+
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_DESCRIPTION("pl310 L2 EDAC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/edac/zynqmp_ocm_edac.c b/drivers/edac/zynqmp_ocm_edac.c
new file mode 100644
index 000000000000..4957a8c9d02d
--- /dev/null
+++ b/drivers/edac/zynqmp_ocm_edac.c
@@ -0,0 +1,651 @@
+/*
+ * Xilinx ZynqMP OCM ECC Driver
+ * This driver is based on the mpc85xx_edac.c driver
+ *
+ * Copyright (C) 2016 Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details
+ */
+
+#include <linux/edac.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+#include "edac_module.h"
+
+/* Size of the buffer used to frame event-specific messages */
+#define ZYNQMP_OCM_EDAC_MSG_SIZE	256
+
+#define ZYNQMP_OCM_EDAC_STRING	"zynqmp_ocm"
+#define ZYNQMP_OCM_EDAC_MOD_VER	"1"
+
+/* Controller registers */
+#define CTRL_OFST		0x0
+#define OCM_ISR_OFST		0x04	/* interrupt status */
+#define OCM_IMR_OFST		0x08	/* interrupt mask */
+#define OCM_IEN_OFST		0x0C	/* interrupt enable */
+#define OCM_IDS_OFST		0x10	/* interrupt disable */
+
+/* ECC control register */
+#define ECC_CTRL_OFST		0x14
+
+/* Correctable error info registers (first-failing address/data) */
+#define CE_FFA_OFST		0x1C
+#define CE_FFD0_OFST		0x20
+#define CE_FFD1_OFST		0x24
+#define CE_FFD2_OFST		0x28
+#define CE_FFD3_OFST		0x2C
+#define CE_FFE_OFST		0x30
+
+/* Uncorrectable error info registers (first-failing address/data) */
+#define UE_FFA_OFST		0x34
+#define UE_FFD0_OFST		0x38
+#define UE_FFD1_OFST		0x3C
+#define UE_FFD2_OFST		0x40
+#define UE_FFD3_OFST		0x44
+#define UE_FFE_OFST		0x48
+
+/* ECC control register bit field definitions */
+#define ECC_CTRL_CLR_CE_ERR	0x40
+#define ECC_CTRL_CLR_UE_ERR	0x80
+
+/* Fault injection data and count registers */
+#define OCM_FID0_OFST		0x4C
+#define OCM_FID1_OFST		0x50
+#define OCM_FID2_OFST		0x54
+#define OCM_FID3_OFST		0x58
+#define OCM_FIC_OFST		0x74
+
+/* Interrupt masks */
+#define OCM_CEINTR_MASK		0x40
+#define OCM_UEINTR_MASK		0x80
+#define OCM_ECC_ENABLE_MASK	0x1
+#define OCM_FICOUNT_MASK	0x0FFFFFFF	/* fault-injection count is 28 bits */
+#define OCM_BASEVAL		0xFFFC0000	/* OR'd with FFA to form the fault address */
+#define EDAC_DEVICE		"ZynqMP-OCM"
+#define OCM_CEUE_MASK		0xC0	/* CE | UE interrupt bits */
+
+/**
+ * struct ecc_error_info - ECC error log information
+ * @addr:	Fault generated at this address
+ * @data0:	Generated fault data (lower word)
+ * @data1:	Generated fault data (upper word)
+ */
+struct ecc_error_info {
+	u32 addr;
+	u32 data0;
+	u32 data1;
+};
+
+/**
+ * struct zynqmp_ocm_ecc_status - ECC status information to report
+ * @ce_cnt:	Correctable error count
+ * @ue_cnt:	Uncorrectable error count
+ * @ceinfo:	Correctable error log information
+ * @ueinfo:	Uncorrectable error log information
+ */
+struct zynqmp_ocm_ecc_status {
+	u32 ce_cnt;
+	u32 ue_cnt;
+	struct ecc_error_info ceinfo;
+	struct ecc_error_info ueinfo;
+};
+
+/**
+ * struct zynqmp_ocm_edac_priv - OCM controller private instance data
+ * @baseaddr:	Base address of the OCM controller
+ * @message:	Buffer for framing the event specific info
+ * @stat:	ECC status information
+ * @p_data:	Pointer to platform data
+ * @ce_cnt:	Correctable Error count
+ * @ue_cnt:	Uncorrectable Error count
+ * @ce_bitpos:	Bit position for Correctable Error
+ * @ue_bitpos0:	First bit position for Uncorrectable Error
+ * @ue_bitpos1:	Second bit position for Uncorrectable Error
+ */
+struct zynqmp_ocm_edac_priv {
+	void __iomem *baseaddr;
+	char message[ZYNQMP_OCM_EDAC_MSG_SIZE];
+	struct zynqmp_ocm_ecc_status stat;
+	const struct zynqmp_ocm_platform_data *p_data;
+	u32 ce_cnt;
+	u32 ue_cnt;
+	u8 ce_bitpos;
+	u8 ue_bitpos0;
+	u8 ue_bitpos1;
+};
+
+/**
+ * zynqmp_ocm_edac_geterror_info - Get the current ecc error info
+ * @base:	Pointer to the base address of the OCM controller
+ * @p:		Pointer to the ocm ecc status structure
+ * @mask:	Status register value read from OCM_ISR_OFST
+ *
+ * Latches the first-failing address/data for whichever error type
+ * (CE or UE) is flagged in @mask, bumps the matching counter, and
+ * clears the serviced status bit by writing it back to the ISR.
+ */
+static void zynqmp_ocm_edac_geterror_info(void __iomem *base,
+		struct zynqmp_ocm_ecc_status *p, int mask)
+{
+	if (mask & OCM_CEINTR_MASK) {
+		p->ce_cnt++;
+		p->ceinfo.data0 = readl(base + CE_FFD0_OFST);
+		p->ceinfo.data1 = readl(base + CE_FFD1_OFST);
+		/* FFA holds an offset; OR in the OCM base to form a full address */
+		p->ceinfo.addr = (OCM_BASEVAL | readl(base + CE_FFA_OFST));
+		writel(ECC_CTRL_CLR_CE_ERR, base + OCM_ISR_OFST);
+	} else if (mask & OCM_UEINTR_MASK) {
+		p->ue_cnt++;
+		p->ueinfo.data0 = readl(base + UE_FFD0_OFST);
+		p->ueinfo.data1 = readl(base + UE_FFD1_OFST);
+		p->ueinfo.addr = (OCM_BASEVAL | readl(base + UE_FFA_OFST));
+		writel(ECC_CTRL_CLR_UE_ERR, base + OCM_ISR_OFST);
+	}
+}
+
+/**
+ * zynqmp_ocm_edac_handle_error - Handle controller error types CE and UE
+ * @dci:	Pointer to the edac device controller instance
+ * @p:		Pointer to the ocm ecc status structure
+ *
+ * Reports any pending correctable and uncorrectable errors to the EDAC
+ * core, then resets the status structure for the next event.
+ */
+static void zynqmp_ocm_edac_handle_error(struct edac_device_ctl_info *dci,
+		struct zynqmp_ocm_ecc_status *p)
+{
+	struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+	struct ecc_error_info *pinf;
+
+	if (p->ce_cnt) {
+		pinf = &p->ceinfo;
+		snprintf(priv->message, ZYNQMP_OCM_EDAC_MSG_SIZE,
+			 "\n\rOCM ECC error type :%s\n\r"
+			 "Addr: [0x%X]\n\rFault Data[31:0]: [0x%X]\n\r"
+			 "Fault Data[63:32]: [0x%X]",
+			 "CE", pinf->addr, pinf->data0, pinf->data1);
+		edac_device_handle_ce(dci, 0, 0, priv->message);
+	}
+
+	if (p->ue_cnt) {
+		pinf = &p->ueinfo;
+		snprintf(priv->message, ZYNQMP_OCM_EDAC_MSG_SIZE,
+			 "\n\rOCM ECC error type :%s\n\r"
+			 "Addr: [0x%X]\n\rFault Data[31:0]: [0x%X]\n\r"
+			 "Fault Data[63:32]: [0x%X]",
+			 "UE", pinf->addr, pinf->data0, pinf->data1);
+		edac_device_handle_ue(dci, 0, 0, priv->message);
+	}
+
+	/* Clear counters and logs so the next interrupt starts clean */
+	memset(p, 0, sizeof(*p));
+}
+
+/**
+ * zynqmp_ocm_edac_intr_handler - isr routine
+ * @irq:	irq number
+ * @dev_id:	device id pointer
+ *
+ * Interrupt service routine registered with the EDAC core. Checks the
+ * OCM interrupt status register and posts any pending ECC errors.
+ *
+ * Return: IRQ_NONE, if interrupt not set or IRQ_HANDLED otherwise
+ */
+static irqreturn_t zynqmp_ocm_edac_intr_handler(int irq, void *dev_id)
+{
+	struct edac_device_ctl_info *dci = dev_id;
+	struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+	int regval;
+
+	regval = readl(priv->baseaddr + OCM_ISR_OFST);
+	/* Not our interrupt unless a CE or UE status bit is set */
+	if (!(regval & OCM_CEUE_MASK))
+		return IRQ_NONE;
+
+	zynqmp_ocm_edac_geterror_info(priv->baseaddr,
+			&priv->stat, regval);
+
+	/* Accumulate lifetime counts before stat is reset by handle_error */
+	priv->ce_cnt += priv->stat.ce_cnt;
+	priv->ue_cnt += priv->stat.ue_cnt;
+	zynqmp_ocm_edac_handle_error(dci, &priv->stat);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * zynqmp_ocm_edac_get_eccstate - Return the controller ecc status
+ * @base:	Pointer to the OCM controller base address
+ *
+ * Get the ECC enable/disable status for the controller.
+ *
+ * Return: true if ECC is enabled, false otherwise.
+ */
+static bool zynqmp_ocm_edac_get_eccstate(void __iomem *base)
+{
+	return readl(base + ECC_CTRL_OFST) & OCM_ECC_ENABLE_MASK;
+}
+
+/* Device tree compatible strings this driver binds against */
+static const struct of_device_id zynqmp_ocm_edac_match[] = {
+	{ .compatible = "xlnx,zynqmp-ocmc-1.0"},
+	{ /* end of table */ }
+};
+
+MODULE_DEVICE_TABLE(of, zynqmp_ocm_edac_match);
+
+/**
+ * zynqmp_ocm_edac_inject_fault_count_show - Shows fault injection count
+ * @dci:	Pointer to the edac device struct
+ * @data:	Pointer to user data
+ *
+ * Shows the fault injection count; once the counter reaches
+ * zero, errors are injected.
+ * Return: Number of bytes copied.
+ */
+static ssize_t zynqmp_ocm_edac_inject_fault_count_show(
+		struct edac_device_ctl_info *dci, char *data)
+{
+	struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+	u32 fic = readl(priv->baseaddr + OCM_FIC_OFST);
+
+	return sprintf(data, "FIC: 0x%x\n\r", fic);
+}
+
+/**
+ * zynqmp_ocm_edac_inject_fault_count_store - write fi count
+ * @dci:	Pointer to the edac device struct
+ * @data:	Pointer to user data
+ * @count:	read the size bytes from buffer
+ *
+ * Update the fault injection count register; once the counter reaches
+ * zero, errors are injected.
+ * Return: Number of bytes copied.
+ */
+static ssize_t zynqmp_ocm_edac_inject_fault_count_store(
+		struct edac_device_ctl_info *dci, const char *data,
+		size_t count)
+{
+	struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+	u32 ficount;
+
+	if (!data)
+		return -EFAULT;
+
+	if (kstrtouint(data, 0, &ficount))
+		return -EINVAL;
+
+	/* Counter field is only 28 bits wide; upper bits are dropped */
+	ficount &= OCM_FICOUNT_MASK;
+	writel(ficount, priv->baseaddr + OCM_FIC_OFST);
+
+	return count;
+}
+
+/**
+ * zynqmp_ocm_edac_inject_cebitpos_show - Shows CE bit position
+ * @dci:	Pointer to the edac device struct
+ * @data:	Pointer to user data
+ *
+ * Shows the fault injection data register that holds the configured
+ * correctable-error bit (FID0 for bits 0-31, FID1 otherwise).
+ * Return: Number of bytes copied.
+ */
+static ssize_t zynqmp_ocm_edac_inject_cebitpos_show(struct edac_device_ctl_info
+					*dci, char *data)
+{
+	struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+	u32 ofst = (priv->ce_bitpos <= 31) ? OCM_FID0_OFST : OCM_FID1_OFST;
+
+	return sprintf(data, "Fault Injection Data Reg: [0x%x]\n\r",
+		       readl(priv->baseaddr + ofst));
+}
+
+/**
+ * zynqmp_ocm_edac_inject_cebitpos_store - Set CE bit position
+ * @dci:	Pointer to the edac device struct
+ * @data:	Pointer to user data
+ * @count:	read the size bytes from buffer
+ *
+ * Set any one bit (0-63) in the fault injection data registers to
+ * inject a CE error; the other data register is cleared so exactly
+ * one bit is flipped.
+ * Return: Number of bytes copied on success, negative errno otherwise.
+ */
+static ssize_t zynqmp_ocm_edac_inject_cebitpos_store(
+		struct edac_device_ctl_info *dci, const char *data,
+		size_t count)
+{
+	struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+
+	if (!data)
+		return -EFAULT;
+
+	if (kstrtou8(data, 0, &priv->ce_bitpos))
+		return -EINVAL;
+
+	if (priv->ce_bitpos <= 31) {
+		/* 1U avoids signed-shift UB when ce_bitpos == 31 */
+		writel(1U << priv->ce_bitpos, priv->baseaddr + OCM_FID0_OFST);
+		writel(0, priv->baseaddr + OCM_FID1_OFST);
+	} else if (priv->ce_bitpos <= 63) {
+		writel(1U << (priv->ce_bitpos - 32),
+		       priv->baseaddr + OCM_FID1_OFST);
+		writel(0, priv->baseaddr + OCM_FID0_OFST);
+	} else {
+		/* Reject invalid positions instead of silently succeeding */
+		edac_printk(KERN_ERR, EDAC_DEVICE,
+			    "Bit position > 63 is not valid\n");
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+/**
+ * zynqmp_ocm_edac_inject_uebitpos0_show - Shows UE bit position0
+ * @dci:	Pointer to the edac device struct
+ * @data:	Pointer to user data
+ *
+ * Shows the fault injection data register holding the first UE bit
+ * position (FID0 for bits 0-31, FID1 otherwise).
+ * Return: Number of bytes copied.
+ */
+static ssize_t zynqmp_ocm_edac_inject_uebitpos0_show(
+		struct edac_device_ctl_info *dci, char *data)
+{
+	struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+	u32 ofst = (priv->ue_bitpos0 <= 31) ? OCM_FID0_OFST : OCM_FID1_OFST;
+
+	return sprintf(data, "Fault Injection Data Reg: [0x%x]\n\r",
+		       readl(priv->baseaddr + ofst));
+}
+
+/**
+ * zynqmp_ocm_edac_inject_uebitpos0_store - set UE bit position0
+ * @dci:	Pointer to the edac device struct
+ * @data:	Pointer to user data
+ * @count:	read the size bytes from buffer
+ *
+ * Set the first bit position (0-63) for UE error generation; two
+ * distinct bit positions must be configured to inject a UE error.
+ * Return: Number of bytes copied on success, negative errno otherwise.
+ */
+static ssize_t zynqmp_ocm_edac_inject_uebitpos0_store(
+		struct edac_device_ctl_info *dci,
+		const char *data, size_t count)
+{
+	struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+
+	if (!data)
+		return -EFAULT;
+
+	if (kstrtou8(data, 0, &priv->ue_bitpos0))
+		return -EINVAL;
+
+	if (priv->ue_bitpos0 <= 31) {
+		/* 1U avoids signed-shift UB when ue_bitpos0 == 31 */
+		writel(1U << priv->ue_bitpos0, priv->baseaddr + OCM_FID0_OFST);
+	} else if (priv->ue_bitpos0 <= 63) {
+		writel(1U << (priv->ue_bitpos0 - 32),
+		       priv->baseaddr + OCM_FID1_OFST);
+	} else {
+		/* Reject invalid positions instead of silently succeeding */
+		edac_printk(KERN_ERR, EDAC_DEVICE,
+			    "Bit position > 63 is not valid\n");
+		return -EINVAL;
+	}
+
+	edac_printk(KERN_INFO, EDAC_DEVICE,
+		    "Set another bit position for UE\n");
+	return count;
+}
+
+/**
+ * zynqmp_ocm_edac_inject_uebitpos1_show - Shows UE bit position1
+ * @dci:	Pointer to the edac device struct
+ * @data:	Pointer to user data
+ *
+ * Shows the fault injection data register holding the second UE bit
+ * position (FID0 for bits 0-31, FID1 otherwise).
+ * Return: Number of bytes copied.
+ */
+static ssize_t zynqmp_ocm_edac_inject_uebitpos1_show(
+		struct edac_device_ctl_info *dci, char *data)
+{
+	struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+	u32 ofst = (priv->ue_bitpos1 <= 31) ? OCM_FID0_OFST : OCM_FID1_OFST;
+
+	return sprintf(data, "Fault Injection Data Reg: [0x%x]\n\r",
+		       readl(priv->baseaddr + ofst));
+}
+
+/**
+ * zynqmp_ocm_edac_inject_uebitpos1_store - Set UE second bit position
+ * @dci:	Pointer to the edac device struct
+ * @data:	Pointer to user data
+ * @count:	read the size bytes from buffer
+ *
+ * Set the second bit position (0-63) for UE error generation; two
+ * distinct bit positions must be configured to inject a UE error.
+ * Return: Number of bytes copied on success, negative errno otherwise.
+ */
+static ssize_t zynqmp_ocm_edac_inject_uebitpos1_store(
+		struct edac_device_ctl_info *dci, const char *data,
+		size_t count)
+{
+	struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+	u32 mask;
+
+	if (!data)
+		return -EFAULT;
+
+	if (kstrtou8(data, 0, &priv->ue_bitpos1))
+		return -EINVAL;
+
+	if (priv->ue_bitpos0 == priv->ue_bitpos1) {
+		edac_printk(KERN_ERR, EDAC_DEVICE,
+			    "Bit positions should not be equal\n");
+		return -EINVAL;
+	}
+
+	/* Validate both positions up front so the chain below is exhaustive */
+	if (priv->ue_bitpos0 > 63 || priv->ue_bitpos1 > 63) {
+		edac_printk(KERN_ERR, EDAC_DEVICE,
+			    "Bit position > 63 is not valid, Valid bits:[63:0]\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * If both bit positions refer to the same 32-bit word, configure
+	 * only the corresponding FID register and clear the other one.
+	 * If they straddle the two words, only the register for uebitpos1
+	 * needs writing here: uebitpos0 already programmed its register
+	 * in the uebitpos0 store handler. (The original code fell into
+	 * an error branch for valid same-word positions; fixed by making
+	 * this a single exhaustive if/else chain. 1U avoids signed-shift
+	 * UB at bit 31.)
+	 */
+	if (priv->ue_bitpos0 <= 31 && priv->ue_bitpos1 <= 31) {
+		mask = (1U << priv->ue_bitpos0) | (1U << priv->ue_bitpos1);
+		writel(mask, priv->baseaddr + OCM_FID0_OFST);
+		writel(0, priv->baseaddr + OCM_FID1_OFST);
+	} else if (priv->ue_bitpos0 >= 32 && priv->ue_bitpos1 >= 32) {
+		mask = (1U << (priv->ue_bitpos0 - 32)) |
+		       (1U << (priv->ue_bitpos1 - 32));
+		writel(mask, priv->baseaddr + OCM_FID1_OFST);
+		writel(0, priv->baseaddr + OCM_FID0_OFST);
+	} else if (priv->ue_bitpos0 <= 31) {
+		/* uebitpos0 is in FID0; put uebitpos1 in FID1 */
+		writel(1U << (priv->ue_bitpos1 - 32),
+		       priv->baseaddr + OCM_FID1_OFST);
+	} else {
+		/* uebitpos0 is in FID1; put uebitpos1 in FID0 */
+		writel(1U << priv->ue_bitpos1,
+		       priv->baseaddr + OCM_FID0_OFST);
+	}
+
+	edac_printk(KERN_INFO, EDAC_DEVICE,
+		    "UE at Bit Position0: %d Bit Position1: %d\n",
+		    priv->ue_bitpos0, priv->ue_bitpos1);
+	return count;
+}
+
+/* Sysfs attributes exposed for error injection (see store/show handlers) */
+static struct edac_dev_sysfs_attribute zynqmp_ocm_edac_sysfs_attributes[] = {
+	{
+		.attr = {
+			.name = "inject_cebitpos",
+			.mode = (0644)
+		},
+		.show = zynqmp_ocm_edac_inject_cebitpos_show,
+		.store = zynqmp_ocm_edac_inject_cebitpos_store},
+	{
+		.attr = {
+			.name = "inject_uebitpos0",
+			.mode = (0644)
+		},
+		.show = zynqmp_ocm_edac_inject_uebitpos0_show,
+		.store = zynqmp_ocm_edac_inject_uebitpos0_store},
+	{
+		.attr = {
+			.name = "inject_uebitpos1",
+			.mode = (0644)
+		},
+		.show = zynqmp_ocm_edac_inject_uebitpos1_show,
+		.store = zynqmp_ocm_edac_inject_uebitpos1_store},
+	{
+		.attr = {
+			.name = "inject_fault_count",
+			.mode = (0644)
+		},
+		.show = zynqmp_ocm_edac_inject_fault_count_show,
+		.store = zynqmp_ocm_edac_inject_fault_count_store},
+	/* End of list */
+	{
+		.attr = {.name = NULL}
+	}
+};
+
+/**
+ * zynqmp_set_ocm_edac_sysfs_attributes - create sysfs attributes
+ * @edac_dev:	Pointer to the edac device struct
+ *
+ * Hooks the error-injection attribute table onto the EDAC device so
+ * the core creates the sysfs entries when the device is registered.
+ * Return: None.
+ */
+static void zynqmp_set_ocm_edac_sysfs_attributes(struct edac_device_ctl_info
+						 *edac_dev)
+{
+	edac_dev->sysfs_attributes = zynqmp_ocm_edac_sysfs_attributes;
+}
+
+/**
+ * zynqmp_ocm_edac_probe - Check controller and bind driver
+ * @pdev:	Pointer to the platform_device struct
+ *
+ * Probes a specific controller instance for binding with the driver.
+ * Bails out early (-ENXIO) if the hardware does not have ECC enabled,
+ * otherwise registers an EDAC device, hooks up the interrupt handler
+ * and enables CE/UE interrupts.
+ *
+ * Return: 0 if the controller instance was successfully bound to the
+ * driver; otherwise, < 0 on error.
+ */
+static int zynqmp_ocm_edac_probe(struct platform_device *pdev)
+{
+	struct edac_device_ctl_info *dci;
+	struct zynqmp_ocm_edac_priv *priv;
+	int irq, status;
+	struct resource *res;
+	void __iomem *baseaddr;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	baseaddr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(baseaddr))
+		return PTR_ERR(baseaddr);
+
+	if (!zynqmp_ocm_edac_get_eccstate(baseaddr)) {
+		edac_printk(KERN_INFO, EDAC_DEVICE,
+			    "ECC not enabled - Disabling EDAC driver\n");
+		return -ENXIO;
+	}
+
+	dci = edac_device_alloc_ctl_info(sizeof(*priv), ZYNQMP_OCM_EDAC_STRING,
+					 1, ZYNQMP_OCM_EDAC_STRING, 1, 0, NULL, 0,
+					 edac_device_alloc_index());
+	if (!dci) {
+		edac_printk(KERN_ERR, EDAC_DEVICE,
+			    "Unable to allocate EDAC device\n");
+		return -ENOMEM;
+	}
+
+	priv = dci->pvt_info;
+	platform_set_drvdata(pdev, dci);
+	dci->dev = &pdev->dev;
+	priv->baseaddr = baseaddr;
+	dci->mod_name = pdev->dev.driver->name;
+	dci->ctl_name = ZYNQMP_OCM_EDAC_STRING;
+	dci->dev_name = dev_name(&pdev->dev);
+
+	zynqmp_set_ocm_edac_sysfs_attributes(dci);
+	if (edac_device_add_device(dci)) {
+		/* Propagate a real errno instead of the former bare -1 */
+		status = -ENODEV;
+		goto free_dev_ctl;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		edac_printk(KERN_ERR, EDAC_DEVICE,
+			    "No irq %d in DT\n", irq);
+		/* Fix: unregister the EDAC device instead of leaking it */
+		status = irq;
+		goto free_edac_dev;
+	}
+
+	status = devm_request_irq(&pdev->dev, irq,
+				  zynqmp_ocm_edac_intr_handler,
+				  0, dev_name(&pdev->dev), dci);
+	if (status < 0) {
+		edac_printk(KERN_ERR, EDAC_DEVICE, "Failed to request Irq\n");
+		goto free_edac_dev;
+	}
+
+	/* Enable CE and UE interrupts */
+	writel(OCM_CEUE_MASK, priv->baseaddr + OCM_IEN_OFST);
+
+	return 0;
+
+free_edac_dev:
+	edac_device_del_device(&pdev->dev);
+free_dev_ctl:
+	edac_device_free_ctl_info(dci);
+
+	return status;
+}
+
+/**
+ * zynqmp_ocm_edac_remove - Unbind driver from controller
+ * @pdev:	Pointer to the platform_device struct
+ *
+ * Disables the CE/UE interrupts, then unregisters and frees the EDAC
+ * device created at probe time.
+ *
+ * Return: Unconditionally 0
+ */
+static int zynqmp_ocm_edac_remove(struct platform_device *pdev)
+{
+	struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
+	struct zynqmp_ocm_edac_priv *priv = dci->pvt_info;
+
+	/* Mask CE/UE interrupts via the interrupt-disable register */
+	writel(OCM_CEUE_MASK, priv->baseaddr + OCM_IDS_OFST);
+	edac_device_del_device(&pdev->dev);
+	edac_device_free_ctl_info(dci);
+
+	return 0;
+}
+
+/* Platform driver glue; probe/remove are defined above in this file */
+static struct platform_driver zynqmp_ocm_edac_driver = {
+	.driver = {
+		   .name = "zynqmp-ocm-edac",
+		   .of_match_table = zynqmp_ocm_edac_match,
+		   },
+	.probe = zynqmp_ocm_edac_probe,
+	.remove = zynqmp_ocm_edac_remove,
+};
+
+module_platform_driver(zynqmp_ocm_edac_driver);
+
+MODULE_AUTHOR("Xilinx Inc");
+MODULE_DESCRIPTION("ZynqMP OCM ECC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/xilinx/Kconfig b/drivers/firmware/xilinx/Kconfig
index 9a9bd190888e..37d712adb886 100644
--- a/drivers/firmware/xilinx/Kconfig
+++ b/drivers/firmware/xilinx/Kconfig
@@ -23,4 +23,12 @@ config ZYNQMP_FIRMWARE_DEBUG
Say yes to enable ZynqMP firmware interface debug APIs.
If in doubt, say N.
+config ZYNQMP_FIRMWARE_SECURE
+ bool "Enable Xilinx Zynq MPSoC secure firmware loading APIs"
+ help
+ Firmware secure driver is used as an interface to load
+ Authenticated and/or Encrypted firmware with single loadable
+ partition. Say yes to enable ZynqMP secure firmware loading
+	  APIs. If in doubt, say N.
+
endmenu
diff --git a/drivers/firmware/xilinx/Makefile b/drivers/firmware/xilinx/Makefile
index 875a53703c82..1b57bb14ad94 100644
--- a/drivers/firmware/xilinx/Makefile
+++ b/drivers/firmware/xilinx/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for Xilinx firmwares
-obj-$(CONFIG_ZYNQMP_FIRMWARE) += zynqmp.o
+obj-$(CONFIG_ZYNQMP_FIRMWARE) += zynqmp.o zynqmp-ggs.o
obj-$(CONFIG_ZYNQMP_FIRMWARE_DEBUG) += zynqmp-debug.o
+obj-$(CONFIG_ZYNQMP_FIRMWARE_SECURE) += zynqmp-secure.o
diff --git a/drivers/firmware/xilinx/zynqmp-debug.c b/drivers/firmware/xilinx/zynqmp-debug.c
index 43bc6cfdab45..579c134fb93b 100644
--- a/drivers/firmware/xilinx/zynqmp-debug.c
+++ b/drivers/firmware/xilinx/zynqmp-debug.c
@@ -31,13 +31,88 @@ static char debugfs_buf[PAGE_SIZE];
#define PM_API(id) {id, #id, strlen(#id)}
static struct pm_api_info pm_api_list[] = {
+ PM_API(PM_REQUEST_SUSPEND),
+ PM_API(PM_SELF_SUSPEND),
+ PM_API(PM_FORCE_POWERDOWN),
+ PM_API(PM_ABORT_SUSPEND),
+ PM_API(PM_REQUEST_WAKEUP),
+ PM_API(PM_SET_WAKEUP_SOURCE),
+ PM_API(PM_SYSTEM_SHUTDOWN),
+ PM_API(PM_REQUEST_NODE),
+ PM_API(PM_RELEASE_NODE),
+ PM_API(PM_SET_REQUIREMENT),
+ PM_API(PM_SET_MAX_LATENCY),
PM_API(PM_GET_API_VERSION),
+ PM_API(PM_SET_CONFIGURATION),
+ PM_API(PM_GET_NODE_STATUS),
+ PM_API(PM_GET_OPERATING_CHARACTERISTIC),
+ PM_API(PM_REGISTER_NOTIFIER),
+ PM_API(PM_RESET_ASSERT),
+ PM_API(PM_RESET_GET_STATUS),
+ PM_API(PM_GET_CHIPID),
+ PM_API(PM_PINCTRL_GET_FUNCTION),
+ PM_API(PM_PINCTRL_SET_FUNCTION),
+ PM_API(PM_PINCTRL_CONFIG_PARAM_GET),
+ PM_API(PM_PINCTRL_CONFIG_PARAM_SET),
+ PM_API(PM_IOCTL),
+ PM_API(PM_CLOCK_ENABLE),
+ PM_API(PM_CLOCK_DISABLE),
+ PM_API(PM_CLOCK_GETSTATE),
+ PM_API(PM_CLOCK_SETDIVIDER),
+ PM_API(PM_CLOCK_GETDIVIDER),
+ PM_API(PM_CLOCK_SETRATE),
+ PM_API(PM_CLOCK_GETRATE),
+ PM_API(PM_CLOCK_SETPARENT),
+ PM_API(PM_CLOCK_GETPARENT),
PM_API(PM_QUERY_DATA),
};
static struct dentry *firmware_debugfs_root;
/**
+ * zynqmp_pm_self_suspend - PM call for master to suspend itself
+ * @node:	Node ID of the master or subsystem
+ * @latency:	Requested maximum wakeup latency (not supported)
+ * @state:	Requested state (not supported)
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_self_suspend(const u32 node, const u32 latency,
+				  const u32 state)
+{
+	/* Thin wrapper around the EEMI SMC invocation helper */
+	return zynqmp_pm_invoke_fn(PM_SELF_SUSPEND, node, latency,
+				   state, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_abort_suspend - PM call to announce that a prior suspend request
+ *			     is to be aborted.
+ * @reason:	Reason for the abort
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_abort_suspend(const enum zynqmp_pm_abort_reason reason)
+{
+	/* Thin wrapper around the EEMI SMC invocation helper */
+	return zynqmp_pm_invoke_fn(PM_ABORT_SUSPEND, reason, 0, 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_register_notifier - Register the PU to be notified of PM events
+ * @node:	Node ID of the slave
+ * @event:	The event to be notified about
+ * @wake:	Wake up on event
+ * @enable:	Enable or disable the notifier
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_register_notifier(const u32 node, const u32 event,
+				       const u32 wake, const u32 enable)
+{
+	/* Thin wrapper around the EEMI SMC invocation helper */
+	return zynqmp_pm_invoke_fn(PM_REGISTER_NOTIFIER, node, event,
+				   wake, enable, NULL);
+}
+
+/**
* zynqmp_pm_argument_value() - Extract argument value from a PM-API request
* @arg: Entered PM-API argument in string format
*
@@ -87,6 +162,7 @@ static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret)
{
const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
u32 pm_api_version;
+ u64 rate;
int ret;
struct zynqmp_pm_query_data qdata = {0};
@@ -96,6 +172,191 @@ static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret)
sprintf(debugfs_buf, "PM-API Version = %d.%d\n",
pm_api_version >> 16, pm_api_version & 0xffff);
break;
+ case PM_REQUEST_SUSPEND:
+ ret = eemi_ops->request_suspend(pm_api_arg[0],
+ pm_api_arg[1] ? pm_api_arg[1] :
+ ZYNQMP_PM_REQUEST_ACK_NO,
+ pm_api_arg[2] ? pm_api_arg[2] :
+ ZYNQMP_PM_MAX_LATENCY, 0);
+ break;
+ case PM_SELF_SUSPEND:
+ ret = zynqmp_pm_self_suspend(pm_api_arg[0],
+ pm_api_arg[1] ? pm_api_arg[1] :
+ ZYNQMP_PM_MAX_LATENCY, 0);
+ break;
+ case PM_FORCE_POWERDOWN:
+ ret = eemi_ops->force_powerdown(pm_api_arg[0],
+ pm_api_arg[1] ? pm_api_arg[1] :
+ ZYNQMP_PM_REQUEST_ACK_NO);
+ break;
+ case PM_ABORT_SUSPEND:
+ ret = zynqmp_pm_abort_suspend(pm_api_arg[0] ? pm_api_arg[0] :
+ ZYNQMP_PM_ABORT_REASON_UNKNOWN);
+ break;
+ case PM_REQUEST_WAKEUP:
+ ret = eemi_ops->request_wakeup(pm_api_arg[0],
+ pm_api_arg[1], pm_api_arg[2],
+ pm_api_arg[3] ? pm_api_arg[3] :
+ ZYNQMP_PM_REQUEST_ACK_NO);
+ break;
+ case PM_SET_WAKEUP_SOURCE:
+ ret = eemi_ops->set_wakeup_source(pm_api_arg[0], pm_api_arg[1],
+ pm_api_arg[2]);
+ break;
+ case PM_SYSTEM_SHUTDOWN:
+ ret = eemi_ops->system_shutdown(pm_api_arg[0], pm_api_arg[1]);
+ break;
+ case PM_REQUEST_NODE:
+ ret = eemi_ops->request_node(pm_api_arg[0],
+ pm_api_arg[1] ? pm_api_arg[1] :
+ ZYNQMP_PM_CAPABILITY_ACCESS,
+ pm_api_arg[2] ? pm_api_arg[2] : 0,
+ pm_api_arg[3] ? pm_api_arg[3] :
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+ break;
+ case PM_RELEASE_NODE:
+ ret = eemi_ops->release_node(pm_api_arg[0]);
+ break;
+ case PM_SET_REQUIREMENT:
+ ret = eemi_ops->set_requirement(pm_api_arg[0],
+ pm_api_arg[1] ? pm_api_arg[1] :
+ ZYNQMP_PM_CAPABILITY_CONTEXT,
+ pm_api_arg[2] ?
+ pm_api_arg[2] : 0,
+ pm_api_arg[3] ? pm_api_arg[3] :
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+ break;
+ case PM_SET_MAX_LATENCY:
+ ret = eemi_ops->set_max_latency(pm_api_arg[0],
+ pm_api_arg[1] ? pm_api_arg[1] :
+ ZYNQMP_PM_MAX_LATENCY);
+ break;
+ case PM_SET_CONFIGURATION:
+ ret = eemi_ops->set_configuration(pm_api_arg[0]);
+ break;
+ case PM_GET_NODE_STATUS:
+ ret = eemi_ops->get_node_status(pm_api_arg[0],
+ &pm_api_ret[0],
+ &pm_api_ret[1],
+ &pm_api_ret[2]);
+ if (!ret)
+ sprintf(debugfs_buf,
+ "GET_NODE_STATUS:\n\tNodeId: %llu\n\tStatus: %u\n\tRequirements: %u\n\tUsage: %u\n",
+ pm_api_arg[0], pm_api_ret[0],
+ pm_api_ret[1], pm_api_ret[2]);
+ break;
+ case PM_GET_OPERATING_CHARACTERISTIC:
+ ret = eemi_ops->get_operating_characteristic(pm_api_arg[0],
+ pm_api_arg[1] ? pm_api_arg[1] :
+ ZYNQMP_PM_OPERATING_CHARACTERISTIC_POWER,
+ &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf,
+ "GET_OPERATING_CHARACTERISTIC:\n\tNodeId: %llu\n\tType: %llu\n\tResult: %u\n",
+ pm_api_arg[0], pm_api_arg[1],
+ pm_api_ret[0]);
+ break;
+ case PM_REGISTER_NOTIFIER:
+ ret = zynqmp_pm_register_notifier(pm_api_arg[0],
+ pm_api_arg[1] ?
+ pm_api_arg[1] : 0,
+ pm_api_arg[2] ?
+ pm_api_arg[2] : 0,
+ pm_api_arg[3] ?
+ pm_api_arg[3] : 0);
+ break;
+ case PM_RESET_ASSERT:
+ ret = eemi_ops->reset_assert(pm_api_arg[0], pm_api_arg[1]);
+ break;
+ case PM_RESET_GET_STATUS:
+ ret = eemi_ops->reset_get_status(pm_api_arg[0], &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf, "Reset status: %u\n",
+ pm_api_ret[0]);
+ break;
+ case PM_GET_CHIPID:
+ ret = eemi_ops->get_chipid(&pm_api_ret[0], &pm_api_ret[1]);
+ if (!ret)
+ sprintf(debugfs_buf, "Idcode: %#x, Version:%#x\n",
+ pm_api_ret[0], pm_api_ret[1]);
+ break;
+ case PM_PINCTRL_GET_FUNCTION:
+ ret = eemi_ops->pinctrl_get_function(pm_api_arg[0],
+ &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf,
+ "Current set function for the pin: %u\n",
+ pm_api_ret[0]);
+ break;
+ case PM_PINCTRL_SET_FUNCTION:
+ ret = eemi_ops->pinctrl_set_function(pm_api_arg[0],
+ pm_api_arg[1]);
+ break;
+ case PM_PINCTRL_CONFIG_PARAM_GET:
+ ret = eemi_ops->pinctrl_get_config(pm_api_arg[0], pm_api_arg[1],
+ &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf,
+ "Pin: %llu, Param: %llu, Value: %u\n",
+ pm_api_arg[0], pm_api_arg[1],
+ pm_api_ret[0]);
+ break;
+ case PM_PINCTRL_CONFIG_PARAM_SET:
+ ret = eemi_ops->pinctrl_set_config(pm_api_arg[0],
+ pm_api_arg[1],
+ pm_api_arg[2]);
+ break;
+ case PM_IOCTL:
+ ret = eemi_ops->ioctl(pm_api_arg[0], pm_api_arg[1],
+ pm_api_arg[2], pm_api_arg[3],
+ &pm_api_ret[0]);
+ if (!ret && (pm_api_arg[1] == IOCTL_GET_RPU_OPER_MODE ||
+ pm_api_arg[1] == IOCTL_GET_PLL_FRAC_MODE ||
+ pm_api_arg[1] == IOCTL_GET_PLL_FRAC_DATA ||
+ pm_api_arg[1] == IOCTL_READ_GGS ||
+ pm_api_arg[1] == IOCTL_READ_PGGS ||
+ pm_api_arg[1] == IOCTL_PROBE_COUNTER_READ))
+ sprintf(debugfs_buf, "IOCTL return value: %u\n",
+ pm_api_ret[1]);
+ break;
+ case PM_CLOCK_ENABLE:
+ ret = eemi_ops->clock_enable(pm_api_arg[0]);
+ break;
+ case PM_CLOCK_DISABLE:
+ ret = eemi_ops->clock_disable(pm_api_arg[0]);
+ break;
+ case PM_CLOCK_GETSTATE:
+ ret = eemi_ops->clock_getstate(pm_api_arg[0], &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf, "Clock state: %u\n",
+ pm_api_ret[0]);
+ break;
+ case PM_CLOCK_SETDIVIDER:
+ ret = eemi_ops->clock_setdivider(pm_api_arg[0], pm_api_arg[1]);
+ break;
+ case PM_CLOCK_GETDIVIDER:
+ ret = eemi_ops->clock_getdivider(pm_api_arg[0], &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf, "Divider Value: %d\n",
+ pm_api_ret[0]);
+ break;
+ case PM_CLOCK_SETRATE:
+ ret = eemi_ops->clock_setrate(pm_api_arg[0], pm_api_arg[1]);
+ break;
+ case PM_CLOCK_GETRATE:
+ ret = eemi_ops->clock_getrate(pm_api_arg[0], &rate);
+ if (!ret)
+ sprintf(debugfs_buf, "Clock rate :%llu\n", rate);
+ break;
+ case PM_CLOCK_SETPARENT:
+ ret = eemi_ops->clock_setparent(pm_api_arg[0], pm_api_arg[1]);
+ break;
+ case PM_CLOCK_GETPARENT:
+ ret = eemi_ops->clock_getparent(pm_api_arg[0], &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf,
+ "Clock parent Index: %u\n", pm_api_ret[0]);
+ break;
case PM_QUERY_DATA:
qdata.qid = pm_api_arg[0];
qdata.arg1 = pm_api_arg[1];
diff --git a/drivers/firmware/xilinx/zynqmp-ggs.c b/drivers/firmware/xilinx/zynqmp-ggs.c
new file mode 100644
index 000000000000..42179ad73c7f
--- /dev/null
+++ b/drivers/firmware/xilinx/zynqmp-ggs.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Xilinx Zynq MPSoC Firmware layer
+ *
+ * Copyright (C) 2014-2018 Xilinx, Inc.
+ *
+ * Jolly Shah <jollys@xilinx.com>
+ * Rajan Vaja <rajanv@xilinx.com>
+ */
+
+#include <linux/compiler.h>
+#include <linux/of.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+/**
+ * read_register - Read one GGS/PGGS register through the PM IOCTL interface
+ * @buf:	Output buffer (sysfs PAGE_SIZE buffer)
+ * @ioctl_id:	IOCTL_READ_GGS or IOCTL_READ_PGGS
+ * @reg:	Register index to read
+ *
+ * Return: Number of bytes printed into @buf, or a negative error code.
+ */
+static ssize_t read_register(char *buf, u32 ioctl_id, u32 reg)
+{
+	int ret;
+	u32 ret_payload[PAYLOAD_ARG_CNT];
+	const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+	/*
+	 * zynqmp_pm_get_eemi_ops() may return an ERR_PTR (see securefw_probe),
+	 * so check before dereferencing it.
+	 */
+	if (IS_ERR(eemi_ops) || !eemi_ops->ioctl)
+		return -EFAULT;
+
+	ret = eemi_ops->ioctl(0, ioctl_id, reg, 0, ret_payload);
+	if (ret)
+		return ret;
+
+	/* Register value is returned in the second payload word */
+	return sprintf(buf, "0x%x\n", ret_payload[1]);
+}
+
+/**
+ * write_register - Read-modify-write one GGS/PGGS register
+ * @buf:		sysfs input: "<hex mask> <hex value>"
+ * @count:		Size of @buf
+ * @read_ioctl:		IOCTL used to read the current register value
+ * @write_ioctl:	IOCTL used to write the merged value back
+ * @reg:		Register index
+ *
+ * Only bits set in the mask are updated; all other bits keep their
+ * current value.
+ *
+ * Return: @count on success, a negative error code otherwise.
+ */
+static ssize_t write_register(const char *buf, size_t count, u32 read_ioctl,
+			      u32 write_ioctl, u32 reg)
+{
+	char *kern_buff, *inbuf, *tok;
+	long mask, value;
+	int ret;
+	u32 ret_payload[PAYLOAD_ARG_CNT];
+	const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+	if (IS_ERR(eemi_ops) || !eemi_ops->ioctl)
+		return -EFAULT;
+
+	/*
+	 * Allocate count + 1 bytes: strlcpy() reserves one byte for the
+	 * terminating NUL and would otherwise silently drop the last
+	 * input character.  Note strlcpy() returns a size_t, so the old
+	 * "ret < 0" check on its result could never trigger.
+	 */
+	kern_buff = kzalloc(count + 1, GFP_KERNEL);
+	if (!kern_buff)
+		return -ENOMEM;
+
+	strlcpy(kern_buff, buf, count + 1);
+
+	inbuf = kern_buff;
+
+	/* Read the write mask (hex) */
+	tok = strsep(&inbuf, " ");
+	if (!tok) {
+		ret = -EFAULT;
+		goto err;
+	}
+
+	ret = kstrtol(tok, 16, &mask);
+	if (ret) {
+		ret = -EFAULT;
+		goto err;
+	}
+
+	/* Read the write value (hex) */
+	tok = strsep(&inbuf, " ");
+	if (!tok) {
+		ret = -EFAULT;
+		goto err;
+	}
+
+	ret = kstrtol(tok, 16, &value);
+	if (ret) {
+		ret = -EFAULT;
+		goto err;
+	}
+
+	/* Fetch the current value, then merge in the masked bits */
+	ret = eemi_ops->ioctl(0, read_ioctl, reg, 0, ret_payload);
+	if (ret) {
+		ret = -EFAULT;
+		goto err;
+	}
+	ret_payload[1] &= ~mask;
+	value &= mask;
+	value |= ret_payload[1];
+
+	ret = eemi_ops->ioctl(0, write_ioctl, reg, value, NULL);
+	if (ret)
+		ret = -EFAULT;
+
+err:
+	kfree(kern_buff);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+/**
+ * ggs_show - Read one global general storage (GGS) register over sysfs
+ * @kobj:	Kobject structure
+ * @attr:	Kobject attribute structure
+ * @buf:	Output buffer for the register value string
+ * @reg:	Register number
+ *
+ * Backend helper for "cat /sys/firmware/zynqmp/ggs<reg>".
+ *
+ * Return: Number of bytes printed into the buffer.
+ */
+static ssize_t ggs_show(struct kobject *kobj,
+			struct kobj_attribute *attr,
+			char *buf,
+			u32 reg)
+{
+	return read_register(buf, IOCTL_READ_GGS, reg);
+}
+
+/**
+ * ggs_store - Write one global general storage (GGS) register over sysfs
+ * @kobj:	Kobject structure
+ * @attr:	Kobject attribute structure
+ * @buf:	User input: "<hex mask> <hex value>"
+ * @count:	Size of @buf
+ * @reg:	Register number
+ *
+ * Backend helper for e.g.:
+ *	echo 0xFFFFFFFF 0x1234ABCD > /sys/firmware/zynqmp/ggs0
+ *
+ * Return: @count if the request succeeds, the corresponding
+ * error code otherwise.
+ */
+static ssize_t ggs_store(struct kobject *kobj,
+			 struct kobj_attribute *attr,
+			 const char *buf,
+			 size_t count,
+			 u32 reg)
+{
+	/* Reject NULL handles, empty input and out-of-range registers */
+	if (!kobj || !attr || !buf)
+		return -EINVAL;
+	if (!count || reg >= GSS_NUM_REGS)
+		return -EINVAL;
+
+	return write_register(buf, count, IOCTL_READ_GGS, IOCTL_WRITE_GGS, reg);
+}
+
+/* GGS register show functions */
+/*
+ * NOTE(review): despite the GGS0_ prefix, this macro generates ggs<N>_show()
+ * for any N — the name is misleading but kept for source compatibility.
+ */
+#define GGS0_SHOW(N) \
+ ssize_t ggs##N##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ char *buf) \
+ { \
+ return ggs_show(kobj, attr, buf, N); \
+ }
+
+/* Instantiate ggs0_show() .. ggs3_show() */
+static GGS0_SHOW(0);
+static GGS0_SHOW(1);
+static GGS0_SHOW(2);
+static GGS0_SHOW(3);
+
+/* GGS register store function */
+#define GGS0_STORE(N) \
+ ssize_t ggs##N##_store(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ const char *buf, \
+ size_t count) \
+ { \
+ return ggs_store(kobj, attr, buf, count, N); \
+ }
+
+/* Instantiate ggs0_store() .. ggs3_store() */
+static GGS0_STORE(0);
+static GGS0_STORE(1);
+static GGS0_STORE(2);
+static GGS0_STORE(3);
+
+/**
+ * pggs_show - Read one persistent global general storage (PGGS) register
+ * @kobj:	Kobject structure
+ * @attr:	Kobject attribute structure
+ * @buf:	Output buffer for the register value string
+ * @reg:	Register number
+ *
+ * Backend helper for "cat /sys/firmware/zynqmp/pggs<reg>".
+ *
+ * Return: Number of bytes printed into the buffer.
+ */
+static ssize_t pggs_show(struct kobject *kobj,
+			 struct kobj_attribute *attr,
+			 char *buf,
+			 u32 reg)
+{
+	return read_register(buf, IOCTL_READ_PGGS, reg);
+}
+
+/**
+ * pggs_store - Store persistent global general storage (pggs) sysfs attribute
+ * @kobj:	Kobject structure
+ * @attr:	Kobject attribute structure
+ * @buf:	User entered attribute string: "<hex mask> <hex value>"
+ * @count:	Size of buf
+ * @reg:	Register number
+ *
+ * Return: count argument if request succeeds, the corresponding
+ * error code otherwise
+ *
+ * Helper function for storing a pggs register value.
+ */
+static ssize_t pggs_store(struct kobject *kobj,
+			  struct kobj_attribute *attr,
+			  const char *buf,
+			  size_t count,
+			  u32 reg)
+{
+	/*
+	 * Same argument validation that ggs_store() performs; it was
+	 * missing here.  GSS_NUM_REGS is assumed to also bound the PGGS
+	 * register count — TODO confirm against the firmware header.
+	 */
+	if (!kobj || !attr || !buf || !count || reg >= GSS_NUM_REGS)
+		return -EINVAL;
+
+	return write_register(buf, count, IOCTL_READ_PGGS,
+			      IOCTL_WRITE_PGGS, reg);
+}
+
+/*
+ * NOTE(review): as with GGS0_SHOW/GGS0_STORE, the PGGS0_ prefix is
+ * misleading — these macros generate pggs<N>_show()/pggs<N>_store() for
+ * any N.
+ */
+#define PGGS0_SHOW(N) \
+ ssize_t pggs##N##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ char *buf) \
+ { \
+ return pggs_show(kobj, attr, buf, N); \
+ }
+
+#define PGGS0_STORE(N) \
+ ssize_t pggs##N##_store(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ const char *buf, \
+ size_t count) \
+ { \
+ return pggs_store(kobj, attr, buf, count, N); \
+ }
+
+/* PGGS register show functions */
+static PGGS0_SHOW(0);
+static PGGS0_SHOW(1);
+static PGGS0_SHOW(2);
+static PGGS0_SHOW(3);
+
+/* PGGS register store functions */
+static PGGS0_STORE(0);
+static PGGS0_STORE(1);
+static PGGS0_STORE(2);
+static PGGS0_STORE(3);
+
+/* GGS register attributes */
+static struct kobj_attribute zynqmp_attr_ggs0 = __ATTR_RW(ggs0);
+static struct kobj_attribute zynqmp_attr_ggs1 = __ATTR_RW(ggs1);
+static struct kobj_attribute zynqmp_attr_ggs2 = __ATTR_RW(ggs2);
+static struct kobj_attribute zynqmp_attr_ggs3 = __ATTR_RW(ggs3);
+
+/* PGGS register attributes */
+static struct kobj_attribute zynqmp_attr_pggs0 = __ATTR_RW(pggs0);
+static struct kobj_attribute zynqmp_attr_pggs1 = __ATTR_RW(pggs1);
+static struct kobj_attribute zynqmp_attr_pggs2 = __ATTR_RW(pggs2);
+static struct kobj_attribute zynqmp_attr_pggs3 = __ATTR_RW(pggs3);
+
+/* All GGS/PGGS attributes exposed under /sys/firmware/zynqmp/ */
+static struct attribute *attrs[] = {
+ &zynqmp_attr_ggs0.attr,
+ &zynqmp_attr_ggs1.attr,
+ &zynqmp_attr_ggs2.attr,
+ &zynqmp_attr_ggs3.attr,
+ &zynqmp_attr_pggs0.attr,
+ &zynqmp_attr_pggs1.attr,
+ &zynqmp_attr_pggs2.attr,
+ &zynqmp_attr_pggs3.attr,
+ NULL,
+};
+
+/* Anonymous attribute group: entries appear directly under the parent */
+static const struct attribute_group attr_group = {
+	.attrs = attrs,
+	/*
+	 * A stray positional "NULL," followed the designated initializer
+	 * here; it re-initialized the member after .attrs and has been
+	 * removed — unset members are zero-initialized anyway.
+	 */
+};
+
+/**
+ * zynqmp_pm_ggs_init - Register the GGS/PGGS sysfs attribute group
+ * @parent_kobj: Parent kobject (the /sys/firmware/zynqmp node)
+ *
+ * Return: 0 on success, negative error code from sysfs_create_group().
+ */
+int zynqmp_pm_ggs_init(struct kobject *parent_kobj)
+{
+	return sysfs_create_group(parent_kobj, &attr_group);
+}
diff --git a/drivers/firmware/xilinx/zynqmp-secure.c b/drivers/firmware/xilinx/zynqmp-secure.c
new file mode 100644
index 000000000000..0647e4e373d4
--- /dev/null
+++ b/drivers/firmware/xilinx/zynqmp-secure.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx ZynqMP SecureFw Driver.
+ * Copyright (c) 2018 Xilinx Inc.
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+
+#define ZYNQMP_AES_KEY_SIZE 64
+
+static u8 key[ZYNQMP_AES_KEY_SIZE] = {0};
+static dma_addr_t dma_addr;
+static u8 *keyptr;
+static size_t dma_size;
+static char *kbuf;
+
+static const struct zynqmp_eemi_ops *eemi_ops;
+
+/**
+ * secure_load_store - Load a firmware image and hand it to the secure-image
+ *		       firmware call for authentication/decryption
+ * @dev:	Device this attribute belongs to
+ * @attr:	Device attribute
+ * @buf:	Firmware image file name (may end with a newline)
+ * @count:	Size of @buf
+ *
+ * The image (plus the user key, if one was stored via the "key" attribute)
+ * is copied into a DMA-coherent buffer which stays allocated until user
+ * space writes to "secure_load_done".
+ *
+ * Return: @count on success, a negative error code otherwise.
+ */
+static ssize_t secure_load_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	const struct firmware *fw;
+	char image_name[NAME_MAX];
+	size_t fw_size;
+	ssize_t len;
+	u64 dst;
+	int ret;
+
+	if (IS_ERR(eemi_ops) || !eemi_ops->secure_image)
+		return -EFAULT;
+
+	/*
+	 * strscpy() always NUL-terminates and reports overflow, unlike the
+	 * strncpy() used before, which could leave image_name unterminated
+	 * and send the following strlen() out of bounds.
+	 */
+	len = strscpy(image_name, buf, sizeof(image_name));
+	if (len < 0)
+		return -E2BIG;
+	/* Strip the trailing newline echo appends; guard the empty case */
+	if (len > 0 && image_name[len - 1] == '\n')
+		image_name[len - 1] = 0;
+
+	ret = request_firmware(&fw, image_name, dev);
+	if (ret) {
+		dev_err(dev, "Error requesting firmware %s\n", image_name);
+		return ret;
+	}
+
+	/*
+	 * Remember the size now: @fw must not be dereferenced after
+	 * release_firmware() (the original read fw->size afterwards,
+	 * a use-after-free).
+	 */
+	fw_size = fw->size;
+	dma_size = fw_size;
+	if (keyptr)
+		dma_size = fw_size + ZYNQMP_AES_KEY_SIZE;
+
+	kbuf = dma_alloc_coherent(dev, dma_size, &dma_addr, GFP_KERNEL);
+	if (!kbuf) {
+		/* The firmware was leaked on this path before */
+		release_firmware(fw);
+		return -ENOMEM;
+	}
+
+	memcpy(kbuf, fw->data, fw_size);
+
+	if (keyptr)
+		memcpy(kbuf + fw_size, key, ZYNQMP_AES_KEY_SIZE);
+
+	/* To ensure cache coherency */
+	__flush_cache_user_range((unsigned long)kbuf,
+				 (unsigned long)kbuf + dma_size);
+	release_firmware(fw);
+
+	if (keyptr)
+		ret = eemi_ops->secure_image(dma_addr, dma_addr + fw_size,
+					     &dst);
+	else
+		ret = eemi_ops->secure_image(dma_addr, 0, &dst);
+
+	if (ret) {
+		dev_info(dev, "Failed to load secure image \r\n");
+		/* Don't leak the DMA buffer on failure */
+		dma_free_coherent(dev, dma_size, kbuf, dma_addr);
+		kbuf = NULL;
+		return ret;
+	}
+	dev_info(dev, "Verified image at 0x%llx\n", dst);
+
+	return count;
+}
+
+/**
+ * key_show - Show the currently stored AES key
+ * @dev:	Device this attribute belongs to
+ * @attr:	Device attribute
+ * @buf:	Output buffer (PAGE_SIZE sysfs buffer)
+ *
+ * Return: Number of bytes printed into @buf.
+ */
+static ssize_t key_show(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	/*
+	 * The key buffer is filled by a raw memcpy() in key_store() and is
+	 * not guaranteed to be NUL-terminated, so bound the read with a
+	 * precision instead of trusting "%s".
+	 */
+	return scnprintf(buf, PAGE_SIZE, "%.*s\n",
+			 ZYNQMP_AES_KEY_SIZE, (const char *)key);
+}
+
+/**
+ * key_store - Store the AES key used for subsequent secure_load requests
+ * @dev:	Device this attribute belongs to
+ * @attr:	Device attribute
+ * @buf:	Key bytes from user space
+ * @count:	Size of @buf
+ *
+ * Return: @count on success, -EINVAL for out-of-range sizes.
+ */
+static ssize_t key_store(struct device *dev,
+			 struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	/*
+	 * sysfs hands us up to PAGE_SIZE bytes; the previous unchecked
+	 * memcpy() overflowed the 64-byte key buffer for larger writes.
+	 */
+	if (!count || count > ZYNQMP_AES_KEY_SIZE)
+		return -EINVAL;
+
+	/* Clear stale bytes from any previous, longer key */
+	memset(key, 0, ZYNQMP_AES_KEY_SIZE);
+	memcpy(key, buf, count);
+	keyptr = &key[0];
+	return count;
+}
+
+/**
+ * secure_load_done_store - Release the DMA buffer of the last secure load
+ * @dev:	Device this attribute belongs to
+ * @attr:	Device attribute
+ * @buf:	Decimal flag; any non-zero value frees the buffer
+ * @count:	Size of @buf
+ *
+ * Return: @count on success, a negative error code on bad input.
+ */
+static ssize_t secure_load_done_store(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t count)
+{
+	int ret;
+	unsigned int value;
+
+	ret = kstrtouint(buf, 10, &value);
+	if (ret)
+		return ret;
+
+	/*
+	 * Check and clear @kbuf so a second write cannot double-free the
+	 * coherent buffer.
+	 */
+	if (value && kbuf) {
+		dma_free_coherent(dev, dma_size, kbuf, dma_addr);
+		kbuf = NULL;
+	}
+
+	return count;
+}
+
+/* key is read/write; the two load triggers are write-only */
+static DEVICE_ATTR_RW(key);
+static DEVICE_ATTR_WO(secure_load);
+static DEVICE_ATTR_WO(secure_load_done);
+
+/* Attributes exposed under the securefw platform device */
+static struct attribute *securefw_attrs[] = {
+ &dev_attr_secure_load_done.attr,
+ &dev_attr_secure_load.attr,
+ &dev_attr_key.attr,
+ NULL,
+};
+
+/* Generates securefw_groups for sysfs_create_groups() below */
+ATTRIBUTE_GROUPS(securefw);
+
+/**
+ * securefw_probe - Set up DMA and publish the securefw sysfs interface
+ * @pdev: The securefw platform device
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int securefw_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	eemi_ops = zynqmp_pm_get_eemi_ops();
+	if (IS_ERR(eemi_ops))
+		return PTR_ERR(eemi_ops);
+
+	/* Firmware expects 32-bit DMA addresses */
+	dev->coherent_dma_mask = DMA_BIT_MASK(32);
+
+	ret = of_dma_configure(dev, NULL, true);
+	if (ret < 0) {
+		dev_info(dev, "Cannot setup DMA ops\r\n");
+		return ret;
+	}
+
+	ret = sysfs_create_groups(&dev->kobj, securefw_groups);
+	if (ret)
+		return ret;
+
+	dev_info(dev, "securefw probed\r\n");
+	return ret;
+}
+
+/* Tear down the sysfs interface created in securefw_probe() */
+static int securefw_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+
+	sysfs_remove_groups(&dev->kobj, securefw_groups);
+	return 0;
+}
+
+/* Platform driver matched by name against the device registered below */
+static struct platform_driver securefw_driver = {
+ .driver = {
+ .name = "securefw",
+ },
+ .probe = securefw_probe,
+ .remove = securefw_remove,
+};
+
+/* Self-registered platform device (no DT node); freed on module exit */
+static struct platform_device *securefw_dev_reg;
+
+/**
+ * zynqmp_secure_init - Module init: register driver and its platform device
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int __init zynqmp_secure_init(void)
+{
+	int ret = platform_driver_register(&securefw_driver);
+
+	if (ret)
+		return ret;
+
+	securefw_dev_reg = platform_device_register_simple("securefw", -1,
+							   NULL, 0);
+	if (!IS_ERR(securefw_dev_reg))
+		return 0;
+
+	/* Device creation failed: undo the driver registration */
+	ret = PTR_ERR(securefw_dev_reg);
+	platform_driver_unregister(&securefw_driver);
+	return ret;
+}
+
+/* Module exit: unwind in reverse order of registration */
+static void __exit zynqmp_secure_exit(void)
+{
+	platform_device_unregister(securefw_dev_reg);
+	platform_driver_unregister(&securefw_driver);
+}
+
+module_init(zynqmp_secure_init);
+module_exit(zynqmp_secure_exit);
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 41b65164a367..8b7aaa793680 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -24,6 +24,8 @@
#include <linux/firmware/xlnx-zynqmp.h>
#include "zynqmp-debug.h"
+static unsigned long register_address;
+
static const struct zynqmp_eemi_ops *eemi_ops_tbl;
static bool feature_check_enabled;
@@ -504,6 +506,35 @@ static int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id)
}
/**
+ * versal_is_valid_ioctl() - Check whether IOCTL ID is valid or not for versal
+ * @ioctl_id: IOCTL ID
+ *
+ * Return: 1 if IOCTL is valid else 0
+ */
+static inline int versal_is_valid_ioctl(u32 ioctl_id)
+{
+ switch (ioctl_id) {
+ case IOCTL_GET_RPU_OPER_MODE:
+ case IOCTL_SET_RPU_OPER_MODE:
+ case IOCTL_RPU_BOOT_ADDR_CONFIG:
+ case IOCTL_TCM_COMB_CONFIG:
+ case IOCTL_SET_TAPDELAY_BYPASS:
+ case IOCTL_WRITE_GGS:
+ case IOCTL_READ_GGS:
+ case IOCTL_WRITE_PGGS:
+ case IOCTL_READ_PGGS:
+ case IOCTL_SET_BOOT_HEALTH_STATUS:
+ case IOCTL_PROBE_COUNTER_READ:
+ case IOCTL_PROBE_COUNTER_WRITE:
+ case IOCTL_USB_SET_STATE:
+ case IOCTL_OSPI_MUX_SELECT:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/**
* zynqmp_is_valid_ioctl() - Check whether IOCTL ID is valid or not
* @ioctl_id: IOCTL ID
*
@@ -518,6 +549,19 @@ static inline int zynqmp_is_valid_ioctl(u32 ioctl_id)
case IOCTL_GET_PLL_FRAC_MODE:
case IOCTL_SET_PLL_FRAC_DATA:
case IOCTL_GET_PLL_FRAC_DATA:
+ case IOCTL_GET_RPU_OPER_MODE:
+ case IOCTL_SET_RPU_OPER_MODE:
+ case IOCTL_RPU_BOOT_ADDR_CONFIG:
+ case IOCTL_TCM_COMB_CONFIG:
+ case IOCTL_SET_TAPDELAY_BYPASS:
+ case IOCTL_SET_SGMII_MODE:
+ case IOCTL_WRITE_GGS:
+ case IOCTL_READ_GGS:
+ case IOCTL_WRITE_PGGS:
+ case IOCTL_READ_PGGS:
+ case IOCTL_ULPI_RESET:
+ case IOCTL_SET_BOOT_HEALTH_STATUS:
+ case IOCTL_AFI:
return 1;
default:
return 0;
@@ -539,8 +583,13 @@ static inline int zynqmp_is_valid_ioctl(u32 ioctl_id)
static int zynqmp_pm_ioctl(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2,
u32 *out)
{
- if (!zynqmp_is_valid_ioctl(ioctl_id))
- return -EINVAL;
+ if (of_find_compatible_node(NULL, NULL, "xlnx,versal")) {
+ if (!versal_is_valid_ioctl(ioctl_id))
+ return -EINVAL;
+ } else {
+ if (!zynqmp_is_valid_ioctl(ioctl_id))
+ return -EINVAL;
+ }
return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, ioctl_id,
arg1, arg2, out);
@@ -733,6 +782,442 @@ static int zynqmp_pm_aes_engine(const u64 address, u32 *out)
return ret;
}
+/**
+ * zynqmp_pm_load_pdi - Ask the firmware to load and process a PDI image
+ * @src:	Source device where the PDI is located
+ * @address:	Physical address of the PDI image
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_load_pdi(const u32 src, const u64 address)
+{
+	const u32 addr_lo = lower_32_bits(address);
+	const u32 addr_hi = upper_32_bits(address);
+
+	return zynqmp_pm_invoke_fn(PM_LOAD_PDI, src, addr_lo, addr_hi,
+				   0, NULL);
+}
+
+/**
+ * zynqmp_pm_fpga_read - Perform the fpga configuration readback
+ * @reg_numframes: Configuration register offset (or) Number of frames to read
+ * @phys_address: Physical Address of the buffer
+ * @readback_type: Type of fpga readback operation
+ * @value: Value to read
+ *
+ * This function provides access to xilfpga library to perform
+ * fpga configuration readback.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_fpga_read(const u32 reg_numframes, const u64 phys_address,
+			       u32 readback_type, u32 *value)
+{
+	u32 ret_payload[PAYLOAD_ARG_CNT];
+	int ret;
+
+	if (!value)
+		return -EINVAL;
+
+	ret = zynqmp_pm_invoke_fn(PM_FPGA_READ, reg_numframes,
+				  lower_32_bits(phys_address),
+				  upper_32_bits(phys_address), readback_type,
+				  ret_payload);
+	/* Only copy out on success: the payload is not written on failure */
+	if (!ret)
+		*value = ret_payload[1];
+
+	return ret;
+}
+
+/**
+ * zynqmp_pm_sha_hash - Access the SHA engine to calculate the hash
+ * @address:	Address of the data / Address of output buffer where
+ *		hash should be stored.
+ * @size:	Size of the data.
+ * @flags:
+ *	BIT(0) - for initializing csudma driver and SHA3(Here address
+ *		 and size inputs can be NULL).
+ *	BIT(1) - to call Sha3_Update API which can be called multiple
+ *		 times when data is not contiguous.
+ *	BIT(2) - to get final hash of the whole updated data.
+ *		 Hash will be overwritten at provided address with
+ *		 48 bytes.
+ *
+ * Return: Returns status, either success or error code.
+ */
+static int zynqmp_pm_sha_hash(const u64 address, const u32 size,
+			      const u32 flags)
+{
+	/*
+	 * Use the kernel's lower_32_bits()/upper_32_bits() helpers directly
+	 * instead of local variables of the same name, which shadowed them.
+	 */
+	return zynqmp_pm_invoke_fn(PM_SECURE_SHA, upper_32_bits(address),
+				   lower_32_bits(address), size, flags, NULL);
+}
+
+/**
+ * zynqmp_pm_rsa - Access RSA hardware to encrypt/decrypt the data with RSA.
+ * @address:	Address of the data
+ * @size:	Size of the data.
+ * @flags:
+ *	BIT(0) - Encryption/Decryption
+ *		 0 - RSA decryption with private key
+ *		 1 - RSA encryption with public key.
+ *
+ * Return: Returns status, either success or error code.
+ */
+static int zynqmp_pm_rsa(const u64 address, const u32 size, const u32 flags)
+{
+	/* Avoid shadowing the lower_32_bits()/upper_32_bits() helpers */
+	return zynqmp_pm_invoke_fn(PM_SECURE_RSA, upper_32_bits(address),
+				   lower_32_bits(address), size, flags, NULL);
+}
+
+/**
+ * zynqmp_pm_request_suspend - Ask firmware to gracefully suspend another
+ *			       PU or subsystem
+ * @node:	Node ID of the targeted PU or subsystem
+ * @ack:	Flag to specify whether acknowledge is requested
+ * @latency:	Requested wakeup latency (not supported)
+ * @state:	Requested state (not supported)
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_request_suspend(const u32 node,
+				     const enum zynqmp_pm_request_ack ack,
+				     const u32 latency,
+				     const u32 state)
+{
+	return zynqmp_pm_invoke_fn(PM_REQUEST_SUSPEND, node, ack, latency,
+				   state, NULL);
+}
+
+/**
+ * zynqmp_pm_force_powerdown - Forcefully power down another PU or subsystem
+ * @target:	Node ID of the targeted PU or subsystem
+ * @ack:	Flag to specify whether acknowledge is requested
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_force_powerdown(const u32 target,
+				     const enum zynqmp_pm_request_ack ack)
+{
+	return zynqmp_pm_invoke_fn(PM_FORCE_POWERDOWN, target, ack,
+				   0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_request_wakeup - PM call to wake up selected master or subsystem
+ * @node:	Node ID of the master or subsystem
+ * @set_addr:	Specifies whether the address argument is relevant
+ * @address:	Address from which to resume when woken up
+ * @ack:	Flag to specify whether acknowledge requested
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_request_wakeup(const u32 node,
+				    const bool set_addr,
+				    const u64 address,
+				    const enum zynqmp_pm_request_ack ack)
+{
+	/*
+	 * set_addr flag is encoded into bit 0 of the low address word.
+	 * Split the u64 explicitly rather than relying on implicit
+	 * truncation in the u32 argument conversion.
+	 */
+	const u32 addr_lo = lower_32_bits(address) | set_addr;
+	const u32 addr_hi = upper_32_bits(address);
+
+	return zynqmp_pm_invoke_fn(PM_REQUEST_WAKEUP, node, addr_lo,
+				   addr_hi, ack, NULL);
+}
+
+/**
+ * zynqmp_pm_set_wakeup_source - Specify a wakeup source while suspended
+ * @target:	Node ID of the targeted PU or subsystem
+ * @wakeup_node: Node ID of the wakeup peripheral
+ * @enable:	Enable or disable the specified peripheral as wake source
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_set_wakeup_source(const u32 target,
+				       const u32 wakeup_node,
+				       const u32 enable)
+{
+	return zynqmp_pm_invoke_fn(PM_SET_WAKEUP_SOURCE, target, wakeup_node,
+				   enable, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_system_shutdown - Request a system shutdown or restart
+ * @type:	0 for shutdown, 1 for restart
+ * @subtype:	Which part of the system to restart or shut down
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype)
+{
+	return zynqmp_pm_invoke_fn(PM_SYSTEM_SHUTDOWN, type, subtype, 0, 0,
+				   NULL);
+}
+
+/**
+ * zynqmp_pm_set_max_latency - Set wakeup latency requirements for a slave
+ * @node:	Node ID of the slave
+ * @latency:	Requested maximum wakeup latency
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_set_max_latency(const u32 node, const u32 latency)
+{
+	return zynqmp_pm_invoke_fn(PM_SET_MAX_LATENCY, node, latency, 0, 0,
+				   NULL);
+}
+
+/**
+ * zynqmp_pm_set_configuration - Hand the firmware a system configuration
+ * @physical_addr: Physical 32-bit address of the data structure in memory
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_set_configuration(const u32 physical_addr)
+{
+	return zynqmp_pm_invoke_fn(PM_SET_CONFIGURATION, physical_addr,
+				   0, 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_get_node_status - PM call to request a node's current power state
+ * @node:	ID of the component or sub-system in question
+ * @status:	Current operating state of the requested node
+ * @requirements: Current requirements asserted on the node,
+ *		used for slave nodes only.
+ * @usage:	Usage information, used for slave nodes only:
+ *		PM_USAGE_NO_MASTER	- No master is currently using
+ *					  the node
+ *		PM_USAGE_CURRENT_MASTER	- Only requesting master is
+ *					  currently using the node
+ *		PM_USAGE_OTHER_MASTER	- Only other masters are
+ *					  currently using the node
+ *		PM_USAGE_BOTH_MASTERS	- Both the current and at least
+ *					  one other master is currently
+ *					  using the node
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_get_node_status(const u32 node, u32 *const status,
+				     u32 *const requirements, u32 *const usage)
+{
+	u32 ret_payload[PAYLOAD_ARG_CNT];
+	int ret;
+
+	if (!status)
+		return -EINVAL;
+
+	ret = zynqmp_pm_invoke_fn(PM_GET_NODE_STATUS, node, 0, 0,
+				  0, ret_payload);
+	/* Bail out before touching a payload that was never written */
+	if (ret)
+		return ret;
+
+	if (ret_payload[0] == XST_PM_SUCCESS) {
+		*status = ret_payload[1];
+		if (requirements)
+			*requirements = ret_payload[2];
+		if (usage)
+			*usage = ret_payload[3];
+	}
+
+	return ret;
+}
+
+/**
+ * zynqmp_pm_get_operating_characteristic - PM call to request operating
+ *					    characteristic information
+ * @node:	Node ID of the slave
+ * @type:	Type of the operating characteristic requested
+ * @result:	Used to return the requested operating characteristic
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_get_operating_characteristic(const u32 node,
+				const enum zynqmp_pm_opchar_type type,
+				u32 *const result)
+{
+	u32 ret_payload[PAYLOAD_ARG_CNT];
+	int ret;
+
+	if (!result)
+		return -EINVAL;
+
+	ret = zynqmp_pm_invoke_fn(PM_GET_OPERATING_CHARACTERISTIC,
+				  node, type, 0, 0, ret_payload);
+	/* Don't read ret_payload if the call failed: it may be uninitialized */
+	if (ret)
+		return ret;
+
+	if (ret_payload[0] == XST_PM_SUCCESS)
+		*result = ret_payload[1];
+
+	return ret;
+}
+
+/**
+ * zynqmp_pm_pinctrl_request - Request exclusive use of a pin from firmware
+ * @pin: Pin number to request
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+static int zynqmp_pm_pinctrl_request(const u32 pin)
+{
+	return zynqmp_pm_invoke_fn(PM_PINCTRL_REQUEST, pin, 0, 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_pinctrl_release - Tell firmware that control of a pin is released
+ * @pin: Pin number to release
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+static int zynqmp_pm_pinctrl_release(const u32 pin)
+{
+	return zynqmp_pm_invoke_fn(PM_PINCTRL_RELEASE, pin, 0, 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_pinctrl_get_function - Read function id set for the given pin
+ * @pin: Pin number
+ * @id:  Buffer to store function ID
+ *
+ * This function provides the function currently set for the given pin.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id)
+{
+	u32 ret_payload[PAYLOAD_ARG_CNT];
+	int ret;
+
+	if (!id)
+		return -EINVAL;
+
+	ret = zynqmp_pm_invoke_fn(PM_PINCTRL_GET_FUNCTION, pin, 0,
+				  0, 0, ret_payload);
+	/* Only copy out on success; payload may be uninitialized on error */
+	if (!ret)
+		*id = ret_payload[1];
+
+	return ret;
+}
+
+/**
+ * zynqmp_pm_pinctrl_set_function - Select the function for the given pin
+ * @pin: Pin number
+ * @id:  Function ID to set
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+static int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id)
+{
+	return zynqmp_pm_invoke_fn(PM_PINCTRL_SET_FUNCTION, pin, id, 0, 0,
+				   NULL);
+}
+
+/**
+ * zynqmp_pm_pinctrl_get_config - Get configuration parameter for the pin
+ * @pin:	Pin number
+ * @param:	Parameter to get
+ * @value:	Buffer to store parameter value
+ *
+ * This function gets requested configuration parameter for the given pin.
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+static int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param,
+					u32 *value)
+{
+	u32 ret_payload[PAYLOAD_ARG_CNT];
+	int ret;
+
+	if (!value)
+		return -EINVAL;
+
+	ret = zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_GET, pin, param,
+				  0, 0, ret_payload);
+	/* Only copy out on success; payload may be uninitialized on error */
+	if (!ret)
+		*value = ret_payload[1];
+
+	return ret;
+}
+
+/**
+ * zynqmp_pm_pinctrl_set_config - Set configuration parameter for the pin
+ * @pin:	Pin number
+ * @param:	Parameter to set
+ * @value:	Parameter value to set
+ *
+ * Return: Returns status, either success or error+reason.
+ */
+static int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
+					u32 value)
+{
+	return zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_SET, pin, param,
+				   value, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_config_reg_access - PM Config API for Config register access
+ * @register_access_id:	ID of the requested REGISTER_ACCESS
+ * @address:	Address of the register to be accessed
+ * @mask:	Mask to be written to the register
+ * @value:	Value to be written to the register
+ * @out:	Returned output value
+ *
+ * This function calls REGISTER_ACCESS to configure CSU/PMU registers.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_config_reg_access(u32 register_access_id, u32 address,
+				       u32 mask, u32 value, u32 *out)
+{
+	/*
+	 * The blank line that separated the kerneldoc from the function
+	 * (breaking the kerneldoc association) has been removed.
+	 */
+	return zynqmp_pm_invoke_fn(PM_REGISTER_ACCESS, register_access_id,
+				   address, mask, value, out);
+}
+
+/**
+ * zynqmp_pm_efuse_access - Provides access to efuse memory.
+ * @address:	Address of the efuse params structure
+ * @out:	Returned output value
+ *
+ * Return: Returns status, either success or error code.
+ */
+static int zynqmp_pm_efuse_access(const u64 address, u32 *out)
+{
+	u32 ret_payload[PAYLOAD_ARG_CNT];
+	int ret;
+
+	if (!out)
+		return -EINVAL;
+
+	ret = zynqmp_pm_invoke_fn(PM_EFUSE_ACCESS, upper_32_bits(address),
+				  lower_32_bits(address), 0, 0, ret_payload);
+	/* Only copy out on success; payload may be uninitialized on error */
+	if (!ret)
+		*out = ret_payload[1];
+
+	return ret;
+}
+
+/**
+ * zynqmp_pm_secure_load - Ask firmware to authenticate/decrypt an image
+ * @src_addr:	Physical address of the image to process
+ * @key_addr:	Physical address of the user key, or 0 to use the device key
+ * @dst:	Output: address of the processed image reported by firmware
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_secure_load(const u64 src_addr, u64 key_addr, u64 *dst)
+{
+	u32 ret_payload[PAYLOAD_ARG_CNT];
+	int ret_value;
+
+	if (!dst)
+		return -EINVAL;
+
+	ret_value = zynqmp_pm_invoke_fn(PM_SECURE_IMAGE,
+					lower_32_bits(src_addr),
+					upper_32_bits(src_addr),
+					lower_32_bits(key_addr),
+					upper_32_bits(key_addr),
+					ret_payload);
+	/* Only report a destination when the call actually produced one */
+	if (!ret_value)
+		*dst = ((u64)ret_payload[1] << 32) | ret_payload[2];
+
+	return ret_value;
+}
+
static const struct zynqmp_eemi_ops eemi_ops = {
.get_api_version = zynqmp_pm_get_api_version,
.get_chipid = zynqmp_pm_get_chipid,
@@ -756,7 +1241,29 @@ static const struct zynqmp_eemi_ops eemi_ops = {
.set_requirement = zynqmp_pm_set_requirement,
.fpga_load = zynqmp_pm_fpga_load,
.fpga_get_status = zynqmp_pm_fpga_get_status,
+ .fpga_read = zynqmp_pm_fpga_read,
+ .sha_hash = zynqmp_pm_sha_hash,
+ .rsa = zynqmp_pm_rsa,
+ .request_suspend = zynqmp_pm_request_suspend,
+ .force_powerdown = zynqmp_pm_force_powerdown,
+ .request_wakeup = zynqmp_pm_request_wakeup,
+ .set_wakeup_source = zynqmp_pm_set_wakeup_source,
+ .system_shutdown = zynqmp_pm_system_shutdown,
+ .set_max_latency = zynqmp_pm_set_max_latency,
+ .set_configuration = zynqmp_pm_set_configuration,
+ .get_node_status = zynqmp_pm_get_node_status,
+ .get_operating_characteristic = zynqmp_pm_get_operating_characteristic,
+ .pinctrl_request = zynqmp_pm_pinctrl_request,
+ .pinctrl_release = zynqmp_pm_pinctrl_release,
+ .pinctrl_get_function = zynqmp_pm_pinctrl_get_function,
+ .pinctrl_set_function = zynqmp_pm_pinctrl_set_function,
+ .pinctrl_get_config = zynqmp_pm_pinctrl_get_config,
+ .pinctrl_set_config = zynqmp_pm_pinctrl_set_config,
+ .register_access = zynqmp_pm_config_reg_access,
.aes = zynqmp_pm_aes_engine,
+ .efuse_access = zynqmp_pm_efuse_access,
+ .pdi_load = zynqmp_pm_load_pdi,
+ .secure_image = zynqmp_pm_secure_load,
};
/**
@@ -774,6 +1281,350 @@ const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void)
}
EXPORT_SYMBOL_GPL(zynqmp_pm_get_eemi_ops);
+/**
+ * struct zynqmp_pm_shutdown_scope - Struct for shutdown scope
+ * @subtype: Shutdown subtype
+ * @name: Matching string for scope argument
+ *
+ * This struct encapsulates mapping between shutdown scope ID and string.
+ */
+struct zynqmp_pm_shutdown_scope {
+ const enum zynqmp_pm_shutdown_subtype subtype;
+ const char *name;
+};
+
+static struct zynqmp_pm_shutdown_scope shutdown_scopes[] = {
+ [ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM] = {
+ .subtype = ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM,
+ .name = "subsystem",
+ },
+ [ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY] = {
+ .subtype = ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY,
+ .name = "ps_only",
+ },
+ [ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM] = {
+ .subtype = ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM,
+ .name = "system",
+ },
+};
+
+static struct zynqmp_pm_shutdown_scope *selected_scope =
+ &shutdown_scopes[ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM];
+
+/**
+ * zynqmp_pm_is_shutdown_scope_valid - Check if shutdown scope string is valid
+ * @scope_string: Shutdown scope string
+ *
+ * Return: Return pointer to matching shutdown scope struct from
+ * array of available options in system if string is valid,
+ * otherwise returns NULL.
+ */
+static struct zynqmp_pm_shutdown_scope*
+ zynqmp_pm_is_shutdown_scope_valid(const char *scope_string)
+{
+ size_t idx;
+
+ /* Compare the user string against every known scope name */
+ for (idx = 0; idx < ARRAY_SIZE(shutdown_scopes); idx++) {
+ struct zynqmp_pm_shutdown_scope *scope = &shutdown_scopes[idx];
+
+ if (sysfs_streq(scope_string, scope->name))
+ return scope;
+ }
+
+ return NULL;
+}
+
+/**
+ * shutdown_scope_show - Show shutdown_scope sysfs attribute
+ * @kobj: Kobject structure
+ * @attr: Kobject attribute structure
+ * @buf: Requested available shutdown_scope attributes string
+ *
+ * User-space interface for viewing the available scope options for system
+ * shutdown. Scope option for next shutdown call is marked with [].
+ *
+ * Usage: cat /sys/firmware/zynqmp/shutdown_scope
+ *
+ * Return: Number of bytes printed into the buffer.
+ */
+static ssize_t shutdown_scope_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ ssize_t len = 0;
+ int i;
+
+ /*
+ * Build the list with scnprintf() instead of strcat(): the sysfs
+ * buffer is not guaranteed to hold a NUL-terminated string on entry,
+ * so strcat() could append at an arbitrary offset. scnprintf() also
+ * bounds the total output to PAGE_SIZE.
+ */
+ for (i = 0; i < ARRAY_SIZE(shutdown_scopes); i++) {
+ if (&shutdown_scopes[i] == selected_scope)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "[%s] ",
+ shutdown_scopes[i].name);
+ else
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%s ",
+ shutdown_scopes[i].name);
+ }
+ len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+
+ return len;
+}
+
+/**
+ * shutdown_scope_store - Store shutdown_scope sysfs attribute
+ * @kobj: Kobject structure
+ * @attr: Kobject attribute structure
+ * @buf: User entered shutdown_scope attribute string
+ * @count: Buffer size
+ *
+ * User-space interface for setting the scope for the next system shutdown.
+ * Usage: echo <scope> > /sys/firmware/zynqmp/shutdown_scope
+ *
+ * The Linux shutdown functionality implemented via PSCI system_off does not
+ * include an option to set a scope, i.e. which parts of the system to shut
+ * down.
+ *
+ * This API function allows to set the shutdown scope for the next shutdown
+ * request by passing it to the ATF running in EL3. When the next shutdown
+ * is performed, the platform specific portion of PSCI-system_off can use
+ * the chosen shutdown scope.
+ *
+ * subsystem: Only the APU along with all of its peripherals not used by other
+ * processing units will be shut down. This may result in the FPD
+ * power domain being shut down provided that no other processing
+ * unit uses FPD peripherals or DRAM.
+ * ps_only: The complete PS will be shut down, including the RPU, PMU, etc.
+ * Only the PL domain (FPGA) remains untouched.
+ * system: The complete system/device is shut down.
+ *
+ * Return: count argument if request succeeds, the corresponding error
+ * code otherwise
+ */
+static ssize_t shutdown_scope_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct zynqmp_pm_shutdown_scope *new_scope;
+ int ret;
+
+ /* Reject anything that is not a known scope keyword */
+ new_scope = zynqmp_pm_is_shutdown_scope_valid(buf);
+ if (!new_scope)
+ return -EINVAL;
+
+ /* Tell the firmware about the scope before remembering it */
+ ret = zynqmp_pm_system_shutdown(ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY,
+ new_scope->subtype);
+ if (ret) {
+ pr_err("unable to set shutdown scope %s\n", buf);
+ return ret;
+ }
+
+ selected_scope = new_scope;
+
+ return count;
+}
+
+static struct kobj_attribute zynqmp_attr_shutdown_scope =
+ __ATTR_RW(shutdown_scope);
+
+/**
+ * health_status_store - Store health_status sysfs attribute
+ * @kobj: Kobject structure
+ * @attr: Kobject attribute structure
+ * @buf: User entered health_status attribute string
+ * @count: Buffer size
+ *
+ * User-space interface for setting the boot health status.
+ * Usage: echo <value> > /sys/firmware/zynqmp/health_status
+ *
+ * Value:
+ * 1 - Set healthy bit to 1
+ * 0 - Unset healthy bit
+ *
+ * Return: count argument if request succeeds, the corresponding error
+ * code otherwise
+ */
+static ssize_t health_status_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int val;
+ int ret;
+
+ /* The healthy bit is written as a decimal integer (0 or 1) */
+ ret = kstrtouint(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ ret = zynqmp_pm_ioctl(0, IOCTL_SET_BOOT_HEALTH_STATUS, val, 0, NULL);
+ if (ret) {
+ pr_err("unable to set healthy bit value to %u\n", val);
+ return ret;
+ }
+
+ return count;
+}
+
+static struct kobj_attribute zynqmp_attr_health_status =
+ __ATTR_WO(health_status);
+
+/**
+ * config_reg_store - Write config_reg sysfs attribute
+ * @kobj: Kobject structure
+ * @attr: Kobject attribute structure
+ * @buf: User entered config_reg attribute string
+ * @count: Buffer size
+ *
+ * User-space interface for setting the config register.
+ *
+ * To write any CSU/PMU register
+ * echo <address> <mask> <values> > /sys/firmware/zynqmp/config_reg
+ * Usage:
+ * echo 0x345AB234 0xFFFFFFFF 0x1234ABCD > /sys/firmware/zynqmp/config_reg
+ *
+ * To Read any CSU/PMU register, write address to the variable like below
+ * echo <address> > /sys/firmware/zynqmp/config_reg
+ *
+ * Return: count argument if request succeeds, the corresponding error
+ * code otherwise
+ */
+static ssize_t config_reg_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ char *kern_buff, *inbuf, *tok;
+ unsigned long address, value, mask;
+ int ret = 0;
+
+ /*
+ * kstrndup() allocates count + 1 bytes and NUL-terminates, so the
+ * whole user string is kept for strsep() parsing. The previous
+ * kzalloc(count)/strlcpy(..., count) pair silently dropped the last
+ * character, and strlcpy() returns size_t so its "< 0" check could
+ * never fire.
+ */
+ kern_buff = kstrndup(buf, count, GFP_KERNEL);
+ if (!kern_buff)
+ return -ENOMEM;
+ inbuf = kern_buff;
+
+ /* Read the address (hex); kstrtoul matches the unsigned long dest */
+ tok = strsep(&inbuf, " ");
+ if (!tok) {
+ ret = -EFAULT;
+ goto err;
+ }
+ ret = kstrtoul(tok, 16, &address);
+ if (ret) {
+ ret = -EFAULT;
+ goto err;
+ }
+ /* Read the write value */
+ tok = strsep(&inbuf, " ");
+ /*
+ * If only an address was provided, this is a read request: remember
+ * the address for a later config_reg_show() and report success.
+ */
+ if (!tok) {
+ register_address = address;
+ goto err;
+ }
+ register_address = address;
+
+ ret = kstrtoul(tok, 16, &mask);
+ if (ret) {
+ ret = -EFAULT;
+ goto err;
+ }
+ tok = strsep(&inbuf, " ");
+ if (!tok) {
+ ret = -EFAULT;
+ goto err;
+ }
+ ret = kstrtoul(tok, 16, &value);
+ if (ret) {
+ ret = -EFAULT;
+ goto err;
+ }
+ ret = zynqmp_pm_config_reg_access(CONFIG_REG_WRITE, address,
+ mask, value, NULL);
+ if (ret)
+ pr_err("unable to write 0x%lx to address 0x%lx\n",
+ value, address);
+err:
+ kfree(kern_buff);
+ if (ret)
+ return ret;
+ return count;
+}
+
+/**
+ * config_reg_show - Read config_reg sysfs attribute
+ * @kobj: Kobject structure
+ * @attr: Kobject attribute structure
+ * @buf: Output buffer that receives the register value string
+ *
+ * User-space interface for getting the config register.
+ *
+ * To Read any CSU/PMU register, write address to the variable like below
+ * echo <address> > /sys/firmware/zynqmp/config_reg
+ *
+ * Then Read the address using below command
+ * cat /sys/firmware/zynqmp/config_reg
+ *
+ * Return: number of chars written to buf.
+ */
+static ssize_t config_reg_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+
+ /*
+ * Reads the register last selected through config_reg_store();
+ * register_address is the module-level variable set there.
+ */
+ ret = zynqmp_pm_config_reg_access(CONFIG_REG_READ, register_address,
+ 0, 0, ret_payload);
+ if (ret)
+ return ret;
+
+ /* Payload word 1 carries the register value */
+ return sprintf(buf, "0x%x\n", ret_payload[1]);
+}
+
+static struct kobj_attribute zynqmp_attr_config_reg =
+ __ATTR_RW(config_reg);
+
+static struct attribute *attrs[] = {
+ &zynqmp_attr_shutdown_scope.attr,
+ &zynqmp_attr_health_status.attr,
+ &zynqmp_attr_config_reg.attr,
+ NULL,
+};
+
+/*
+ * Note: the stray positional "NULL," that followed the designated
+ * initializer was dropped - mixing positional with designated entries
+ * silently initializes whichever member follows .attrs.
+ */
+static const struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+/*
+ * zynqmp_pm_sysfs_init - Create /sys/firmware/zynqmp and its attributes
+ *
+ * Return: 0 on success, negative error code otherwise. On failure the
+ * kobject reference is dropped so the sysfs directory does not leak.
+ */
+static int zynqmp_pm_sysfs_init(void)
+{
+ struct kobject *zynqmp_kobj;
+ int ret;
+
+ zynqmp_kobj = kobject_create_and_add("zynqmp", firmware_kobj);
+ if (!zynqmp_kobj) {
+ pr_err("zynqmp: Firmware kobj add failed.\n");
+ return -ENOMEM;
+ }
+
+ ret = sysfs_create_group(zynqmp_kobj, &attr_group);
+ if (ret) {
+ pr_err("%s() sysfs creation fail with error %d\n",
+ __func__, ret);
+ goto err;
+ }
+
+ ret = zynqmp_pm_ggs_init(zynqmp_kobj);
+ if (ret) {
+ pr_err("%s() GGS init fail with error %d\n",
+ __func__, ret);
+ goto err;
+ }
+
+ return 0;
+err:
+ /* Drop the reference so the "zynqmp" dir is removed on failure */
+ kobject_put(zynqmp_kobj);
+ return ret;
+}
+
static int zynqmp_firmware_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -823,6 +1674,12 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
/* Assign eemi_ops_table */
eemi_ops_tbl = &eemi_ops;
+ ret = zynqmp_pm_sysfs_init();
+ if (ret) {
+ pr_err("%s() sysfs init fail with error %d\n", __func__, ret);
+ return ret;
+ }
+
zynqmp_pm_api_debugfs_init();
ret = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, firmware_devs,
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 72380e1d31c7..a2f02533176d 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -12,6 +12,16 @@ menuconfig FPGA
if FPGA
+config FPGA_MGR_DEBUG_FS
+ tristate "FPGA debug fs"
+ select DEBUG_FS
+ help
+ Say Y here if you want to expose a DebugFS interface for the
+ FPGA Manager Framework. FPGA manager DebugFS provides a user
+ interface to read the fpga specific configuration information.
+
+ If unsure, say N.
+
config FPGA_MGR_SOCFPGA
tristate "Altera SOCFPGA FPGA Manager"
depends on ARCH_SOCFPGA || COMPILE_TEST
@@ -91,6 +101,24 @@ config FPGA_MGR_TS73XX
FPGA manager driver support for the Altera Cyclone II FPGA
present on the TS-73xx SBC boards.
+config FPGA_MGR_ZYNQ_AFI_FPGA
+ bool "Xilinx AFI FPGA"
+ depends on FPGA_MGR_ZYNQ_FPGA
+ help
+ Zynq AFI driver support for writing to the AFI registers
+ for configuring the PS_PL interface. For some of the bitstream
+ or designs to work the PS to PL interfaces need to be configured
+ like the data bus-width etc.
+
+config XILINX_AFI_FPGA
+ bool "Xilinx AFI FPGA"
+ depends on FPGA_MGR_ZYNQMP_FPGA || COMPILE_TEST
+ help
+ FPGA manager driver support for writing to the AFI registers
+ for configuring the PS_PL interface. For some of the bitstream
+ or designs to work the PS to PL interfaces need to be configured
+ like the datawidth etc.
+
config FPGA_BRIDGE
tristate "FPGA Bridge Framework"
help
@@ -215,4 +243,13 @@ config FPGA_MGR_ZYNQMP_FPGA
to configure the programmable logic(PL) through PS
on ZynqMP SoC.
+config FPGA_MGR_VERSAL_FPGA
+ tristate "Xilinx Versal FPGA"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ help
+ Select this option to enable FPGA manager driver support for
+ Xilinx Versal SOC. This driver uses the versal soc firmware
+ interface to load programmable logic(PL) images
+ on versal soc.
+
endif # FPGA
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index 4865b74b00a4..ba9839cec7e9 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -17,7 +17,10 @@ obj-$(CONFIG_FPGA_MGR_STRATIX10_SOC) += stratix10-soc.o
obj-$(CONFIG_FPGA_MGR_TS73XX) += ts73xx-fpga.o
obj-$(CONFIG_FPGA_MGR_XILINX_SPI) += xilinx-spi.o
obj-$(CONFIG_FPGA_MGR_ZYNQ_FPGA) += zynq-fpga.o
+obj-$(CONFIG_FPGA_MGR_ZYNQ_AFI_FPGA) += zynq-afi.o
obj-$(CONFIG_FPGA_MGR_ZYNQMP_FPGA) += zynqmp-fpga.o
+obj-$(CONFIG_FPGA_MGR_VERSAL_FPGA) += versal-fpga.o
+obj-$(CONFIG_XILINX_AFI_FPGA) += xilinx-afi.o
obj-$(CONFIG_ALTERA_PR_IP_CORE) += altera-pr-ip-core.o
obj-$(CONFIG_ALTERA_PR_IP_CORE_PLAT) += altera-pr-ip-core-plat.o
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index 4bab9028940a..6422a58b4793 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -13,6 +13,12 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
+/* For enabling manual bridge set(enable/disable) function */
+#ifdef CONFIG_DEBUG_KERNEL
+#undef DEBUG
+#define DEBUG
+#endif
+
static DEFINE_IDA(fpga_bridge_ida);
static struct class *fpga_bridge_class;
@@ -304,9 +310,33 @@ static ssize_t state_show(struct device *dev,
static DEVICE_ATTR_RO(name);
static DEVICE_ATTR_RO(state);
+#ifdef DEBUG
+/*
+ * set_store - Manually enable/disable the bridge (debug only)
+ *
+ * Parses a hex value; any non-zero value enables the bridge. Errors
+ * from the low-level enable_set() op are now propagated instead of
+ * being discarded into the parsed value.
+ */
+static ssize_t set_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fpga_bridge *bridge = to_fpga_bridge(dev);
+ long enable;
+ int ret;
+
+ ret = kstrtol(buf, 16, &enable);
+ if (ret)
+ return ret;
+
+ if (bridge->br_ops && bridge->br_ops->enable_set) {
+ ret = bridge->br_ops->enable_set(bridge, !!enable);
+ if (ret)
+ return ret;
+ }
+
+ return count;
+}
+static DEVICE_ATTR_WO(set);
+#endif
+
static struct attribute *fpga_bridge_attrs[] = {
&dev_attr_name.attr,
&dev_attr_state.attr,
+#ifdef DEBUG
+ &dev_attr_set.attr,
+#endif
NULL,
};
ATTRIBUTE_GROUPS(fpga_bridge);
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index e05104f5e40c..f1f5e722831a 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -8,6 +8,8 @@
* With code from the mailing list:
* Copyright (C) 2013 Xilinx, Inc.
*/
+#include <linux/dma-buf.h>
+#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/fpga/fpga-mgr.h>
#include <linux/idr.h>
@@ -302,6 +304,39 @@ static int fpga_mgr_buf_load(struct fpga_manager *mgr,
return rc;
}
+/*
+ * fpga_dmabuf_load - Program the FPGA from the dma-buf held in mgr->dmabuf
+ * @mgr: fpga manager
+ * @info: fpga image info; info->sgt is set to the mapped table
+ *
+ * Attaches and maps the dma-buf, then hands the scatter-gather table to
+ * fpga_mgr_buf_load_sg(). The success path deliberately falls through
+ * the fail_detach/fail_put labels so attach/detach and get/put stay
+ * balanced; mgr->dmabuf's reference (taken in fpga_dmabuf_fd_get()) is
+ * always dropped here.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int fpga_dmabuf_load(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ int ret;
+
+ /* create attachment for dmabuf with the user device */
+ attach = dma_buf_attach(mgr->dmabuf, &mgr->dev);
+ if (IS_ERR(attach)) {
+ pr_err("failed to attach dmabuf\n");
+ ret = PTR_ERR(attach);
+ goto fail_put;
+ }
+
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto fail_detach;
+ }
+
+ info->sgt = sgt;
+ ret = fpga_mgr_buf_load_sg(mgr, info, info->sgt);
+ dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+
+fail_detach:
+ dma_buf_detach(mgr->dmabuf, attach);
+fail_put:
+ dma_buf_put(mgr->dmabuf);
+
+ return ret;
+}
+
/**
* fpga_mgr_firmware_load - request firmware and load to fpga
* @mgr: fpga manager
@@ -328,6 +363,10 @@ static int fpga_mgr_firmware_load(struct fpga_manager *mgr,
mgr->state = FPGA_MGR_STATE_FIRMWARE_REQ;
+ /* flags indicates whether to do full or partial reconfiguration */
+ info->flags = mgr->flags;
+ memcpy(info->key, mgr->key, ENCRYPTED_KEY_LEN);
+
ret = request_firmware(&fw, image_name, dev);
if (ret) {
mgr->state = FPGA_MGR_STATE_FIRMWARE_REQ_ERR;
@@ -354,6 +393,8 @@ static int fpga_mgr_firmware_load(struct fpga_manager *mgr,
*/
int fpga_mgr_load(struct fpga_manager *mgr, struct fpga_image_info *info)
{
+ if (info->flags & FPGA_MGR_CONFIG_DMA_BUF)
+ return fpga_dmabuf_load(mgr, info);
if (info->sgt)
return fpga_mgr_buf_load_sg(mgr, info, info->sgt);
if (info->buf && info->count)
@@ -428,18 +469,105 @@ static ssize_t status_show(struct device *dev,
len += sprintf(buf + len, "reconfig IP protocol error\n");
if (status & FPGA_MGR_STATUS_FIFO_OVERFLOW_ERR)
len += sprintf(buf + len, "reconfig fifo overflow error\n");
+ if (status & FPGA_MGR_STATUS_SECURITY_ERR)
+ len += sprintf(buf + len, "reconfig security error\n");
+ if (status & FPGA_MGR_STATUS_DEVICE_INIT_ERR)
+ len += sprintf(buf + len,
+ "initialization has not finished\n");
+ if (status & FPGA_MGR_STATUS_SIGNAL_ERR)
+ len += sprintf(buf + len, "device internal signal error\n");
+ if (status & FPGA_MGR_STATUS_HIGH_Z_STATE_ERR)
+ len += sprintf(buf + len,
+ "all I/Os are placed in High-Z state\n");
+ if (status & FPGA_MGR_STATUS_EOS_ERR)
+ len += sprintf(buf + len,
+ "start-up sequence has not finished\n");
+ if (status & FPGA_MGR_STATUS_FIRMWARE_REQ_ERR)
+ len += sprintf(buf + len, "firmware request error\n");
return len;
}
+static ssize_t firmware_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fpga_manager *mgr = to_fpga_manager(dev);
+ unsigned int len;
+ char image_name[NAME_MAX];
+ int ret;
+
+ /* struct with information about the FPGA image to program. */
+ struct fpga_image_info info = {0};
+
+ /*
+ * The sysfs buffer can be up to PAGE_SIZE; reject names that do
+ * not fit (with NUL) instead of overflowing image_name with the
+ * previous unbounded strcpy().
+ */
+ len = strlen(buf);
+ if (len >= NAME_MAX)
+ return -EINVAL;
+ memcpy(image_name, buf, len + 1);
+
+ /* lose terminating \n (guard the empty-string case) */
+ if (len && image_name[len - 1] == '\n')
+ image_name[len - 1] = 0;
+
+ ret = fpga_mgr_firmware_load(mgr, &info, image_name);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t key_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fpga_manager *mgr = to_fpga_manager(dev);
+
+ return snprintf(buf, ENCRYPTED_KEY_LEN + 1, "%s\n", mgr->key);
+}
+
+static ssize_t key_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fpga_manager *mgr = to_fpga_manager(dev);
+
+ /*
+ * mgr->key is ENCRYPTED_KEY_LEN bytes; the sysfs write can be up
+ * to PAGE_SIZE, so bound the copy instead of overflowing the key
+ * buffer.
+ */
+ if (count > ENCRYPTED_KEY_LEN)
+ return -EINVAL;
+
+ memcpy(mgr->key, buf, count);
+
+ return count;
+}
+
+static ssize_t flags_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fpga_manager *mgr = to_fpga_manager(dev);
+
+ return sprintf(buf, "%lx\n", mgr->flags);
+}
+
+static ssize_t flags_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fpga_manager *mgr = to_fpga_manager(dev);
+ int ret;
+
+ ret = kstrtol(buf, 16, &mgr->flags);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
static DEVICE_ATTR_RO(name);
static DEVICE_ATTR_RO(state);
static DEVICE_ATTR_RO(status);
+static DEVICE_ATTR_WO(firmware);
+static DEVICE_ATTR_RW(flags);
+static DEVICE_ATTR_RW(key);
static struct attribute *fpga_mgr_attrs[] = {
&dev_attr_name.attr,
&dev_attr_state.attr,
&dev_attr_status.attr,
+ &dev_attr_firmware.attr,
+ &dev_attr_flags.attr,
+ &dev_attr_key.attr,
NULL,
};
ATTRIBUTE_GROUPS(fpga_mgr);
@@ -512,6 +640,104 @@ void fpga_mgr_put(struct fpga_manager *mgr)
}
EXPORT_SYMBOL_GPL(fpga_mgr_put);
+#ifdef CONFIG_FPGA_MGR_DEBUG_FS
+#include <linux/debugfs.h>
+
+static int fpga_mgr_read(struct seq_file *s, void *data)
+{
+ struct fpga_manager *mgr = (struct fpga_manager *)s->private;
+ int ret = 0;
+
+ if (!mgr->mops->read)
+ return -ENOENT;
+
+ if (!mutex_trylock(&mgr->ref_mutex))
+ return -EBUSY;
+
+ if (mgr->state != FPGA_MGR_STATE_OPERATING) {
+ ret = -EPERM;
+ goto err_unlock;
+ }
+
+ /* Read the FPGA configuration data from the fabric */
+ ret = mgr->mops->read(mgr, s);
+ if (ret)
+ dev_err(&mgr->dev, "Error while reading configuration data from FPGA\n");
+
+err_unlock:
+ mutex_unlock(&mgr->ref_mutex);
+
+ return ret;
+}
+
+static int fpga_mgr_read_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, fpga_mgr_read, inode->i_private);
+}
+
+static const struct file_operations fpga_mgr_ops_image = {
+ .owner = THIS_MODULE,
+ .open = fpga_mgr_read_open,
+ .read = seq_read,
+};
+#endif
+
+/*
+ * fpga_dmabuf_fd_get - Resolve a user-supplied dma-buf fd for the manager
+ * @file: character-device file; private_data is the fpga_manager
+ * @argp: user pointer to an int dma-buf file descriptor
+ *
+ * Stores the dma_buf reference in mgr->dmabuf. The reference taken by
+ * dma_buf_get() is dropped later in fpga_dmabuf_load().
+ *
+ * Return: 0 on success, -EFAULT/-EINVAL on bad user pointer or fd.
+ */
+static int fpga_dmabuf_fd_get(struct file *file, char __user *argp)
+{
+ struct fpga_manager *mgr = (struct fpga_manager *)(file->private_data);
+ int buffd;
+
+ if (copy_from_user(&buffd, argp, sizeof(buffd)))
+ return -EFAULT;
+
+ mgr->dmabuf = dma_buf_get(buffd);
+ if (IS_ERR_OR_NULL(mgr->dmabuf))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int fpga_device_open(struct inode *inode, struct file *file)
+{
+ struct miscdevice *miscdev = file->private_data;
+ struct fpga_manager *mgr = container_of(miscdev,
+ struct fpga_manager, miscdev);
+
+ file->private_data = mgr;
+
+ return 0;
+}
+
+static int fpga_device_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static long fpga_device_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ char __user *argp = (char __user *)arg;
+ int err;
+
+ switch (cmd) {
+ case FPGA_IOCTL_LOAD_DMA_BUFF:
+ err = fpga_dmabuf_fd_get(file, argp);
+ break;
+ default:
+ err = -ENOTTY;
+ }
+
+ return err;
+}
+
+static const struct file_operations fpga_fops = {
+ .owner = THIS_MODULE,
+ .open = fpga_device_open,
+ .release = fpga_device_release,
+ .unlocked_ioctl = fpga_device_ioctl,
+ .compat_ioctl = fpga_device_ioctl,
+};
+
/**
* fpga_mgr_lock - Lock FPGA manager for exclusive use
* @mgr: fpga manager
@@ -565,8 +791,7 @@ struct fpga_manager *fpga_mgr_create(struct device *dev, const char *name,
int id, ret;
if (!mops || !mops->write_complete || !mops->state ||
- !mops->write_init || (!mops->write && !mops->write_sg) ||
- (mops->write && mops->write_sg)) {
+ !mops->write_init || (!mops->write && !mops->write_sg)) {
dev_err(dev, "Attempt to register without fpga_manager_ops\n");
return NULL;
}
@@ -599,10 +824,28 @@ struct fpga_manager *fpga_mgr_create(struct device *dev, const char *name,
mgr->dev.of_node = dev->of_node;
mgr->dev.id = id;
+ /* Make device dma capable by inheriting from parent's */
+ set_dma_ops(&mgr->dev, get_dma_ops(dev));
+ ret = dma_coerce_mask_and_coherent(&mgr->dev, dma_get_mask(dev));
+ if (ret) {
+ dev_warn(dev,
+ "Failed to set DMA mask %llx. Trying to continue... %x\n",
+ dma_get_mask(dev), ret);
+ }
+
ret = dev_set_name(&mgr->dev, "fpga%d", id);
if (ret)
goto error_device;
+ mgr->miscdev.minor = MISC_DYNAMIC_MINOR;
+ mgr->miscdev.name = kobject_name(&mgr->dev.kobj);
+ mgr->miscdev.fops = &fpga_fops;
+ ret = misc_register(&mgr->miscdev);
+ if (ret) {
+ pr_err("fpga: failed to register misc device.\n");
+ goto error_device;
+ }
+
return mgr;
error_device:
@@ -680,6 +923,9 @@ EXPORT_SYMBOL_GPL(devm_fpga_mgr_create);
int fpga_mgr_register(struct fpga_manager *mgr)
{
int ret;
+#ifdef CONFIG_FPGA_MGR_DEBUG_FS
+ struct dentry *d, *parent;
+#endif
/*
* Initialize framework state by requesting low level driver read state
@@ -692,6 +938,26 @@ int fpga_mgr_register(struct fpga_manager *mgr)
if (ret)
goto error_device;
+#ifdef CONFIG_FPGA_MGR_DEBUG_FS
+ mgr->dir = debugfs_create_dir("fpga", NULL);
+ if (!mgr->dir)
+ goto error_device;
+
+ parent = mgr->dir;
+ d = debugfs_create_dir(mgr->dev.kobj.name, parent);
+ if (!d) {
+ debugfs_remove_recursive(parent);
+ goto error_device;
+ }
+
+ parent = d;
+ d = debugfs_create_file("image", 0644, parent, mgr,
+ &fpga_mgr_ops_image);
+ if (!d) {
+ debugfs_remove_recursive(mgr->dir);
+ goto error_device;
+ }
+#endif
dev_info(&mgr->dev, "%s registered\n", mgr->name);
return 0;
@@ -713,6 +979,10 @@ void fpga_mgr_unregister(struct fpga_manager *mgr)
{
dev_info(&mgr->dev, "%s %s\n", __func__, mgr->name);
+#ifdef CONFIG_FPGA_MGR_DEBUG_FS
+ debugfs_remove_recursive(mgr->dir);
+#endif
+
/*
* If the low level driver provides a method for putting fpga into
* a desired state upon unregister, do it.
diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c
index e405309baadc..078be266d8b6 100644
--- a/drivers/fpga/of-fpga-region.c
+++ b/drivers/fpga/of-fpga-region.c
@@ -228,6 +228,9 @@ static struct fpga_image_info *of_fpga_region_parse_ov(
if (of_property_read_bool(overlay, "encrypted-fpga-config"))
info->flags |= FPGA_MGR_ENCRYPTED_BITSTREAM;
+ if (of_property_read_bool(overlay, "fpga-config-from-dmabuf"))
+ info->flags |= FPGA_MGR_CONFIG_DMA_BUF;
+
if (!of_property_read_string(overlay, "firmware-name",
&firmware_name)) {
info->firmware_name = devm_kstrdup(dev, firmware_name,
diff --git a/drivers/fpga/versal-fpga.c b/drivers/fpga/versal-fpga.c
new file mode 100644
index 000000000000..a6694d7fd27a
--- /dev/null
+++ b/drivers/fpga/versal-fpga.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/string.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+/* Constant Definitions */
+#define PDI_SOURCE_TYPE 0xF
+
+/**
+ * struct versal_fpga_priv - Private data structure
+ * @dev: Device data structure
+ * @flags: flags which is used to identify the PL Image type
+ */
+struct versal_fpga_priv {
+ struct device *dev;
+ u32 flags;
+};
+
+static int versal_fpga_ops_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t size)
+{
+ struct versal_fpga_priv *priv;
+
+ priv = mgr->priv;
+ priv->flags = info->flags;
+
+ return 0;
+}
+
+/*
+ * versal_fpga_ops_write - Copy the PDI into DMA memory and load it
+ * @mgr: fpga manager
+ * @buf: image data
+ * @size: image size in bytes
+ *
+ * The image is bounced through a coherent DMA buffer because the
+ * firmware pdi_load() call takes a DMA address. The buffer is freed
+ * whether or not the load succeeds.
+ *
+ * Return: 0 on success, -ENXIO/-ENOMEM or the firmware's error code.
+ */
+static int versal_fpga_ops_write(struct fpga_manager *mgr,
+ const char *buf, size_t size)
+{
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+ struct versal_fpga_priv *priv;
+ dma_addr_t dma_addr;
+ char *kbuf;
+ int ret;
+
+ if (IS_ERR(eemi_ops) || !eemi_ops->pdi_load)
+ return -ENXIO;
+
+ priv = mgr->priv;
+
+ kbuf = dma_alloc_coherent(priv->dev, size, &dma_addr, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ memcpy(kbuf, buf, size);
+
+ wmb(); /* ensure all writes are done before initiate FW call */
+
+ ret = eemi_ops->pdi_load(PDI_SOURCE_TYPE, dma_addr);
+
+ dma_free_coherent(priv->dev, size, kbuf, dma_addr);
+
+ return ret;
+}
+
+static int versal_fpga_ops_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ return 0;
+}
+
+static enum fpga_mgr_states versal_fpga_ops_state(struct fpga_manager *mgr)
+{
+ return FPGA_MGR_STATE_OPERATING;
+}
+
+static const struct fpga_manager_ops versal_fpga_ops = {
+ .state = versal_fpga_ops_state,
+ .write_init = versal_fpga_ops_write_init,
+ .write = versal_fpga_ops_write,
+ .write_complete = versal_fpga_ops_write_complete,
+};
+
+static int versal_fpga_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct versal_fpga_priv *priv;
+ struct fpga_manager *mgr;
+ int err, ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret < 0) {
+ dev_err(dev, "no usable DMA configuration");
+ return ret;
+ }
+
+ mgr = devm_fpga_mgr_create(dev, "Xilinx Versal FPGA Manager",
+ &versal_fpga_ops, priv);
+ if (!mgr)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, mgr);
+
+ err = fpga_mgr_register(mgr);
+ if (err) {
+ dev_err(dev, "unable to register FPGA manager");
+ fpga_mgr_free(mgr);
+ return err;
+ }
+
+ return 0;
+}
+
+static int versal_fpga_remove(struct platform_device *pdev)
+{
+ struct fpga_manager *mgr = platform_get_drvdata(pdev);
+
+ fpga_mgr_unregister(mgr);
+ fpga_mgr_free(mgr);
+
+ return 0;
+}
+
+static const struct of_device_id versal_fpga_of_match[] = {
+ { .compatible = "xlnx,versal-fpga", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, versal_fpga_of_match);
+
+static struct platform_driver versal_fpga_driver = {
+ .probe = versal_fpga_probe,
+ .remove = versal_fpga_remove,
+ .driver = {
+ .name = "versal_fpga_manager",
+ .of_match_table = of_match_ptr(versal_fpga_of_match),
+ },
+};
+
+module_platform_driver(versal_fpga_driver);
+
+MODULE_AUTHOR("Nava kishore Manne <nava.manne@xilinx.com>");
+MODULE_AUTHOR("Appana Durga Kedareswara rao <appanad.durga.rao@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx Versal FPGA Manager");
+MODULE_LICENSE("GPL");
diff --git a/drivers/fpga/xilinx-afi.c b/drivers/fpga/xilinx-afi.c
new file mode 100644
index 000000000000..ae3caf9849df
--- /dev/null
+++ b/drivers/fpga/xilinx-afi.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA AFI bridge.
+ * Copyright (c) 2018 Xilinx Inc.
+ */
+
+#include <linux/err.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/**
+ * struct afi_fpga - AFI register description
+ * @value: value to be written to the register
+ * @regid: Register id for the register to be written
+ */
+struct afi_fpga {
+ u32 value;
+ u32 regid;
+};
+
+static int afi_fpga_probe(struct platform_device *pdev)
+{
+ struct afi_fpga *afi_fpga;
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+ int i, entries, pairs;
+ u32 reg, val;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
+
+ if (!eemi_ops->ioctl)
+ return -ENOTSUPP;
+
+ afi_fpga = devm_kzalloc(&pdev->dev, sizeof(*afi_fpga), GFP_KERNEL);
+ if (!afi_fpga)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, afi_fpga);
+
+ /*
+ * of_property_count_u32_elems() returns a negative errno when the
+ * property is missing or malformed. An even negative value (e.g.
+ * -EINVAL) would slip past a bare "entries % 2" test and make probe
+ * silently succeed, so check <= 0 explicitly. The property holds
+ * <regid, value> pairs.
+ */
+ entries = of_property_count_u32_elems(np, "config-afi");
+ if (entries <= 0 || (entries % 2)) {
+ dev_err(&pdev->dev, "Invalid number of registers\n");
+ return -EINVAL;
+ }
+ pairs = entries / 2;
+
+ for (i = 0; i < pairs; i++) {
+ ret = of_property_read_u32_index(np, "config-afi", i * 2,
+ &reg);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to read register\n");
+ return -EINVAL;
+ }
+ ret = of_property_read_u32_index(np, "config-afi", i * 2 + 1,
+ &val);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to read value\n");
+ return -EINVAL;
+ }
+ ret = eemi_ops->ioctl(0, IOCTL_AFI, reg, val, NULL);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "AFI register write error %d\n",
+ ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static const struct of_device_id afi_fpga_ids[] = {
+ { .compatible = "xlnx,afi-fpga" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, afi_fpga_ids);
+
+static struct platform_driver afi_fpga_driver = {
+ .driver = {
+ .name = "afi-fpga",
+ .of_match_table = afi_fpga_ids,
+ },
+ .probe = afi_fpga_probe,
+};
+module_platform_driver(afi_fpga_driver);
+
+MODULE_DESCRIPTION("FPGA afi module");
+MODULE_AUTHOR("Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/zynq-afi.c b/drivers/fpga/zynq-afi.c
new file mode 100644
index 000000000000..7ce0d089e878
--- /dev/null
+++ b/drivers/fpga/zynq-afi.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA AFI driver.
+ * Copyright (c) 2018 Xilinx Inc.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+/* Registers and special values for doing register-based operations */
+#define AFI_RDCHAN_CTRL_OFFSET 0x00
+#define AFI_WRCHAN_CTRL_OFFSET 0x14
+
+#define AFI_BUSWIDTH_MASK 0x01
+
+/**
+ * struct zynq_afi_fpga - AFI register description
+ * @membase: pointer to register struct
+ * @afi_width: AFI bus width to be written
+ */
+struct zynq_afi_fpga {
+ void __iomem *membase;
+ u32 afi_width;
+};
+
+static int zynq_afi_fpga_probe(struct platform_device *pdev)
+{
+ struct zynq_afi_fpga *afi;
+ struct resource *res;
+ u32 ctrl;
+ u32 ret;
+
+ afi = devm_kzalloc(&pdev->dev, sizeof(*afi), GFP_KERNEL);
+ if (!afi)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ afi->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(afi->membase))
+ return PTR_ERR(afi->membase);
+
+ ret = device_property_read_u32(&pdev->dev, "xlnx,afi-width",
+ &afi->afi_width);
+ if (ret) {
+ dev_err(&pdev->dev, "Fail to get the afi bus width\n");
+ return -EINVAL;
+ }
+
+ /* Program the bus width into the read channel control register */
+ ctrl = readl(afi->membase + AFI_RDCHAN_CTRL_OFFSET);
+ ctrl &= ~AFI_BUSWIDTH_MASK;
+ writel(ctrl | afi->afi_width,
+ afi->membase + AFI_RDCHAN_CTRL_OFFSET);
+
+ /* ...and into the write channel control register */
+ ctrl = readl(afi->membase + AFI_WRCHAN_CTRL_OFFSET);
+ ctrl &= ~AFI_BUSWIDTH_MASK;
+ writel(ctrl | afi->afi_width,
+ afi->membase + AFI_WRCHAN_CTRL_OFFSET);
+
+ return 0;
+}
+
+static const struct of_device_id zynq_afi_fpga_ids[] = {
+ { .compatible = "xlnx,zynq-afi-fpga" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, zynq_afi_fpga_ids);
+
+static struct platform_driver zynq_afi_fpga_driver = {
+ .driver = {
+ .name = "zynq-afi-fpga",
+ .of_match_table = zynq_afi_fpga_ids,
+ },
+ .probe = zynq_afi_fpga_probe,
+};
+module_platform_driver(zynq_afi_fpga_driver);
+
+MODULE_DESCRIPTION("ZYNQ FPGA AFI module");
+MODULE_AUTHOR("Nava kishore Manne <nava.manne@xilinx.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/zynqmp-fpga.c b/drivers/fpga/zynqmp-fpga.c
index b8a88d21d038..a6f0cdeaa463 100644
--- a/drivers/fpga/zynqmp-fpga.c
+++ b/drivers/fpga/zynqmp-fpga.c
@@ -9,20 +9,78 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
+#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/firmware/xlnx-zynqmp.h>
/* Constant Definitions */
#define IXR_FPGA_DONE_MASK BIT(3)
+#define READ_DMA_SIZE 0x200
+#define DUMMY_FRAMES_SIZE 0x64
+
+/* Error Register */
+#define IXR_FPGA_ERR_CRC_ERR BIT(0)
+#define IXR_FPGA_ERR_SECURITY_ERR BIT(16)
+
+/* Signal Status Register */
+#define IXR_FPGA_END_OF_STARTUP BIT(4)
+#define IXR_FPGA_GST_CFG_B BIT(5)
+#define IXR_FPGA_INIT_B_INTERNAL BIT(11)
+#define IXR_FPGA_DONE_INTERNAL_SIGNAL BIT(13)
+
+#define IXR_FPGA_CONFIG_STAT_OFFSET 7U
+#define IXR_FPGA_READ_CONFIG_TYPE 0U
+
+static bool readback_type;
+module_param(readback_type, bool, 0644);
+MODULE_PARM_DESC(readback_type,
+ "readback_type 0-configuration register read "
+ "1- configuration data read (default: 0)");
+
+/**
+ * struct zynqmp_configreg - Configuration register offsets
+ * @reg: Name of the configuration register.
+ * @offset: Register offset.
+ */
+struct zynqmp_configreg {
+ char *reg;
+ u32 offset;
+};
+
+static struct zynqmp_configreg cfgreg[] = {
+ {.reg = "CRC", .offset = 0},
+ {.reg = "FAR", .offset = 1},
+ {.reg = "FDRI", .offset = 2},
+ {.reg = "FDRO", .offset = 3},
+ {.reg = "CMD", .offset = 4},
+ {.reg = "CTRL0", .offset = 5},
+ {.reg = "MASK", .offset = 6},
+ {.reg = "STAT", .offset = 7},
+ {.reg = "LOUT", .offset = 8},
+ {.reg = "COR0", .offset = 9},
+ {.reg = "MFWR", .offset = 10},
+ {.reg = "CBC", .offset = 11},
+ {.reg = "IDCODE", .offset = 12},
+ {.reg = "AXSS", .offset = 13},
+ {.reg = "COR1", .offset = 14},
+ {.reg = "WBSTR", .offset = 16},
+ {.reg = "TIMER", .offset = 17},
+ {.reg = "BOOTSTS", .offset = 22},
+ {.reg = "CTRL1", .offset = 24},
+ {}
+};
+
/**
* struct zynqmp_fpga_priv - Private data structure
* @dev: Device data structure
* @flags: flags which is used to identify the bitfile type
+ * @size: Size of the Bit-stream used for readback
*/
struct zynqmp_fpga_priv {
struct device *dev;
u32 flags;
+ u32 size;
};
static int zynqmp_fpga_ops_write_init(struct fpga_manager *mgr,
@@ -44,6 +102,7 @@ static int zynqmp_fpga_ops_write(struct fpga_manager *mgr,
struct zynqmp_fpga_priv *priv;
dma_addr_t dma_addr;
u32 eemi_flags = 0;
+ size_t dma_size;
char *kbuf;
int ret;
@@ -51,21 +110,43 @@ static int zynqmp_fpga_ops_write(struct fpga_manager *mgr,
return -ENXIO;
priv = mgr->priv;
+ priv->size = size;
- kbuf = dma_alloc_coherent(priv->dev, size, &dma_addr, GFP_KERNEL);
+ if (priv->flags & FPGA_MGR_USERKEY_ENCRYPTED_BITSTREAM)
+ dma_size = size + ENCRYPTED_KEY_LEN;
+ else
+ dma_size = size;
+
+ kbuf = dma_alloc_coherent(priv->dev, dma_size, &dma_addr, GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
memcpy(kbuf, buf, size);
+ if (priv->flags & FPGA_MGR_USERKEY_ENCRYPTED_BITSTREAM) {
+ eemi_flags |= XILINX_ZYNQMP_PM_FPGA_ENCRYPTION_USERKEY;
+ memcpy(kbuf + size, mgr->key, ENCRYPTED_KEY_LEN);
+ } else if (priv->flags & FPGA_MGR_ENCRYPTED_BITSTREAM) {
+ eemi_flags |= XILINX_ZYNQMP_PM_FPGA_ENCRYPTION_DEVKEY;
+ }
+
wmb(); /* ensure all writes are done before initiate FW call */
+ if (priv->flags & FPGA_MGR_DDR_MEM_AUTH_BITSTREAM)
+ eemi_flags |= XILINX_ZYNQMP_PM_FPGA_AUTHENTICATION_DDR;
+ else if (priv->flags & FPGA_MGR_SECURE_MEM_AUTH_BITSTREAM)
+ eemi_flags |= XILINX_ZYNQMP_PM_FPGA_AUTHENTICATION_OCM;
+
if (priv->flags & FPGA_MGR_PARTIAL_RECONFIG)
eemi_flags |= XILINX_ZYNQMP_PM_FPGA_PARTIAL;
- ret = eemi_ops->fpga_load(dma_addr, size, eemi_flags);
+ if (priv->flags & FPGA_MGR_USERKEY_ENCRYPTED_BITSTREAM)
+ ret = eemi_ops->fpga_load(dma_addr, dma_addr + size,
+ eemi_flags);
+ else
+ ret = eemi_ops->fpga_load(dma_addr, size, eemi_flags);
- dma_free_coherent(priv->dev, size, kbuf, dma_addr);
+ dma_free_coherent(priv->dev, dma_size, kbuf, dma_addr);
return ret;
}
@@ -91,11 +172,135 @@ static enum fpga_mgr_states zynqmp_fpga_ops_state(struct fpga_manager *mgr)
return FPGA_MGR_STATE_UNKNOWN;
}
+static u64 zynqmp_fpga_ops_status(struct fpga_manager *mgr)
+{
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+ unsigned int *buf, reg_val;
+ dma_addr_t dma_addr;
+ u64 status = 0;
+ int ret;
+
+ if (IS_ERR_OR_NULL(eemi_ops) || !eemi_ops->fpga_read)
+ return FPGA_MGR_STATUS_FIRMWARE_REQ_ERR;
+
+ buf = dma_alloc_coherent(mgr->dev.parent, READ_DMA_SIZE,
+ &dma_addr, GFP_KERNEL);
+ if (!buf)
+ return FPGA_MGR_STATUS_FIRMWARE_REQ_ERR;
+
+ ret = eemi_ops->fpga_read(IXR_FPGA_CONFIG_STAT_OFFSET, dma_addr,
+ IXR_FPGA_READ_CONFIG_TYPE, &reg_val);
+ if (ret) {
+ status = FPGA_MGR_STATUS_FIRMWARE_REQ_ERR;
+ goto free_dmabuf;
+ }
+
+ if (reg_val & IXR_FPGA_ERR_CRC_ERR)
+ status |= FPGA_MGR_STATUS_CRC_ERR;
+ if (reg_val & IXR_FPGA_ERR_SECURITY_ERR)
+ status |= FPGA_MGR_STATUS_SECURITY_ERR;
+ if (!(reg_val & IXR_FPGA_INIT_B_INTERNAL))
+ status |= FPGA_MGR_STATUS_DEVICE_INIT_ERR;
+ if (!(reg_val & IXR_FPGA_DONE_INTERNAL_SIGNAL))
+ status |= FPGA_MGR_STATUS_SIGNAL_ERR;
+ if (!(reg_val & IXR_FPGA_GST_CFG_B))
+ status |= FPGA_MGR_STATUS_HIGH_Z_STATE_ERR;
+ if (!(reg_val & IXR_FPGA_END_OF_STARTUP))
+ status |= FPGA_MGR_STATUS_EOS_ERR;
+
+free_dmabuf:
+ dma_free_coherent(mgr->dev.parent, READ_DMA_SIZE, buf, dma_addr);
+
+ return status;
+}
+
+static int zynqmp_fpga_read_cfgreg(struct fpga_manager *mgr,
+ struct seq_file *s)
+{
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+ int ret, val;
+ unsigned int *buf;
+ dma_addr_t dma_addr;
+ struct zynqmp_configreg *p = cfgreg;
+
+ buf = dma_alloc_coherent(mgr->dev.parent, READ_DMA_SIZE,
+ &dma_addr, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ seq_puts(s, "zynqMP FPGA Configuration register contents are\n");
+
+ while (p->reg) {
+ ret = eemi_ops->fpga_read(p->offset, dma_addr, readback_type,
+ &val);
+ if (ret)
+ goto free_dmabuf;
+ seq_printf(s, "%s --> \t %x \t\r\n", p->reg, val);
+ p++;
+ }
+
+free_dmabuf:
+ dma_free_coherent(mgr->dev.parent, READ_DMA_SIZE, buf,
+ dma_addr);
+
+ return ret;
+}
+
+static int zynqmp_fpga_read_cfgdata(struct fpga_manager *mgr,
+ struct seq_file *s)
+{
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+ struct zynqmp_fpga_priv *priv;
+ int ret, data_offset;
+ unsigned int *buf;
+ dma_addr_t dma_addr;
+ size_t size;
+
+ priv = mgr->priv;
+ size = priv->size + READ_DMA_SIZE + DUMMY_FRAMES_SIZE;
+
+ buf = dma_alloc_coherent(mgr->dev.parent, size, &dma_addr,
+ GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ seq_puts(s, "zynqMP FPGA Configuration data contents are\n");
+ ret = eemi_ops->fpga_read((priv->size + DUMMY_FRAMES_SIZE) / 4,
+ dma_addr, readback_type, &data_offset);
+ if (ret)
+ goto free_dmabuf;
+
+ seq_write(s, &buf[data_offset], priv->size);
+
+free_dmabuf:
+ dma_free_coherent(mgr->dev.parent, size, buf, dma_addr);
+
+ return ret;
+}
+
+static int zynqmp_fpga_ops_read(struct fpga_manager *mgr, struct seq_file *s)
+{
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+ int ret;
+
+ if (!eemi_ops || !eemi_ops->fpga_read)
+ return -ENXIO;
+
+ if (readback_type)
+ ret = zynqmp_fpga_read_cfgdata(mgr, s);
+ else
+ ret = zynqmp_fpga_read_cfgreg(mgr, s);
+
+ return ret;
+}
+
static const struct fpga_manager_ops zynqmp_fpga_ops = {
.state = zynqmp_fpga_ops_state,
+ .status = zynqmp_fpga_ops_status,
.write_init = zynqmp_fpga_ops_write_init,
.write = zynqmp_fpga_ops_write,
.write_complete = zynqmp_fpga_ops_write_complete,
+ .read = zynqmp_fpga_ops_read,
};
static int zynqmp_fpga_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 67f9f82e0db0..4d5adc255866 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -1,8 +1,15 @@
-// SPDX-License-Identifier: GPL-2.0-only
/*
* Xilinx gpio driver for xps/axi_gpio IP.
*
* Copyright 2008 - 2013 Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/bitops.h>
@@ -10,19 +17,32 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/gpio/driver.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/gpio.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
/* Register Offset Definitions */
-#define XGPIO_DATA_OFFSET (0x0) /* Data register */
-#define XGPIO_TRI_OFFSET (0x4) /* I/O direction register */
+#define XGPIO_DATA_OFFSET 0x0 /* Data register */
+#define XGPIO_TRI_OFFSET 0x4 /* I/O direction register */
+#define XGPIO_GIER_OFFSET 0x11c /* Global Interrupt Enable */
+#define XGPIO_GIER_IE BIT(31)
+
+#define XGPIO_IPISR_OFFSET 0x120 /* IP Interrupt Status */
+#define XGPIO_IPIER_OFFSET 0x128 /* IP Interrupt Enable */
#define XGPIO_CHANNEL_OFFSET 0x8
/* Read/Write access to the GPIO registers */
-#if defined(CONFIG_ARCH_ZYNQ) || defined(CONFIG_X86)
+#if defined(CONFIG_ARCH_ZYNQ) || defined(CONFIG_ARM64)
# define xgpio_readreg(offset) readl(offset)
# define xgpio_writereg(offset, val) writel(val, offset)
#else
@@ -32,46 +52,32 @@
/**
* struct xgpio_instance - Stores information about GPIO device
- * @gc: GPIO chip
- * @regs: register block
- * @gpio_width: GPIO width for every channel
+ * @mmchip: OF GPIO chip for memory mapped banks
+ * @mmchip_dual: Pointer to the OF dual gpio chip
* @gpio_state: GPIO state shadow register
* @gpio_dir: GPIO direction shadow register
+ * @offset: GPIO channel offset
+ * @irq_base: GPIO channel irq base address
+ * @irq_enable: GPIO irq enable/disable bitfield
+ * @no_init: No initialisation at probe
* @gpio_lock: Lock used for synchronization
+ * @irq_domain: irq_domain of the controller
+ * @clk: clock resource for this driver
*/
struct xgpio_instance {
- struct gpio_chip gc;
- void __iomem *regs;
- unsigned int gpio_width[2];
- u32 gpio_state[2];
- u32 gpio_dir[2];
- spinlock_t gpio_lock[2];
+ struct of_mm_gpio_chip mmchip;
+ struct of_mm_gpio_chip *mmchip_dual;
+ u32 gpio_state;
+ u32 gpio_dir;
+ u32 offset;
+ int irq_base;
+ u32 irq_enable;
+ bool no_init;
+ spinlock_t gpio_lock;
+ struct irq_domain *irq_domain;
+ struct clk *clk;
};
-static inline int xgpio_index(struct xgpio_instance *chip, int gpio)
-{
- if (gpio >= chip->gpio_width[0])
- return 1;
-
- return 0;
-}
-
-static inline int xgpio_regoffset(struct xgpio_instance *chip, int gpio)
-{
- if (xgpio_index(chip, gpio))
- return XGPIO_CHANNEL_OFFSET;
-
- return 0;
-}
-
-static inline int xgpio_offset(struct xgpio_instance *chip, int gpio)
-{
- if (xgpio_index(chip, gpio))
- return gpio - chip->gpio_width[0];
-
- return gpio;
-}
-
/**
* xgpio_get - Read the specified signal of the GPIO device.
* @gc: Pointer to gpio_chip device structure.
@@ -85,13 +91,13 @@ static inline int xgpio_offset(struct xgpio_instance *chip, int gpio)
*/
static int xgpio_get(struct gpio_chip *gc, unsigned int gpio)
{
- struct xgpio_instance *chip = gpiochip_get_data(gc);
- u32 val;
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct xgpio_instance *chip =
+ container_of(mm_gc, struct xgpio_instance, mmchip);
- val = xgpio_readreg(chip->regs + XGPIO_DATA_OFFSET +
- xgpio_regoffset(chip, gpio));
+ void __iomem *regs = mm_gc->regs + chip->offset;
- return !!(val & BIT(xgpio_offset(chip, gpio)));
+ return !!(xgpio_readreg(regs + XGPIO_DATA_OFFSET) & BIT(gpio));
}
/**
@@ -106,22 +112,23 @@ static int xgpio_get(struct gpio_chip *gc, unsigned int gpio)
static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
unsigned long flags;
- struct xgpio_instance *chip = gpiochip_get_data(gc);
- int index = xgpio_index(chip, gpio);
- int offset = xgpio_offset(chip, gpio);
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct xgpio_instance *chip =
+ container_of(mm_gc, struct xgpio_instance, mmchip);
+ void __iomem *regs = mm_gc->regs;
- spin_lock_irqsave(&chip->gpio_lock[index], flags);
+ spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write to GPIO signal and set its direction to output */
if (val)
- chip->gpio_state[index] |= BIT(offset);
+ chip->gpio_state |= BIT(gpio);
else
- chip->gpio_state[index] &= ~BIT(offset);
+ chip->gpio_state &= ~BIT(gpio);
- xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
- xgpio_regoffset(chip, gpio), chip->gpio_state[index]);
+ xgpio_writereg(regs + chip->offset + XGPIO_DATA_OFFSET,
+ chip->gpio_state);
- spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
/**
@@ -137,38 +144,30 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
unsigned long *bits)
{
unsigned long flags;
- struct xgpio_instance *chip = gpiochip_get_data(gc);
- int index = xgpio_index(chip, 0);
- int offset, i;
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct xgpio_instance *chip =
+ container_of(mm_gc, struct xgpio_instance, mmchip);
+ void __iomem *regs = mm_gc->regs;
+ int i;
- spin_lock_irqsave(&chip->gpio_lock[index], flags);
+ spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write to GPIO signals */
for (i = 0; i < gc->ngpio; i++) {
if (*mask == 0)
break;
- /* Once finished with an index write it out to the register */
- if (index != xgpio_index(chip, i)) {
- xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
- index * XGPIO_CHANNEL_OFFSET,
- chip->gpio_state[index]);
- spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
- index = xgpio_index(chip, i);
- spin_lock_irqsave(&chip->gpio_lock[index], flags);
- }
if (__test_and_clear_bit(i, mask)) {
- offset = xgpio_offset(chip, i);
if (test_bit(i, bits))
- chip->gpio_state[index] |= BIT(offset);
+ chip->gpio_state |= BIT(i);
else
- chip->gpio_state[index] &= ~BIT(offset);
+ chip->gpio_state &= ~BIT(i);
}
}
- xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
- index * XGPIO_CHANNEL_OFFSET, chip->gpio_state[index]);
+ xgpio_writereg(regs + chip->offset + XGPIO_DATA_OFFSET,
+ chip->gpio_state);
- spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
/**
@@ -176,6 +175,8 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
* @gc: Pointer to gpio_chip device structure.
* @gpio: GPIO signal number.
*
+ * This function sets the direction of specified GPIO signal as input.
+ *
* Return:
* 0 - if direction of GPIO signals is set as input
* otherwise it returns negative error value.
@@ -183,18 +184,18 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
static int xgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
unsigned long flags;
- struct xgpio_instance *chip = gpiochip_get_data(gc);
- int index = xgpio_index(chip, gpio);
- int offset = xgpio_offset(chip, gpio);
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct xgpio_instance *chip =
+ container_of(mm_gc, struct xgpio_instance, mmchip);
+ void __iomem *regs = mm_gc->regs;
- spin_lock_irqsave(&chip->gpio_lock[index], flags);
+ spin_lock_irqsave(&chip->gpio_lock, flags);
/* Set the GPIO bit in shadow register and set direction as input */
- chip->gpio_dir[index] |= BIT(offset);
- xgpio_writereg(chip->regs + XGPIO_TRI_OFFSET +
- xgpio_regoffset(chip, gpio), chip->gpio_dir[index]);
+ chip->gpio_dir |= BIT(gpio);
+ xgpio_writereg(regs + chip->offset + XGPIO_TRI_OFFSET, chip->gpio_dir);
- spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
return 0;
}
@@ -214,164 +215,574 @@ static int xgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
unsigned long flags;
- struct xgpio_instance *chip = gpiochip_get_data(gc);
- int index = xgpio_index(chip, gpio);
- int offset = xgpio_offset(chip, gpio);
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct xgpio_instance *chip =
+ container_of(mm_gc, struct xgpio_instance, mmchip);
+ void __iomem *regs = mm_gc->regs;
- spin_lock_irqsave(&chip->gpio_lock[index], flags);
+ spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write state of GPIO signal */
if (val)
- chip->gpio_state[index] |= BIT(offset);
+ chip->gpio_state |= BIT(gpio);
else
- chip->gpio_state[index] &= ~BIT(offset);
- xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
- xgpio_regoffset(chip, gpio), chip->gpio_state[index]);
+ chip->gpio_state &= ~BIT(gpio);
+ xgpio_writereg(regs + chip->offset + XGPIO_DATA_OFFSET,
+ chip->gpio_state);
/* Clear the GPIO bit in shadow register and set direction as output */
- chip->gpio_dir[index] &= ~BIT(offset);
- xgpio_writereg(chip->regs + XGPIO_TRI_OFFSET +
- xgpio_regoffset(chip, gpio), chip->gpio_dir[index]);
+ chip->gpio_dir &= ~BIT(gpio);
+ xgpio_writereg(regs + chip->offset + XGPIO_TRI_OFFSET, chip->gpio_dir);
- spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
return 0;
}
/**
* xgpio_save_regs - Set initial values of GPIO pins
- * @chip: Pointer to GPIO instance
+ * @mm_gc: Pointer to memory mapped GPIO chip structure
+ */
+static void xgpio_save_regs(struct of_mm_gpio_chip *mm_gc)
+{
+ struct xgpio_instance *chip =
+ container_of(mm_gc, struct xgpio_instance, mmchip);
+ if (chip->no_init) {
+ chip->gpio_state = xgpio_readreg(mm_gc->regs +
+ XGPIO_DATA_OFFSET);
+ chip->gpio_dir = xgpio_readreg(mm_gc->regs + XGPIO_TRI_OFFSET);
+ } else {
+ xgpio_writereg(mm_gc->regs + chip->offset + XGPIO_DATA_OFFSET,
+ chip->gpio_state);
+ xgpio_writereg(mm_gc->regs + chip->offset + XGPIO_TRI_OFFSET,
+ chip->gpio_dir);
+ }
+}
+
+/**
+ * xgpio_xlate - Translate gpio_spec to the GPIO number and flags
+ * @gc: Pointer to gpio_chip device structure.
+ * @gpiospec: gpio specifier as found in the device tree
+ * @flags: A flags pointer based on binding
+ *
+ * Return:
+ * GPIO pin number on success, otherwise -EINVAL
*/
-static void xgpio_save_regs(struct xgpio_instance *chip)
+static int xgpio_xlate(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec, u32 *flags)
{
- xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET, chip->gpio_state[0]);
- xgpio_writereg(chip->regs + XGPIO_TRI_OFFSET, chip->gpio_dir[0]);
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct xgpio_instance *chip = container_of(mm_gc, struct xgpio_instance,
+ mmchip);
+ if (gc->of_gpio_n_cells == 3 && flags)
+ *flags = gpiospec->args[2];
- if (!chip->gpio_width[1])
- return;
+ if (gpiospec->args[1] == chip->offset)
+ return gpiospec->args[0];
- xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET + XGPIO_CHANNEL_OFFSET,
- chip->gpio_state[1]);
- xgpio_writereg(chip->regs + XGPIO_TRI_OFFSET + XGPIO_CHANNEL_OFFSET,
- chip->gpio_dir[1]);
+ return -EINVAL;
}
/**
- * xgpio_of_probe - Probe method for the GPIO device.
+ * xgpio_irq_mask - Disable the interrupt for the specified GPIO signal.
+ * @irq_data: per irq and chip data passed down to chip functions
+ */
+static void xgpio_irq_mask(struct irq_data *irq_data)
+{
+ unsigned long flags;
+ struct xgpio_instance *chip = irq_data_get_irq_chip_data(irq_data);
+ struct of_mm_gpio_chip *mm_gc = &chip->mmchip;
+ u32 offset = irq_data->irq - chip->irq_base;
+ u32 temp;
+
+ pr_debug("%s: Disable %d irq, irq_enable_mask 0x%x\n",
+ __func__, offset, chip->irq_enable);
+
+ spin_lock_irqsave(&chip->gpio_lock, flags);
+
+ chip->irq_enable &= ~BIT(offset);
+
+ if (!chip->irq_enable) {
+ /* Enable per channel interrupt */
+ temp = xgpio_readreg(mm_gc->regs + XGPIO_IPIER_OFFSET);
+ temp &= chip->offset / XGPIO_CHANNEL_OFFSET + 1;
+ xgpio_writereg(mm_gc->regs + XGPIO_IPIER_OFFSET, temp);
+
+ /* Disable global interrupt if channel interrupts are unused */
+ temp = xgpio_readreg(mm_gc->regs + XGPIO_IPIER_OFFSET);
+ if (!temp)
+ xgpio_writereg(mm_gc->regs + XGPIO_GIER_OFFSET,
+ ~XGPIO_GIER_IE);
+
+ }
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
+}
+
+/**
+ * xgpio_irq_unmask - Enable the interrupt for the specified GPIO signal.
+ * @irq_data: per irq and chip data passed down to chip functions
+ */
+static void xgpio_irq_unmask(struct irq_data *irq_data)
+{
+ unsigned long flags;
+ struct xgpio_instance *chip = irq_data_get_irq_chip_data(irq_data);
+ struct of_mm_gpio_chip *mm_gc = &chip->mmchip;
+ u32 offset = irq_data->irq - chip->irq_base;
+ u32 temp;
+
+ pr_debug("%s: Enable %d irq, irq_enable_mask 0x%x\n",
+ __func__, offset, chip->irq_enable);
+
+ /* Setup pin as input */
+ xgpio_dir_in(&mm_gc->gc, offset);
+
+ spin_lock_irqsave(&chip->gpio_lock, flags);
+
+ chip->irq_enable |= BIT(offset);
+
+ if (chip->irq_enable) {
+
+ /* Enable per channel interrupt */
+ temp = xgpio_readreg(mm_gc->regs + XGPIO_IPIER_OFFSET);
+ temp |= chip->offset / XGPIO_CHANNEL_OFFSET + 1;
+ xgpio_writereg(mm_gc->regs + XGPIO_IPIER_OFFSET, temp);
+
+ /* Enable global interrupts */
+ xgpio_writereg(mm_gc->regs + XGPIO_GIER_OFFSET, XGPIO_GIER_IE);
+ }
+
+ spin_unlock_irqrestore(&chip->gpio_lock, flags);
+}
+
+/**
+ * xgpio_set_irq_type - Set the interrupt type for the specified GPIO signal.
+ * @irq_data: Per irq and chip data passed down to chip functions
+ * @type: Interrupt type that is to be set for the gpio pin
+ *
+ * Return:
+ * 0 if interrupt type is supported, otherwise -EINVAL
+ */
+static int xgpio_set_irq_type(struct irq_data *irq_data, unsigned int type)
+{
+ /* Only rising edge case is supported now */
+ if (type & IRQ_TYPE_EDGE_RISING)
+ return 0;
+
+ return -EINVAL;
+}
+
+/* irq chip descriptor */
+static struct irq_chip xgpio_irqchip = {
+ .name = "xgpio",
+ .irq_mask = xgpio_irq_mask,
+ .irq_unmask = xgpio_irq_unmask,
+ .irq_set_type = xgpio_set_irq_type,
+};
+
+/**
+ * xgpio_to_irq - Find out gpio to Linux irq mapping
+ * @gc: Pointer to gpio_chip device structure.
+ * @offset: Gpio pin offset
+ *
+ * Return:
+ * irq number otherwise -EINVAL
+ */
+static int xgpio_to_irq(struct gpio_chip *gc, unsigned int offset)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct xgpio_instance *chip = container_of(mm_gc, struct xgpio_instance,
+ mmchip);
+
+ return irq_find_mapping(chip->irq_domain, offset);
+}
+
+/**
+ * xgpio_irqhandler - Gpio interrupt service routine
+ * @desc: Pointer to interrupt description
+ */
+static void xgpio_irqhandler(struct irq_desc *desc)
+{
+ unsigned int irq = irq_desc_get_irq(desc);
+
+ struct xgpio_instance *chip = (struct xgpio_instance *)
+ irq_get_handler_data(irq);
+ struct of_mm_gpio_chip *mm_gc = &chip->mmchip;
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ int offset;
+ unsigned long val;
+
+ chained_irq_enter(irqchip, desc);
+
+ val = xgpio_readreg(mm_gc->regs + chip->offset);
+ /* Only rising edge is supported */
+ val &= chip->irq_enable;
+
+ for_each_set_bit(offset, &val, chip->mmchip.gc.ngpio) {
+ generic_handle_irq(chip->irq_base + offset);
+ }
+
+ xgpio_writereg(mm_gc->regs + XGPIO_IPISR_OFFSET,
+ chip->offset / XGPIO_CHANNEL_OFFSET + 1);
+
+ chained_irq_exit(irqchip, desc);
+}
+
+static struct lock_class_key gpio_lock_class;
+static struct lock_class_key gpio_request_class;
+
+/**
+ * xgpio_irq_setup - Allocate irq for gpio and setup appropriate functions
+ * @np: Device node of the GPIO chip
+ * @chip: Pointer to private gpio channel structure
+ *
+ * Return:
+ * 0 if success, otherwise -1
+ */
+static int xgpio_irq_setup(struct device_node *np, struct xgpio_instance *chip)
+{
+ u32 pin_num;
+ struct resource res;
+
+ int ret = of_irq_to_resource(np, 0, &res);
+
+ if (ret <= 0) {
+ pr_info("GPIO IRQ not connected\n");
+ return 0;
+ }
+
+ chip->mmchip.gc.to_irq = xgpio_to_irq;
+
+ chip->irq_base = irq_alloc_descs(-1, 1, chip->mmchip.gc.ngpio, 0);
+ if (chip->irq_base < 0) {
+ pr_err("Couldn't allocate IRQ numbers\n");
+ return -1;
+ }
+ chip->irq_domain = irq_domain_add_legacy(np, chip->mmchip.gc.ngpio,
+ chip->irq_base, 0,
+ &irq_domain_simple_ops, NULL);
+
+ /*
+ * set the irq chip, handler and irq chip data for callbacks for
+ * each pin
+ */
+ for (pin_num = 0; pin_num < chip->mmchip.gc.ngpio; pin_num++) {
+ u32 gpio_irq = irq_find_mapping(chip->irq_domain, pin_num);
+
+ irq_set_lockdep_class(gpio_irq, &gpio_lock_class,
+ &gpio_request_class);
+ pr_debug("IRQ Base: %d, Pin %d = IRQ %d\n",
+ chip->irq_base, pin_num, gpio_irq);
+ irq_set_chip_and_handler(gpio_irq, &xgpio_irqchip,
+ handle_simple_irq);
+ irq_set_chip_data(gpio_irq, (void *)chip);
+ }
+ irq_set_handler_data(res.start, (void *)chip);
+ irq_set_chained_handler(res.start, xgpio_irqhandler);
+
+ return 0;
+}
+
+static int xgpio_request(struct gpio_chip *chip, unsigned int offset)
+{
+ int ret = pm_runtime_get_sync(chip->parent);
+
+ /*
+ * If the device is already active pm_runtime_get() will return 1 on
+ * success, but gpio_request still needs to return 0.
+ */
+ return ret < 0 ? ret : 0;
+}
+
+static void xgpio_free(struct gpio_chip *chip, unsigned int offset)
+{
+ pm_runtime_put(chip->parent);
+}
+
+static int __maybe_unused xgpio_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ int irq;
+ struct irq_data *data;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_dbg(dev, "failed to get IRQ\n");
+ return 0;
+ }
+
+ data = irq_get_irq_data(irq);
+ if (!irqd_is_wakeup_set(data))
+ return pm_runtime_force_suspend(dev);
+
+ return 0;
+}
+
+static int __maybe_unused xgpio_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ int irq;
+ struct irq_data *data;
+
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_dbg(dev, "failed to get IRQ\n");
+ return 0;
+ }
+
+ data = irq_get_irq_data(irq);
+ if (!irqd_is_wakeup_set(data))
+ return pm_runtime_force_resume(dev);
+
+ return 0;
+}
+
+static int __maybe_unused xgpio_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct xgpio_instance *gpio = platform_get_drvdata(pdev);
+
+ clk_disable(gpio->clk);
+
+ return 0;
+}
+
+static int __maybe_unused xgpio_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct xgpio_instance *gpio = platform_get_drvdata(pdev);
+
+ return clk_enable(gpio->clk);
+}
+
+static const struct dev_pm_ops xgpio_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xgpio_suspend, xgpio_resume)
+ SET_RUNTIME_PM_OPS(xgpio_runtime_suspend,
+ xgpio_runtime_resume, NULL)
+};
+
+/**
+ * xgpio_remove - Remove method for the GPIO device.
* @pdev: pointer to the platform device
*
+ * This function removes gpiochips and frees all the allocated resources.
+ *
+ * Return: 0 always
+ */
+static int xgpio_remove(struct platform_device *pdev)
+{
+ struct xgpio_instance *chip = platform_get_drvdata(pdev);
+
+ of_mm_gpiochip_remove(&chip->mmchip);
+ if (chip->mmchip_dual)
+ of_mm_gpiochip_remove(chip->mmchip_dual);
+ if (!pm_runtime_suspended(&pdev->dev))
+ clk_disable(chip->clk);
+ clk_unprepare(chip->clk);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+/**
+ * xgpio_of_probe - Probe method for the GPIO device.
+ * @pdev: platform device instance
+ *
+ * This function probes the GPIO device in the device tree. It initializes the
+ * driver data structure.
+ *
* Return:
* It returns 0, if the driver is bound to the GPIO device, or
* a negative value if there is an error.
*/
-static int xgpio_probe(struct platform_device *pdev)
+static int xgpio_of_probe(struct platform_device *pdev)
{
- struct xgpio_instance *chip;
- int status = 0;
struct device_node *np = pdev->dev.of_node;
- u32 is_dual;
+ struct xgpio_instance *chip, *chip_dual;
+ int status = 0;
+ const u32 *tree_info;
+ u32 ngpio;
+ u32 cells = 2;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
- platform_set_drvdata(pdev, chip);
-
/* Update GPIO state shadow register with default value */
- of_property_read_u32(np, "xlnx,dout-default", &chip->gpio_state[0]);
+ of_property_read_u32(np, "xlnx,dout-default", &chip->gpio_state);
+
+ /* By default, all pins are inputs */
+ chip->gpio_dir = 0xFFFFFFFF;
/* Update GPIO direction shadow register with default value */
- if (of_property_read_u32(np, "xlnx,tri-default", &chip->gpio_dir[0]))
- chip->gpio_dir[0] = 0xFFFFFFFF;
+ of_property_read_u32(np, "xlnx,tri-default", &chip->gpio_dir);
+
+ chip->no_init = of_property_read_bool(np, "xlnx,no-init");
+
+ /* Update cells with gpio-cells value */
+ of_property_read_u32(np, "#gpio-cells", &cells);
/*
* Check device node and parent device node for device width
* and assume default width of 32
*/
- if (of_property_read_u32(np, "xlnx,gpio-width", &chip->gpio_width[0]))
- chip->gpio_width[0] = 32;
+ if (of_property_read_u32(np, "xlnx,gpio-width", &ngpio))
+ ngpio = 32;
+ chip->mmchip.gc.ngpio = (u16)ngpio;
+
+ spin_lock_init(&chip->gpio_lock);
+
+ chip->mmchip.gc.parent = &pdev->dev;
+ chip->mmchip.gc.owner = THIS_MODULE;
+ chip->mmchip.gc.of_xlate = xgpio_xlate;
+ chip->mmchip.gc.of_gpio_n_cells = cells;
+ chip->mmchip.gc.direction_input = xgpio_dir_in;
+ chip->mmchip.gc.direction_output = xgpio_dir_out;
+ chip->mmchip.gc.get = xgpio_get;
+ chip->mmchip.gc.set = xgpio_set;
+ chip->mmchip.gc.request = xgpio_request;
+ chip->mmchip.gc.free = xgpio_free;
+ chip->mmchip.gc.set_multiple = xgpio_set_multiple;
+
+ chip->mmchip.save_regs = xgpio_save_regs;
- spin_lock_init(&chip->gpio_lock[0]);
+ platform_set_drvdata(pdev, chip);
- if (of_property_read_u32(np, "xlnx,is-dual", &is_dual))
- is_dual = 0;
+ chip->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+ if (IS_ERR(chip->clk)) {
+ if (PTR_ERR(chip->clk) != -ENOENT) {
+ if (PTR_ERR(chip->clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Input clock not found\n");
+ return PTR_ERR(chip->clk);
+ }
+
+ /*
+ * Clock framework support is optional, continue on
+ * anyways if we don't find a matching clock.
+ */
+ chip->clk = NULL;
+ }
+
+ status = clk_prepare_enable(chip->clk);
+ if (status < 0) {
+ dev_err(&pdev->dev, "Failed to prepare clk\n");
+ return status;
+ }
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ /* Call the OF gpio helper to setup and register the GPIO device */
+ status = of_mm_gpiochip_add(np, &chip->mmchip);
+ if (status) {
+ pr_err("%pOF: error in probe function with status %d\n",
+ np, status);
+ goto err_unprepare_clk;
+ }
+
+ status = xgpio_irq_setup(np, chip);
+ if (status) {
+ pr_err("%s: GPIO IRQ initialization failed %d\n",
+ np->full_name, status);
+ goto err_pm_put;
+ }
+
+ pr_info("XGpio: %s: registered, base is %d\n", np->full_name,
+ chip->mmchip.gc.base);
+
+ tree_info = of_get_property(np, "xlnx,is-dual", NULL);
+ if (tree_info && be32_to_cpup(tree_info)) {
+ chip_dual = devm_kzalloc(&pdev->dev, sizeof(*chip_dual),
+ GFP_KERNEL);
+ if (!chip_dual)
+ goto err_pm_put;
+
+ /* Add dual channel offset */
+ chip_dual->offset = XGPIO_CHANNEL_OFFSET;
- if (is_dual) {
/* Update GPIO state shadow register with default value */
of_property_read_u32(np, "xlnx,dout-default-2",
- &chip->gpio_state[1]);
+ &chip_dual->gpio_state);
+
+ /* By default, all pins are inputs */
+ chip_dual->gpio_dir = 0xFFFFFFFF;
/* Update GPIO direction shadow register with default value */
- if (of_property_read_u32(np, "xlnx,tri-default-2",
- &chip->gpio_dir[1]))
- chip->gpio_dir[1] = 0xFFFFFFFF;
+ of_property_read_u32(np, "xlnx,tri-default-2",
+ &chip_dual->gpio_dir);
/*
* Check device node and parent device node for device width
* and assume default width of 32
*/
- if (of_property_read_u32(np, "xlnx,gpio2-width",
- &chip->gpio_width[1]))
- chip->gpio_width[1] = 32;
-
- spin_lock_init(&chip->gpio_lock[1]);
- }
-
- chip->gc.base = -1;
- chip->gc.ngpio = chip->gpio_width[0] + chip->gpio_width[1];
- chip->gc.parent = &pdev->dev;
- chip->gc.direction_input = xgpio_dir_in;
- chip->gc.direction_output = xgpio_dir_out;
- chip->gc.get = xgpio_get;
- chip->gc.set = xgpio_set;
- chip->gc.set_multiple = xgpio_set_multiple;
-
- chip->gc.label = dev_name(&pdev->dev);
-
- chip->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(chip->regs)) {
- dev_err(&pdev->dev, "failed to ioremap memory resource\n");
- return PTR_ERR(chip->regs);
- }
-
- xgpio_save_regs(chip);
+ if (of_property_read_u32(np, "xlnx,gpio2-width", &ngpio))
+ ngpio = 32;
+ chip_dual->mmchip.gc.ngpio = (u16)ngpio;
+
+ spin_lock_init(&chip_dual->gpio_lock);
+
+ chip_dual->mmchip.gc.parent = &pdev->dev;
+ chip_dual->mmchip.gc.owner = THIS_MODULE;
+ chip_dual->mmchip.gc.of_xlate = xgpio_xlate;
+ chip_dual->mmchip.gc.of_gpio_n_cells = cells;
+ chip_dual->mmchip.gc.direction_input = xgpio_dir_in;
+ chip_dual->mmchip.gc.direction_output = xgpio_dir_out;
+ chip_dual->mmchip.gc.get = xgpio_get;
+ chip_dual->mmchip.gc.set = xgpio_set;
+ chip_dual->mmchip.gc.request = xgpio_request;
+ chip_dual->mmchip.gc.free = xgpio_free;
+ chip_dual->mmchip.gc.set_multiple = xgpio_set_multiple;
+
+ chip_dual->mmchip.save_regs = xgpio_save_regs;
+
+ chip->mmchip_dual = &chip_dual->mmchip;
+
+ status = xgpio_irq_setup(np, chip_dual);
+ if (status) {
+ pr_err("%s: GPIO IRQ initialization failed %d\n",
+ np->full_name, status);
+ goto err_pm_put;
+ }
- status = devm_gpiochip_add_data(&pdev->dev, &chip->gc, chip);
- if (status) {
- dev_err(&pdev->dev, "failed to add GPIO chip\n");
- return status;
+ /* Call the OF gpio helper to setup and register the GPIO dev */
+ status = of_mm_gpiochip_add(np, &chip_dual->mmchip);
+ if (status) {
+ pr_err("%s: error in probe function with status %d\n",
+ np->full_name, status);
+ goto err_pm_put;
+ }
+ pr_info("XGpio: %s: dual channel registered, base is %d\n",
+ np->full_name, chip_dual->mmchip.gc.base);
}
+ pm_runtime_put(&pdev->dev);
return 0;
+
+err_pm_put:
+ pm_runtime_put(&pdev->dev);
+err_unprepare_clk:
+ pm_runtime_disable(&pdev->dev);
+ clk_disable_unprepare(chip->clk);
+ return status;
}
static const struct of_device_id xgpio_of_match[] = {
{ .compatible = "xlnx,xps-gpio-1.00.a", },
{ /* end of list */ },
};
-
MODULE_DEVICE_TABLE(of, xgpio_of_match);
-static struct platform_driver xgpio_plat_driver = {
- .probe = xgpio_probe,
- .driver = {
- .name = "gpio-xilinx",
- .of_match_table = xgpio_of_match,
+static struct platform_driver xilinx_gpio_driver = {
+ .probe = xgpio_of_probe,
+ .remove = xgpio_remove,
+ .driver = {
+ .name = "xilinx-gpio",
+ .of_match_table = xgpio_of_match,
+ .pm = &xgpio_dev_pm_ops,
},
};
-static int __init xgpio_init(void)
-{
- return platform_driver_register(&xgpio_plat_driver);
-}
-
-subsys_initcall(xgpio_init);
-
-static void __exit xgpio_exit(void)
-{
- platform_driver_unregister(&xgpio_plat_driver);
-}
-module_exit(xgpio_exit);
+module_platform_driver(xilinx_gpio_driver);
MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx GPIO driver");
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 05ba16fffdad..9a4e6a84ef90 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -10,6 +10,7 @@
#include <linux/gpio/driver.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -21,6 +22,9 @@
/* Maximum banks */
#define ZYNQ_GPIO_MAX_BANK 4
#define ZYNQMP_GPIO_MAX_BANK 6
+#define VERSAL_GPIO_MAX_BANK 4
+#define PMC_GPIO_MAX_BANK 5
+#define VERSAL_UNUSED_BANKS 2
#define ZYNQ_GPIO_BANK0_NGPIO 32
#define ZYNQ_GPIO_BANK1_NGPIO 22
@@ -95,6 +99,7 @@
/* set to differentiate zynq from zynqmp, 0=zynqmp, 1=zynq */
#define ZYNQ_GPIO_QUIRK_IS_ZYNQ BIT(0)
#define GPIO_QUIRK_DATA_RO_BUG BIT(1)
+#define GPIO_QUIRK_VERSAL BIT(2)
struct gpio_regs {
u32 datamsw[ZYNQMP_GPIO_MAX_BANK];
@@ -116,6 +121,7 @@ struct gpio_regs {
* @irq: interrupt for the GPIO device
* @p_data: pointer to platform data
* @context: context registers
+ * @dirlock: lock used for direction in/out synchronization
*/
struct zynq_gpio {
struct gpio_chip chip;
@@ -124,6 +130,7 @@ struct zynq_gpio {
int irq;
const struct zynq_platform_data *p_data;
struct gpio_regs context;
+ spinlock_t dirlock; /*lock used for direction in/out synchronization */
};
/**
@@ -196,6 +203,8 @@ static inline void zynq_gpio_get_bank_pin(unsigned int pin_num,
gpio->p_data->bank_min[bank];
return;
}
+ if (gpio->p_data->quirks & GPIO_QUIRK_VERSAL)
+ bank = bank + VERSAL_UNUSED_BANKS;
}
/* default */
@@ -297,6 +306,7 @@ static int zynq_gpio_dir_in(struct gpio_chip *chip, unsigned int pin)
{
u32 reg;
unsigned int bank_num, bank_pin_num;
+ unsigned long flags;
struct zynq_gpio *gpio = gpiochip_get_data(chip);
zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio);
@@ -310,9 +320,11 @@ static int zynq_gpio_dir_in(struct gpio_chip *chip, unsigned int pin)
return -EINVAL;
/* clear the bit in direction mode reg to set the pin as input */
+ spin_lock_irqsave(&gpio->dirlock, flags);
reg = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num));
reg &= ~BIT(bank_pin_num);
writel_relaxed(reg, gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num));
+ spin_unlock_irqrestore(&gpio->dirlock, flags);
return 0;
}
@@ -334,11 +346,13 @@ static int zynq_gpio_dir_out(struct gpio_chip *chip, unsigned int pin,
{
u32 reg;
unsigned int bank_num, bank_pin_num;
+ unsigned long flags;
struct zynq_gpio *gpio = gpiochip_get_data(chip);
zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio);
/* set the GPIO pin as output */
+ spin_lock_irqsave(&gpio->dirlock, flags);
reg = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num));
reg |= BIT(bank_pin_num);
writel_relaxed(reg, gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num));
@@ -347,6 +361,7 @@ static int zynq_gpio_dir_out(struct gpio_chip *chip, unsigned int pin,
reg = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_OUTEN_OFFSET(bank_num));
reg |= BIT(bank_pin_num);
writel_relaxed(reg, gpio->base_addr + ZYNQ_GPIO_OUTEN_OFFSET(bank_num));
+ spin_unlock_irqrestore(&gpio->dirlock, flags);
/* set the state of the pin */
zynq_gpio_set_value(chip, pin, state);
@@ -647,6 +662,8 @@ static void zynq_gpio_irqhandler(struct irq_desc *desc)
int_enb = readl_relaxed(gpio->base_addr +
ZYNQ_GPIO_INTMASK_OFFSET(bank_num));
zynq_gpio_handle_bank_irq(gpio, bank_num, int_sts & ~int_enb);
+ if (gpio->p_data->quirks & GPIO_QUIRK_VERSAL)
+ bank_num = bank_num + VERSAL_UNUSED_BANKS;
}
chained_irq_exit(irqchip, desc);
@@ -676,6 +693,8 @@ static void zynq_gpio_save_context(struct zynq_gpio *gpio)
gpio->context.int_any[bank_num] =
readl_relaxed(gpio->base_addr +
ZYNQ_GPIO_INTANY_OFFSET(bank_num));
+ if (gpio->p_data->quirks & GPIO_QUIRK_VERSAL)
+ bank_num = bank_num + VERSAL_UNUSED_BANKS;
}
}
@@ -707,6 +726,8 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio)
writel_relaxed(~(gpio->context.int_en[bank_num]),
gpio->base_addr +
ZYNQ_GPIO_INTEN_OFFSET(bank_num));
+ if (gpio->p_data->quirks & GPIO_QUIRK_VERSAL)
+ bank_num = bank_num + VERSAL_UNUSED_BANKS;
}
}
@@ -715,6 +736,9 @@ static int __maybe_unused zynq_gpio_suspend(struct device *dev)
struct zynq_gpio *gpio = dev_get_drvdata(dev);
struct irq_data *data = irq_get_irq_data(gpio->irq);
+ if (!device_may_wakeup(dev))
+ disable_irq(gpio->irq);
+
if (!irqd_is_wakeup_set(data)) {
zynq_gpio_save_context(gpio);
return pm_runtime_force_suspend(dev);
@@ -729,6 +753,9 @@ static int __maybe_unused zynq_gpio_resume(struct device *dev)
struct irq_data *data = irq_get_irq_data(gpio->irq);
int ret;
+ if (!device_may_wakeup(dev))
+ enable_irq(gpio->irq);
+
if (!irqd_is_wakeup_set(data)) {
ret = pm_runtime_force_resume(dev);
zynq_gpio_restore_context(gpio);
@@ -778,6 +805,31 @@ static const struct dev_pm_ops zynq_gpio_dev_pm_ops = {
zynq_gpio_runtime_resume, NULL)
};
+static const struct zynq_platform_data versal_gpio_def = {
+ .label = "versal_gpio",
+ .quirks = GPIO_QUIRK_VERSAL,
+ .ngpio = 58,
+ .max_bank = VERSAL_GPIO_MAX_BANK,
+ .bank_min[0] = 0,
+ .bank_max[0] = 25, /* 0 to 25 are connected to MIOs (26 pins) */
+ .bank_min[3] = 26,
+ .bank_max[3] = 57, /* Bank 3 is connected to FMIOs (32 pins) */
+};
+
+static const struct zynq_platform_data pmc_gpio_def = {
+ .label = "pmc_gpio",
+ .ngpio = 116,
+ .max_bank = PMC_GPIO_MAX_BANK,
+ .bank_min[0] = 0,
+ .bank_max[0] = 25, /* 0 to 25 are connected to MIOs (26 pins) */
+ .bank_min[1] = 26,
+ .bank_max[1] = 51, /* Bank 1 are connected to MIOs (26 pins) */
+ .bank_min[3] = 52,
+ .bank_max[3] = 83, /* Bank 3 is connected to EMIOs (32 pins) */
+ .bank_min[4] = 84,
+ .bank_max[4] = 115, /* Bank 4 is connected to EMIOs (32 pins) */
+};
+
static const struct zynq_platform_data zynqmp_gpio_def = {
.label = "zynqmp_gpio",
.quirks = GPIO_QUIRK_DATA_RO_BUG,
@@ -815,6 +867,8 @@ static const struct zynq_platform_data zynq_gpio_def = {
static const struct of_device_id zynq_gpio_of_match[] = {
{ .compatible = "xlnx,zynq-gpio-1.0", .data = &zynq_gpio_def },
{ .compatible = "xlnx,zynqmp-gpio-1.0", .data = &zynqmp_gpio_def },
+ { .compatible = "xlnx,versal-gpio-1.0", .data = &versal_gpio_def },
+ { .compatible = "xlnx,pmc-gpio-1.0", .data = &pmc_gpio_def },
{ /* end of table */ }
};
MODULE_DEVICE_TABLE(of, zynq_gpio_of_match);
@@ -876,7 +930,8 @@ static int zynq_gpio_probe(struct platform_device *pdev)
/* Retrieve GPIO clock */
gpio->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(gpio->clk)) {
- dev_err(&pdev->dev, "input clock not found.\n");
+ if (PTR_ERR(gpio->clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "input clock not found.\n");
return PTR_ERR(gpio->clk);
}
ret = clk_prepare_enable(gpio->clk);
@@ -885,6 +940,8 @@ static int zynq_gpio_probe(struct platform_device *pdev)
return ret;
}
+ spin_lock_init(&gpio->dirlock);
+
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
@@ -892,9 +949,12 @@ static int zynq_gpio_probe(struct platform_device *pdev)
goto err_pm_dis;
/* disable interrupts for all banks */
- for (bank_num = 0; bank_num < gpio->p_data->max_bank; bank_num++)
+ for (bank_num = 0; bank_num < gpio->p_data->max_bank; bank_num++) {
writel_relaxed(ZYNQ_GPIO_IXR_DISABLE_ALL, gpio->base_addr +
ZYNQ_GPIO_INTDIS_OFFSET(bank_num));
+ if (gpio->p_data->quirks & GPIO_QUIRK_VERSAL)
+ bank_num = bank_num + VERSAL_UNUSED_BANKS;
+ }
/* Set up the GPIO irqchip */
girq = &chip->irq;
@@ -919,6 +979,8 @@ static int zynq_gpio_probe(struct platform_device *pdev)
goto err_pm_put;
}
+ irq_set_status_flags(gpio->irq, IRQ_DISABLE_UNLAZY);
+ device_init_wakeup(&pdev->dev, 1);
pm_runtime_put(&pdev->dev);
return 0;
@@ -960,22 +1022,7 @@ static struct platform_driver zynq_gpio_driver = {
.remove = zynq_gpio_remove,
};
-/**
- * zynq_gpio_init - Initial driver registration call
- *
- * Return: value from platform_driver_register
- */
-static int __init zynq_gpio_init(void)
-{
- return platform_driver_register(&zynq_gpio_driver);
-}
-postcore_initcall(zynq_gpio_init);
-
-static void __exit zynq_gpio_exit(void)
-{
- platform_driver_unregister(&zynq_gpio_driver);
-}
-module_exit(zynq_gpio_exit);
+module_platform_driver(zynq_gpio_driver);
MODULE_AUTHOR("Xilinx Inc.");
MODULE_DESCRIPTION("Zynq GPIO driver");
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 43594978958e..482718d23301 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -346,6 +346,8 @@ source "drivers/gpu/drm/bridge/Kconfig"
source "drivers/gpu/drm/sti/Kconfig"
+source "drivers/gpu/drm/zocl/Kconfig"
+
source "drivers/gpu/drm/imx/Kconfig"
source "drivers/gpu/drm/ingenic/Kconfig"
@@ -372,6 +374,8 @@ source "drivers/gpu/drm/tiny/Kconfig"
source "drivers/gpu/drm/pl111/Kconfig"
+source "drivers/gpu/drm/xlnx/Kconfig"
+
source "drivers/gpu/drm/tve200/Kconfig"
source "drivers/gpu/drm/xen/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 7f72ef5e7811..2483104f74af 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -101,6 +101,7 @@ obj-$(CONFIG_DRM_MSM) += msm/
obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-$(CONFIG_DRM_STM) += stm/
obj-$(CONFIG_DRM_STI) += sti/
+obj-$(CONFIG_DRM_ZOCL) += zocl/
obj-$(CONFIG_DRM_IMX) += imx/
obj-$(CONFIG_DRM_INGENIC) += ingenic/
obj-$(CONFIG_DRM_MEDIATEK) += mediatek/
@@ -116,6 +117,7 @@ obj-$(CONFIG_DRM_ZTE) += zte/
obj-$(CONFIG_DRM_MXSFB) += mxsfb/
obj-y += tiny/
obj-$(CONFIG_DRM_PL111) += pl111/
+obj-$(CONFIG_DRM_XLNX) += xlnx/
obj-$(CONFIG_DRM_TVE200) += tve200/
obj-$(CONFIG_DRM_XEN) += xen/
obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 9801c0333eca..bcf2eab31022 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -69,7 +69,6 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
u8 h_div = 1, v_div = 1;
u32 block_w = drm_format_info_block_width(fb->format, plane);
u32 block_h = drm_format_info_block_height(fb->format, plane);
- u32 block_size = fb->format->char_per_block[plane];
u32 sample_x;
u32 sample_y;
u32 block_start_y;
@@ -92,7 +91,7 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
num_hblocks = sample_x / block_w;
paddr += fb->pitches[plane] * block_start_y;
- paddr += block_size * num_hblocks;
+ paddr += drm_format_plane_width_bytes(fb->format, plane, num_hblocks);
return paddr;
}
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 4dd12a069474..33131bb99df5 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -73,7 +73,7 @@ MODULE_PARM_DESC(drm_fbdev_overalloc,
* considered as a broken and legacy behaviour from a modern fbdev device.
*/
#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
-static bool drm_leak_fbdev_smem = false;
+static bool drm_leak_fbdev_smem = true;
module_param_unsafe(drm_leak_fbdev_smem, bool, 0600);
MODULE_PARM_DESC(drm_leak_fbdev_smem,
"Allow unsafe leaking fbdev physical smem address [default=false]");
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index b234bfaeda06..74ba30c1ac6d 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -29,6 +29,7 @@
#include <drm/drm_device.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
static char printable_char(int c)
{
@@ -224,6 +225,8 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_NV61, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_NV24, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_NV42, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_XV15, .depth = 0, .num_planes = 2, .pixels_per_macropixel = { 3, 3, 0 }, .bytes_per_macropixel = { 4, 8, 0 }, .hsub = 2, .vsub = 2, }, /* FIXME consider is_yuv = true */
+ { .format = DRM_FORMAT_XV20, .depth = 0, .num_planes = 2, .pixels_per_macropixel = { 3, 3, 0 }, .bytes_per_macropixel = { 4, 8, 0 }, .hsub = 2, .vsub = 1, },
{ .format = DRM_FORMAT_YUYV, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_YVYU, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
@@ -274,6 +277,11 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_YUV420_10BIT, .depth = 0,
.num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 2, .vsub = 2,
.is_yuv = true },
+ { .format = DRM_FORMAT_AVUY, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_XVUY8888, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XVUY2101010, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_Y8, .depth = 0, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_Y10, .depth = 0, .num_planes = 1, .pixels_per_macropixel = { 3, 0, 0 }, .bytes_per_macropixel = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
};
unsigned int i;
@@ -393,3 +401,38 @@ uint64_t drm_format_info_min_pitch(const struct drm_format_info *info,
drm_format_info_block_height(info, plane));
}
EXPORT_SYMBOL(drm_format_info_min_pitch);
+
+/**
+ * drm_format_plane_width_bytes - bytes of the given width of the plane
+ * @info: DRM format information
+ * @plane: plane index
+ * @width: width to get the number of bytes
+ *
+ * This returns the number of bytes for given @width and @plane.
+ * The @char_per_block or macro pixel information should be valid.
+ *
+ * Returns:
+ * The bytes of @width of @plane. 0 for invalid format info.
+ */
+uint64_t drm_format_plane_width_bytes(const struct drm_format_info *info,
+ int plane, unsigned int width)
+{
+ if (!info || plane >= info->num_planes)
+ return 0;
+
+ if (info->char_per_block[plane])
+ return drm_format_info_min_pitch(info, plane, width);
+
+ if (WARN_ON(!info->bytes_per_macropixel[plane] ||
+ !info->pixels_per_macropixel[plane])) {
+ struct drm_format_name_buf buf;
+
+ DRM_WARN("Either cpp or macro-pixel info should be valid: %s\n",
+ drm_get_format_name(info->format, &buf));
+ return 0;
+ }
+
+ return DIV_ROUND_UP(width * info->bytes_per_macropixel[plane],
+ info->pixels_per_macropixel[plane]);
+}
+EXPORT_SYMBOL(drm_format_plane_width_bytes);
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 57ac94ce9b9e..4c568d992d7d 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -202,7 +202,8 @@ static int framebuffer_check(struct drm_device *dev,
for (i = 0; i < info->num_planes; i++) {
unsigned int width = fb_plane_width(r->width, info, i);
unsigned int height = fb_plane_height(r->height, info, i);
- unsigned int block_size = info->char_per_block[i];
+ unsigned int block_size = info->char_per_block[i] ||
+ info->bytes_per_macropixel[i];
u64 min_pitch = drm_format_info_min_pitch(info, i, width);
if (!block_size && (r->modifier[i] == DRM_FORMAT_MOD_LINEAR)) {
@@ -296,7 +297,8 @@ drm_internal_framebuffer_create(struct drm_device *dev,
struct drm_framebuffer *fb;
int ret;
- if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) {
+ if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS |
+ DRM_MODE_FB_ALTERNATE_TOP | DRM_MODE_FB_ALTERNATE_BOTTOM)) {
DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/i2c/Kconfig b/drivers/gpu/drm/i2c/Kconfig
index 6f19e1c35e30..baff73e82eaf 100644
--- a/drivers/gpu/drm/i2c/Kconfig
+++ b/drivers/gpu/drm/i2c/Kconfig
@@ -2,6 +2,13 @@
menu "I2C encoder or helper chips"
depends on DRM && DRM_KMS_HELPER && I2C
+config DRM_I2C_ADV7511_LEGACY
+ tristate "ADV7511 encoder"
+ depends on OF && !DRM_I2C_ADV7511
+ select REGMAP_I2C
+ help
+ Support for the Analog Devices ADV7511(W) and ADV7513 HDMI encoders.
+
config DRM_I2C_CH7006
tristate "Chrontel ch7006 TV encoder"
default m if DRM_NOUVEAU
diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
index a962f6f08568..c6450d638af1 100644
--- a/drivers/gpu/drm/i2c/Makefile
+++ b/drivers/gpu/drm/i2c/Makefile
@@ -1,4 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y := -Iinclude/drm
+
+obj-$(CONFIG_DRM_I2C_ADV7511_LEGACY) += adv7511.o
+
ch7006-y := ch7006_drv.o ch7006_mode.o
obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
new file mode 100644
index 000000000000..3ebbe76972cf
--- /dev/null
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -0,0 +1,1025 @@
+/*
+ * Analog Devices ADV7511 HDMI transmitter driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_encoder_slave.h>
+#include <drm/drm_probe_helper.h>
+
+#include "adv7511.h"
+
+struct adv7511 {
+ struct i2c_client *i2c_main;
+ struct i2c_client *i2c_edid;
+
+ struct regmap *regmap;
+ struct regmap *packet_memory_regmap;
+ enum drm_connector_status status;
+ bool powered;
+
+ unsigned int f_tmds;
+
+ unsigned int current_edid_segment;
+ uint8_t edid_buf[256];
+ bool edid_read;
+
+ wait_queue_head_t wq;
+ struct drm_encoder *encoder;
+
+ bool embedded_sync;
+ enum adv7511_sync_polarity vsync_polarity;
+ enum adv7511_sync_polarity hsync_polarity;
+ bool rgb;
+
+ struct edid *edid;
+
+ struct gpio_desc *gpio_pd;
+};
+
+static struct adv7511 *encoder_to_adv7511(struct drm_encoder *encoder)
+{
+ return to_encoder_slave(encoder)->slave_priv;
+}
+
+/* ADI recommended values for proper operation. */
+static const struct reg_sequence adv7511_fixed_registers[] = {
+ { 0x98, 0x03 },
+ { 0x9a, 0xe0 },
+ { 0x9c, 0x30 },
+ { 0x9d, 0x61 },
+ { 0xa2, 0xa4 },
+ { 0xa3, 0xa4 },
+ { 0xe0, 0xd0 },
+ { 0xf9, 0x00 },
+ { 0x55, 0x02 },
+};
+
+/* -----------------------------------------------------------------------------
+ * Register access
+ */
+
+static const uint8_t adv7511_register_defaults[] = {
+ 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 00 */
+ 0x00, 0x00, 0x01, 0x0e, 0xbc, 0x18, 0x01, 0x13,
+ 0x25, 0x37, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 10 */
+ 0x46, 0x62, 0x04, 0xa8, 0x00, 0x00, 0x1c, 0x84,
+ 0x1c, 0xbf, 0x04, 0xa8, 0x1e, 0x70, 0x02, 0x1e, /* 20 */
+ 0x00, 0x00, 0x04, 0xa8, 0x08, 0x12, 0x1b, 0xac,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 30 */
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0xb0,
+ 0x00, 0x50, 0x90, 0x7e, 0x79, 0x70, 0x00, 0x00, /* 40 */
+ 0x00, 0xa8, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x02, 0x0d, 0x00, 0x00, 0x00, 0x00, /* 50 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 60 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 70 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 80 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 90 */
+ 0x0b, 0x02, 0x00, 0x18, 0x5a, 0x60, 0x00, 0x00,
+ 0x00, 0x00, 0x80, 0x80, 0x08, 0x04, 0x00, 0x00, /* a0 */
+ 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x40, 0x14,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* b0 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* c0 */
+ 0x00, 0x03, 0x00, 0x00, 0x02, 0x00, 0x01, 0x04,
+ 0x30, 0xff, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00, /* d0 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x01,
+ 0x80, 0x75, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, /* e0 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0x11, 0x00, /* f0 */
+ 0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static bool adv7511_register_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ADV7511_REG_CHIP_REVISION:
+ case ADV7511_REG_SPDIF_FREQ:
+ case ADV7511_REG_CTS_AUTOMATIC1:
+ case ADV7511_REG_CTS_AUTOMATIC2:
+ case ADV7511_REG_VIC_DETECTED:
+ case ADV7511_REG_VIC_SEND:
+ case ADV7511_REG_AUX_VIC_DETECTED:
+ case ADV7511_REG_STATUS:
+ case ADV7511_REG_GC(1):
+ case ADV7511_REG_INT(0):
+ case ADV7511_REG_INT(1):
+ case ADV7511_REG_PLL_STATUS:
+ case ADV7511_REG_AN(0):
+ case ADV7511_REG_AN(1):
+ case ADV7511_REG_AN(2):
+ case ADV7511_REG_AN(3):
+ case ADV7511_REG_AN(4):
+ case ADV7511_REG_AN(5):
+ case ADV7511_REG_AN(6):
+ case ADV7511_REG_AN(7):
+ case ADV7511_REG_HDCP_STATUS:
+ case ADV7511_REG_BCAPS:
+ case ADV7511_REG_BKSV(0):
+ case ADV7511_REG_BKSV(1):
+ case ADV7511_REG_BKSV(2):
+ case ADV7511_REG_BKSV(3):
+ case ADV7511_REG_BKSV(4):
+ case ADV7511_REG_DDC_STATUS:
+ case ADV7511_REG_EDID_READ_CTRL:
+ case ADV7511_REG_BSTATUS(0):
+ case ADV7511_REG_BSTATUS(1):
+ case ADV7511_REG_CHIP_ID_HIGH:
+ case ADV7511_REG_CHIP_ID_LOW:
+ return true;
+ }
+
+ return false;
+}
+
+static const struct regmap_config adv7511_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults_raw = adv7511_register_defaults,
+ .num_reg_defaults_raw = ARRAY_SIZE(adv7511_register_defaults),
+
+ .volatile_reg = adv7511_register_volatile,
+};
+
+/* -----------------------------------------------------------------------------
+ * Hardware configuration
+ */
+
+static void adv7511_set_colormap(struct adv7511 *adv7511, bool enable,
+ const uint16_t *coeff,
+ unsigned int scaling_factor)
+{
+ unsigned int i;
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(1),
+ ADV7511_CSC_UPDATE_MODE, ADV7511_CSC_UPDATE_MODE);
+
+ if (enable) {
+ for (i = 0; i < 12; ++i) {
+ regmap_update_bits(adv7511->regmap,
+ ADV7511_REG_CSC_UPPER(i),
+ 0x1f, coeff[i] >> 8);
+ regmap_write(adv7511->regmap,
+ ADV7511_REG_CSC_LOWER(i),
+ coeff[i] & 0xff);
+ }
+ }
+
+ if (enable)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(0),
+ 0xe0, 0x80 | (scaling_factor << 5));
+ else
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(0),
+ 0x80, 0x00);
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(1),
+ ADV7511_CSC_UPDATE_MODE, 0);
+}
+
+static int adv7511_packet_enable(struct adv7511 *adv7511, unsigned int packet)
+{
+ if (packet & 0xff)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
+ packet, 0xff);
+
+ if (packet & 0xff00) {
+ packet >>= 8;
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
+ packet, 0xff);
+ }
+
+ return 0;
+}
+
+static int adv7511_packet_disable(struct adv7511 *adv7511, unsigned int packet)
+{
+ if (packet & 0xff)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
+ packet, 0x00);
+
+ if (packet & 0xff00) {
+ packet >>= 8;
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
+ packet, 0x00);
+ }
+
+ return 0;
+}
+
+/* Coefficients for adv7511 color space conversion */
+static const uint16_t adv7511_csc_ycbcr_to_rgb[] = {
+ 0x0734, 0x04ad, 0x0000, 0x1c1b,
+ 0x1ddc, 0x04ad, 0x1f24, 0x0135,
+ 0x0000, 0x04ad, 0x087c, 0x1b77,
+};
+
+static void adv7511_set_config_csc(struct adv7511 *adv7511,
+ struct drm_connector *connector,
+ bool rgb)
+{
+ struct adv7511_video_config config;
+ bool output_format_422, output_format_ycbcr;
+ unsigned int mode;
+ uint8_t infoframe[17];
+
+ if (adv7511->edid)
+ config.hdmi_mode = drm_detect_hdmi_monitor(adv7511->edid);
+ else
+ config.hdmi_mode = false;
+
+ hdmi_avi_infoframe_init(&config.avi_infoframe);
+
+ config.avi_infoframe.scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
+
+ if (rgb) {
+ config.csc_enable = false;
+ config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
+ } else {
+ config.csc_scaling_factor = ADV7511_CSC_SCALING_4;
+ config.csc_coefficents = adv7511_csc_ycbcr_to_rgb;
+
+ if ((connector->display_info.color_formats &
+ DRM_COLOR_FORMAT_YCRCB422) &&
+ config.hdmi_mode) {
+ config.csc_enable = false;
+ config.avi_infoframe.colorspace =
+ HDMI_COLORSPACE_YUV422;
+ } else {
+ config.csc_enable = true;
+ config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
+ }
+ }
+
+ if (config.hdmi_mode) {
+ mode = ADV7511_HDMI_CFG_MODE_HDMI;
+
+ switch (config.avi_infoframe.colorspace) {
+ case HDMI_COLORSPACE_YUV444:
+ output_format_422 = false;
+ output_format_ycbcr = true;
+ break;
+ case HDMI_COLORSPACE_YUV422:
+ output_format_422 = true;
+ output_format_ycbcr = true;
+ break;
+ default:
+ output_format_422 = false;
+ output_format_ycbcr = false;
+ break;
+ }
+ } else {
+ mode = ADV7511_HDMI_CFG_MODE_DVI;
+ output_format_422 = false;
+ output_format_ycbcr = false;
+ }
+
+ adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
+
+ adv7511_set_colormap(adv7511, config.csc_enable,
+ config.csc_coefficents,
+ config.csc_scaling_factor);
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG1, 0x81,
+ (output_format_422 << 7) | output_format_ycbcr);
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_HDCP_HDMI_CFG,
+ ADV7511_HDMI_CFG_MODE_MASK, mode);
+
+ hdmi_avi_infoframe_pack(&config.avi_infoframe, infoframe,
+ sizeof(infoframe));
+
+ /* The AVI infoframe id is not configurable */
+ regmap_bulk_write(adv7511->regmap, ADV7511_REG_AVI_INFOFRAME_VERSION,
+ infoframe + 1, sizeof(infoframe) - 1);
+
+ adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
+}
+
+static void adv7511_set_link_config(struct adv7511 *adv7511,
+ const struct adv7511_link_config *config)
+{
+ /*
+ * The input style values documented in the datasheet don't match the
+ * hardware register field values :-(
+ */
+ static const unsigned int input_styles[4] = { 0, 2, 1, 3 };
+
+ unsigned int clock_delay;
+ unsigned int color_depth;
+ unsigned int input_id;
+
+ clock_delay = (config->clock_delay + 1200) / 400;
+ color_depth = config->input_color_depth == 8 ? 3
+ : (config->input_color_depth == 10 ? 1 : 2);
+
+ /* TODO Support input ID 6 */
+ if (config->input_colorspace != HDMI_COLORSPACE_YUV422)
+ input_id = config->input_clock == ADV7511_INPUT_CLOCK_DDR
+ ? 5 : 0;
+ else if (config->input_clock == ADV7511_INPUT_CLOCK_DDR)
+ input_id = config->embedded_sync ? 8 : 7;
+ else if (config->input_clock == ADV7511_INPUT_CLOCK_2X)
+ input_id = config->embedded_sync ? 4 : 3;
+ else
+ input_id = config->embedded_sync ? 2 : 1;
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG, 0xf,
+ input_id);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG1, 0x7e,
+ (color_depth << 4) |
+ (input_styles[config->input_style] << 2));
+ regmap_write(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG2,
+ config->input_justification << 3);
+ regmap_write(adv7511->regmap, ADV7511_REG_TIMING_GEN_SEQ,
+ config->sync_pulse << 2);
+
+ regmap_write(adv7511->regmap, 0xba, clock_delay << 5);
+
+ adv7511->embedded_sync = config->embedded_sync;
+ adv7511->hsync_polarity = config->hsync_polarity;
+ adv7511->vsync_polarity = config->vsync_polarity;
+ adv7511->rgb = config->input_colorspace == HDMI_COLORSPACE_RGB;
+}
+
+static void adv7511_power_on(struct adv7511 *adv7511)
+{
+ adv7511->current_edid_segment = -1;
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ ADV7511_POWER_POWER_DOWN, 0);
+ if (adv7511->i2c_main->irq) {
+ /*
+ * Documentation says the INT_ENABLE registers are reset in
+ * POWER_DOWN mode. My 7511w preserved the bits, however.
+ * Still, let's be safe and stick to the documentation.
+ */
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+ ADV7511_INT0_EDID_READY);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+ ADV7511_INT1_DDC_ERROR);
+ }
+
+ /*
+ * Per spec it is allowed to pulse the HPD signal to indicate that the
+ * EDID information has changed. Some monitors do this when they wakeup
+ * from standby or are enabled. When the HPD goes low the adv7511 is
+ * reset and the outputs are disabled which might cause the monitor to
+ * go to standby again. To avoid this we ignore the HPD pin for the
+ * first few seconds after enabling the output.
+ */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+ ADV7511_REG_POWER2_HPD_SRC_MASK,
+ ADV7511_REG_POWER2_HPD_SRC_NONE);
+
+ /*
+ * Most of the registers are reset during power down or when HPD is low.
+ */
+ regcache_sync(adv7511->regmap);
+
+ adv7511->powered = true;
+}
+
+static void adv7511_power_off(struct adv7511 *adv7511)
+{
+ /* TODO: setup additional power down modes */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ ADV7511_POWER_POWER_DOWN,
+ ADV7511_POWER_POWER_DOWN);
+ regcache_mark_dirty(adv7511->regmap);
+
+ adv7511->powered = false;
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt and hotplug detection
+ */
+
+static bool adv7511_hpd(struct adv7511 *adv7511)
+{
+ unsigned int irq0;
+ int ret;
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
+ if (ret < 0)
+ return false;
+
+ if (irq0 & ADV7511_INT0_HPD) {
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
+ ADV7511_INT0_HPD);
+ return true;
+ }
+
+ return false;
+}
+
+static int adv7511_irq_process(struct adv7511 *adv7511)
+{
+ unsigned int irq0, irq1;
+ int ret;
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(1), &irq1);
+ if (ret < 0)
+ return ret;
+
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
+
+ if (irq0 & ADV7511_INT0_HPD && adv7511->encoder)
+ drm_helper_hpd_irq_event(adv7511->encoder->dev);
+
+ if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
+ adv7511->edid_read = true;
+
+ if (adv7511->i2c_main->irq)
+ wake_up_all(&adv7511->wq);
+ }
+
+ return 0;
+}
+
+static irqreturn_t adv7511_irq_handler(int irq, void *devid)
+{
+ struct adv7511 *adv7511 = devid;
+ int ret;
+
+ ret = adv7511_irq_process(adv7511);
+ return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
+}
+
+/* -----------------------------------------------------------------------------
+ * EDID retrieval
+ */
+
+static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout)
+{
+ int ret;
+
+ if (adv7511->i2c_main->irq) {
+ ret = wait_event_interruptible_timeout(adv7511->wq,
+ adv7511->edid_read, msecs_to_jiffies(timeout));
+ } else {
+ for (; timeout > 0; timeout -= 25) {
+ ret = adv7511_irq_process(adv7511);
+ if (ret < 0)
+ break;
+
+ if (adv7511->edid_read)
+ break;
+
+ msleep(25);
+ }
+ }
+
+ return adv7511->edid_read ? 0 : -EIO;
+}
+
+static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
+ size_t len)
+{
+ struct adv7511 *adv7511 = data;
+ struct i2c_msg xfer[2];
+ uint8_t offset;
+ unsigned int i;
+ int ret;
+
+ if (len > 128)
+ return -EINVAL;
+
+ if (adv7511->current_edid_segment != block / 2) {
+ unsigned int status;
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_DDC_STATUS,
+ &status);
+ if (ret < 0)
+ return ret;
+
+ if (status != 2) {
+ adv7511->edid_read = false;
+ regmap_write(adv7511->regmap, ADV7511_REG_EDID_SEGMENT,
+ block);
+ ret = adv7511_wait_for_edid(adv7511, 200);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Break this apart, hopefully more I2C controllers will
+ * support 64 byte transfers than 256 byte transfers
+ */
+
+ xfer[0].addr = adv7511->i2c_edid->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = 1;
+ xfer[0].buf = &offset;
+ xfer[1].addr = adv7511->i2c_edid->addr;
+ xfer[1].flags = I2C_M_RD;
+ xfer[1].len = 64;
+ xfer[1].buf = adv7511->edid_buf;
+
+ offset = 0;
+
+ for (i = 0; i < 4; ++i) {
+ ret = i2c_transfer(adv7511->i2c_edid->adapter, xfer,
+ ARRAY_SIZE(xfer));
+ if (ret < 0)
+ return ret;
+ else if (ret != 2)
+ return -EIO;
+
+ xfer[1].buf += 64;
+ offset += 64;
+ }
+
+ adv7511->current_edid_segment = block / 2;
+ }
+
+ if (block % 2 == 0)
+ memcpy(buf, adv7511->edid_buf, len);
+ else
+ memcpy(buf, adv7511->edid_buf + 128, len);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Encoder operations
+ */
+
+static int adv7511_get_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+ struct edid *edid;
+ unsigned int count;
+
+ /* Reading the EDID only works if the device is powered */
+ if (!adv7511->powered) {
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ ADV7511_POWER_POWER_DOWN, 0);
+ if (adv7511->i2c_main->irq) {
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+ ADV7511_INT0_EDID_READY);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+ ADV7511_INT1_DDC_ERROR);
+ }
+ adv7511->current_edid_segment = -1;
+ }
+
+ edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511);
+
+ if (!adv7511->powered)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ ADV7511_POWER_POWER_DOWN,
+ ADV7511_POWER_POWER_DOWN);
+
+ kfree(adv7511->edid);
+ adv7511->edid = edid;
+ if (!edid)
+ return 0;
+
+ drm_connector_update_edid_property(connector, edid);
+ count = drm_add_edid_modes(connector, edid);
+
+ adv7511_set_config_csc(adv7511, connector, adv7511->rgb);
+
+ return count;
+}
+
+static void adv7511_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+
+ if (mode == DRM_MODE_DPMS_ON)
+ adv7511_power_on(adv7511);
+ else
+ adv7511_power_off(adv7511);
+}
+
+static enum drm_connector_status
+adv7511_encoder_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+ enum drm_connector_status status;
+ unsigned int val;
+ bool hpd;
+ int ret;
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_STATUS, &val);
+ if (ret < 0)
+ return connector_status_disconnected;
+
+ if (val & ADV7511_STATUS_HPD)
+ status = connector_status_connected;
+ else
+ status = connector_status_disconnected;
+
+ hpd = adv7511_hpd(adv7511);
+
+ /* The chip resets itself when the cable is disconnected, so in case
+ * there is a pending HPD interrupt and the cable is connected there was
+ * at least one transition from disconnected to connected and the chip
+ * has to be reinitialized. */
+ if (status == connector_status_connected && hpd && adv7511->powered) {
+ regcache_mark_dirty(adv7511->regmap);
+ adv7511_power_on(adv7511);
+ adv7511_get_modes(encoder, connector);
+ if (adv7511->status == connector_status_connected)
+ status = connector_status_disconnected;
+ } else {
+ /* Re-enable HPD sensing */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+ ADV7511_REG_POWER2_HPD_SRC_MASK,
+ ADV7511_REG_POWER2_HPD_SRC_BOTH);
+ }
+
+ adv7511->status = status;
+ return status;
+}
+
+static int adv7511_encoder_mode_valid(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ if (mode->clock > 165000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static void adv7511_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+ unsigned int low_refresh_rate;
+ unsigned int hsync_polarity = 0;
+ unsigned int vsync_polarity = 0;
+
+ if (adv7511->embedded_sync) {
+ unsigned int hsync_offset, hsync_len;
+ unsigned int vsync_offset, vsync_len;
+
+ hsync_offset = adj_mode->crtc_hsync_start -
+ adj_mode->crtc_hdisplay;
+ vsync_offset = adj_mode->crtc_vsync_start -
+ adj_mode->crtc_vdisplay;
+ hsync_len = adj_mode->crtc_hsync_end -
+ adj_mode->crtc_hsync_start;
+ vsync_len = adj_mode->crtc_vsync_end -
+ adj_mode->crtc_vsync_start;
+
+ /* The hardware vsync generator has an off-by-one bug */
+ vsync_offset += 1;
+
+ regmap_write(adv7511->regmap, ADV7511_REG_HSYNC_PLACEMENT_MSB,
+ ((hsync_offset >> 10) & 0x7) << 5);
+ regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(0),
+ (hsync_offset >> 2) & 0xff);
+ regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(1),
+ ((hsync_offset & 0x3) << 6) |
+ ((hsync_len >> 4) & 0x3f));
+ regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(2),
+ ((hsync_len & 0xf) << 4) |
+ ((vsync_offset >> 6) & 0xf));
+ regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(3),
+ ((vsync_offset & 0x3f) << 2) |
+ ((vsync_len >> 8) & 0x3));
+ regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(4),
+ vsync_len & 0xff);
+
+ hsync_polarity = !(adj_mode->flags & DRM_MODE_FLAG_PHSYNC);
+ vsync_polarity = !(adj_mode->flags & DRM_MODE_FLAG_PVSYNC);
+ } else {
+ enum adv7511_sync_polarity mode_hsync_polarity;
+ enum adv7511_sync_polarity mode_vsync_polarity;
+
+ /*
+ * If the input signal is always low or always high we want to
+ * invert or let it passthrough depending on the polarity of the
+ * current mode.
+ */
+ if (adj_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ mode_hsync_polarity = ADV7511_SYNC_POLARITY_LOW;
+ else
+ mode_hsync_polarity = ADV7511_SYNC_POLARITY_HIGH;
+
+ if (adj_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ mode_vsync_polarity = ADV7511_SYNC_POLARITY_LOW;
+ else
+ mode_vsync_polarity = ADV7511_SYNC_POLARITY_HIGH;
+
+ if (adv7511->hsync_polarity != mode_hsync_polarity &&
+ adv7511->hsync_polarity !=
+ ADV7511_SYNC_POLARITY_PASSTHROUGH)
+ hsync_polarity = 1;
+
+ if (adv7511->vsync_polarity != mode_vsync_polarity &&
+ adv7511->vsync_polarity !=
+ ADV7511_SYNC_POLARITY_PASSTHROUGH)
+ vsync_polarity = 1;
+ }
+
+ if (mode->vrefresh <= 24000)
+ low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ;
+ else if (mode->vrefresh <= 25000)
+ low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ;
+ else if (mode->vrefresh <= 30000)
+ low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ;
+ else
+ low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
+
+ regmap_update_bits(adv7511->regmap, 0xfb,
+ 0x6, low_refresh_rate << 1);
+ regmap_update_bits(adv7511->regmap, 0x17,
+ 0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
+
+ /*
+ * TODO Test first order 4:2:2 to 4:4:4 up conversion method, which is
+ * supposed to give better results.
+ */
+
+ adv7511->f_tmds = mode->clock;
+}
+
+static const struct drm_encoder_slave_funcs adv7511_encoder_funcs = {
+ .dpms = adv7511_encoder_dpms,
+ .mode_valid = adv7511_encoder_mode_valid,
+ .mode_set = adv7511_encoder_mode_set,
+ .detect = adv7511_encoder_detect,
+ .get_modes = adv7511_get_modes,
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe & remove
+ */
+
+static int adv7511_parse_dt(struct device_node *np,
+ struct adv7511_link_config *config)
+{
+ const char *str;
+ int ret;
+
+ memset(config, 0, sizeof(*config));
+
+ of_property_read_u32(np, "adi,input-depth", &config->input_color_depth);
+ if (config->input_color_depth != 8 && config->input_color_depth != 10 &&
+ config->input_color_depth != 12)
+ return -EINVAL;
+
+ ret = of_property_read_string(np, "adi,input-colorspace", &str);
+ if (ret < 0)
+ return ret;
+
+ if (!strcmp(str, "rgb"))
+ config->input_colorspace = HDMI_COLORSPACE_RGB;
+ else if (!strcmp(str, "yuv422"))
+ config->input_colorspace = HDMI_COLORSPACE_YUV422;
+ else if (!strcmp(str, "yuv444"))
+ config->input_colorspace = HDMI_COLORSPACE_YUV444;
+ else
+ return -EINVAL;
+
+ ret = of_property_read_string(np, "adi,input-clock", &str);
+ if (ret < 0)
+ return ret;
+
+ if (!strcmp(str, "1x"))
+ config->input_clock = ADV7511_INPUT_CLOCK_1X;
+ else if (!strcmp(str, "2x"))
+ config->input_clock = ADV7511_INPUT_CLOCK_2X;
+ else if (!strcmp(str, "ddr"))
+ config->input_clock = ADV7511_INPUT_CLOCK_DDR;
+ else
+ return -EINVAL;
+
+ if (config->input_colorspace == HDMI_COLORSPACE_YUV422 ||
+ config->input_clock != ADV7511_INPUT_CLOCK_1X) {
+ ret = of_property_read_u32(np, "adi,input-style",
+ &config->input_style);
+ if (ret)
+ return ret;
+
+ if (config->input_style < 1 || config->input_style > 3)
+ return -EINVAL;
+
+ ret = of_property_read_string(np, "adi,input-justification",
+ &str);
+ if (ret < 0)
+ return ret;
+
+ if (!strcmp(str, "left"))
+ config->input_justification =
+ ADV7511_INPUT_JUSTIFICATION_LEFT;
+ else if (!strcmp(str, "evenly"))
+ config->input_justification =
+ ADV7511_INPUT_JUSTIFICATION_EVENLY;
+ else if (!strcmp(str, "right"))
+ config->input_justification =
+ ADV7511_INPUT_JUSTIFICATION_RIGHT;
+ else
+ return -EINVAL;
+
+ } else {
+ config->input_style = 1;
+ config->input_justification = ADV7511_INPUT_JUSTIFICATION_LEFT;
+ }
+
+ of_property_read_u32(np, "adi,clock-delay", &config->clock_delay);
+ if (config->clock_delay < -1200 || config->clock_delay > 1600)
+ return -EINVAL;
+
+ config->embedded_sync = of_property_read_bool(np, "adi,embedded-sync");
+
+ /* Hardcode the sync pulse configurations for now. */
+ config->sync_pulse = ADV7511_INPUT_SYNC_PULSE_NONE;
+ config->vsync_polarity = ADV7511_SYNC_POLARITY_PASSTHROUGH;
+ config->hsync_polarity = ADV7511_SYNC_POLARITY_PASSTHROUGH;
+
+ return 0;
+}
+
+static const int edid_i2c_addr = 0x7e;
+static const int packet_i2c_addr = 0x70;
+static const int cec_i2c_addr = 0x78;
+
+static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
+{
+ struct adv7511_link_config link_config;
+ struct adv7511 *adv7511;
+ struct device *dev = &i2c->dev;
+ unsigned int val;
+ int ret;
+
+ if (!dev->of_node)
+ return -EINVAL;
+
+ adv7511 = devm_kzalloc(dev, sizeof(*adv7511), GFP_KERNEL);
+ if (!adv7511)
+ return -ENOMEM;
+
+ adv7511->powered = false;
+ adv7511->status = connector_status_disconnected;
+
+ ret = adv7511_parse_dt(dev->of_node, &link_config);
+ if (ret)
+ return ret;
+
+ /*
+ * The power down GPIO is optional. If present, toggle it from active to
+ * inactive to wake up the encoder.
+ */
+ adv7511->gpio_pd = devm_gpiod_get_optional(dev, "pd", GPIOD_OUT_HIGH);
+ if (IS_ERR(adv7511->gpio_pd))
+ return PTR_ERR(adv7511->gpio_pd);
+
+ if (adv7511->gpio_pd) {
+ mdelay(5);
+ gpiod_set_value_cansleep(adv7511->gpio_pd, 0);
+ }
+
+ adv7511->regmap = devm_regmap_init_i2c(i2c, &adv7511_regmap_config);
+ if (IS_ERR(adv7511->regmap))
+ return PTR_ERR(adv7511->regmap);
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_CHIP_REVISION, &val);
+ if (ret)
+ return ret;
+ dev_dbg(dev, "Rev. %d\n", val);
+
+ ret = regmap_register_patch(adv7511->regmap, adv7511_fixed_registers,
+ ARRAY_SIZE(adv7511_fixed_registers));
+ if (ret)
+ return ret;
+
+ regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR, edid_i2c_addr);
+ regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
+ packet_i2c_addr);
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR, cec_i2c_addr);
+ adv7511_packet_disable(adv7511, 0xffff);
+
+ adv7511->i2c_main = i2c;
+ adv7511->i2c_edid = i2c_new_dummy(i2c->adapter, edid_i2c_addr >> 1);
+ if (!adv7511->i2c_edid)
+ return -ENOMEM;
+
+ if (i2c->irq) {
+ init_waitqueue_head(&adv7511->wq);
+
+ ret = devm_request_threaded_irq(dev, i2c->irq, NULL,
+ adv7511_irq_handler,
+ IRQF_ONESHOT, dev_name(dev),
+ adv7511);
+ if (ret)
+ goto err_i2c_unregister_device;
+ }
+
+ /* CEC is unused for now */
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
+ ADV7511_CEC_CTRL_POWER_DOWN);
+
+ adv7511_power_off(adv7511);
+
+ i2c_set_clientdata(i2c, adv7511);
+
+ adv7511_set_link_config(adv7511, &link_config);
+
+ return 0;
+
+err_i2c_unregister_device:
+ i2c_unregister_device(adv7511->i2c_edid);
+
+ return ret;
+}
+
+static int adv7511_remove(struct i2c_client *i2c)
+{
+ struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
+
+ i2c_unregister_device(adv7511->i2c_edid);
+
+ kfree(adv7511->edid);
+
+ return 0;
+}
+
+static int adv7511_encoder_init(struct i2c_client *i2c, struct drm_device *dev,
+ struct drm_encoder_slave *encoder)
+{
+
+ struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
+
+ encoder->slave_priv = adv7511;
+ encoder->slave_funcs = &adv7511_encoder_funcs;
+
+ adv7511->encoder = &encoder->base;
+
+ return 0;
+}
+
+static const struct i2c_device_id adv7511_i2c_ids[] = {
+ { "adv7511", 0 },
+ { "adv7511w", 0 },
+ { "adv7513", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adv7511_i2c_ids);
+
+static const struct of_device_id adv7511_of_ids[] = {
+ { .compatible = "adi,adv7511", },
+ { .compatible = "adi,adv7511w", },
+ { .compatible = "adi,adv7513", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adv7511_of_ids);
+
+static struct drm_i2c_encoder_driver adv7511_driver = {
+ .i2c_driver = {
+ .driver = {
+ .name = "adv7511",
+ .of_match_table = adv7511_of_ids,
+ },
+ .id_table = adv7511_i2c_ids,
+ .probe = adv7511_probe,
+ .remove = adv7511_remove,
+ },
+
+ .encoder_init = adv7511_encoder_init,
+};
+
+static int __init adv7511_init(void)
+{
+ return drm_i2c_encoder_register(THIS_MODULE, &adv7511_driver);
+}
+module_init(adv7511_init);
+
+static void __exit adv7511_exit(void)
+{
+ drm_i2c_encoder_unregister(&adv7511_driver);
+}
+module_exit(adv7511_exit);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("ADV7511 HDMI transmitter driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/i2c/adv7511.h b/drivers/gpu/drm/i2c/adv7511.h
new file mode 100644
index 000000000000..38515b30cedf
--- /dev/null
+++ b/drivers/gpu/drm/i2c/adv7511.h
@@ -0,0 +1,289 @@
+/*
+ * Analog Devices ADV7511 HDMI transmitter driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef __DRM_I2C_ADV7511_H__
+#define __DRM_I2C_ADV7511_H__
+
+#include <linux/hdmi.h>
+
+#define ADV7511_REG_CHIP_REVISION 0x00
+#define ADV7511_REG_N0 0x01
+#define ADV7511_REG_N1 0x02
+#define ADV7511_REG_N2 0x03
+#define ADV7511_REG_SPDIF_FREQ 0x04
+#define ADV7511_REG_CTS_AUTOMATIC1 0x05
+#define ADV7511_REG_CTS_AUTOMATIC2 0x06
+#define ADV7511_REG_CTS_MANUAL0 0x07
+#define ADV7511_REG_CTS_MANUAL1 0x08
+#define ADV7511_REG_CTS_MANUAL2 0x09
+#define ADV7511_REG_AUDIO_SOURCE 0x0a
+#define ADV7511_REG_AUDIO_CONFIG 0x0b
+#define ADV7511_REG_I2S_CONFIG 0x0c
+#define ADV7511_REG_I2S_WIDTH 0x0d
+#define ADV7511_REG_AUDIO_SUB_SRC0 0x0e
+#define ADV7511_REG_AUDIO_SUB_SRC1 0x0f
+#define ADV7511_REG_AUDIO_SUB_SRC2 0x10
+#define ADV7511_REG_AUDIO_SUB_SRC3 0x11
+#define ADV7511_REG_AUDIO_CFG1 0x12
+#define ADV7511_REG_AUDIO_CFG2 0x13
+#define ADV7511_REG_AUDIO_CFG3 0x14
+#define ADV7511_REG_I2C_FREQ_ID_CFG 0x15
+#define ADV7511_REG_VIDEO_INPUT_CFG1 0x16
+#define ADV7511_REG_CSC_UPPER(x) (0x18 + (x) * 2)
+#define ADV7511_REG_CSC_LOWER(x) (0x19 + (x) * 2)
+#define ADV7511_REG_SYNC_DECODER(x) (0x30 + (x))
+#define ADV7511_REG_DE_GENERATOR(x) (0x35 + (x))
+#define ADV7511_REG_PIXEL_REPETITION 0x3b
+#define ADV7511_REG_VIC_MANUAL 0x3c
+#define ADV7511_REG_VIC_SEND 0x3d
+#define ADV7511_REG_VIC_DETECTED 0x3e
+#define ADV7511_REG_AUX_VIC_DETECTED 0x3f
+#define ADV7511_REG_PACKET_ENABLE0 0x40
+#define ADV7511_REG_POWER 0x41
+#define ADV7511_REG_STATUS 0x42
+#define ADV7511_REG_EDID_I2C_ADDR 0x43
+#define ADV7511_REG_PACKET_ENABLE1 0x44
+#define ADV7511_REG_PACKET_I2C_ADDR 0x45
+#define ADV7511_REG_DSD_ENABLE 0x46
+#define ADV7511_REG_VIDEO_INPUT_CFG2 0x48
+#define ADV7511_REG_INFOFRAME_UPDATE 0x4a
+#define ADV7511_REG_GC(x) (0x4b + (x)) /* 0x4b - 0x51 */
+#define ADV7511_REG_AVI_INFOFRAME_VERSION 0x52
+#define ADV7511_REG_AVI_INFOFRAME_LENGTH 0x53
+#define ADV7511_REG_AVI_INFOFRAME_CHECKSUM 0x54
+#define ADV7511_REG_AVI_INFOFRAME(x) (0x55 + (x)) /* 0x55 - 0x6f */
+#define ADV7511_REG_AUDIO_INFOFRAME_VERSION 0x70
+#define ADV7511_REG_AUDIO_INFOFRAME_LENGTH 0x71
+#define ADV7511_REG_AUDIO_INFOFRAME_CHECKSUM 0x72
+#define ADV7511_REG_AUDIO_INFOFRAME(x) (0x73 + (x)) /* 0x73 - 0x7c */
+#define ADV7511_REG_INT_ENABLE(x) (0x94 + (x))
+#define ADV7511_REG_INT(x) (0x96 + (x))
+#define ADV7511_REG_INPUT_CLK_DIV 0x9d
+#define ADV7511_REG_PLL_STATUS 0x9e
+#define ADV7511_REG_HDMI_POWER 0xa1
+#define ADV7511_REG_HDCP_HDMI_CFG 0xaf
+#define ADV7511_REG_AN(x) (0xb0 + (x)) /* 0xb0 - 0xb7 */
+#define ADV7511_REG_HDCP_STATUS 0xb8
+#define ADV7511_REG_BCAPS 0xbe
+#define ADV7511_REG_BKSV(x) (0xc0 + (x)) /* 0xc0 - 0xc3 */
+#define ADV7511_REG_EDID_SEGMENT 0xc4
+#define ADV7511_REG_DDC_STATUS 0xc8
+#define ADV7511_REG_EDID_READ_CTRL 0xc9
+#define ADV7511_REG_BSTATUS(x) (0xca + (x)) /* 0xca - 0xcb */
+#define ADV7511_REG_TIMING_GEN_SEQ 0xd0
+#define ADV7511_REG_POWER2 0xd6
+#define ADV7511_REG_HSYNC_PLACEMENT_MSB 0xfa
+
+#define ADV7511_REG_SYNC_ADJUSTMENT(x) (0xd7 + (x)) /* 0xd7 - 0xdc */
+#define ADV7511_REG_TMDS_CLOCK_INV 0xde
+#define ADV7511_REG_ARC_CTRL 0xdf
+#define ADV7511_REG_CEC_I2C_ADDR 0xe1
+#define ADV7511_REG_CEC_CTRL 0xe2
+#define ADV7511_REG_CHIP_ID_HIGH 0xf5
+#define ADV7511_REG_CHIP_ID_LOW 0xf6
+
+#define ADV7511_CSC_ENABLE BIT(7)
+#define ADV7511_CSC_UPDATE_MODE BIT(5)
+
+#define ADV7511_INT0_HPD BIT(7)
+#define ADV7511_INT0_VSYNC BIT(5)
+#define ADV7511_INT0_AUDIO_FIFO_FULL BIT(4)
+#define ADV7511_INT0_EDID_READY BIT(2)
+#define ADV7511_INT0_HDCP_AUTHENTICATED BIT(1)
+
+#define ADV7511_INT1_DDC_ERROR BIT(7)
+#define ADV7511_INT1_BKSV BIT(6)
+#define ADV7511_INT1_CEC_TX_READY BIT(5)
+#define ADV7511_INT1_CEC_TX_ARBIT_LOST BIT(4)
+#define ADV7511_INT1_CEC_TX_RETRY_TIMEOUT BIT(3)
+#define ADV7511_INT1_CEC_RX_READY3 BIT(2)
+#define ADV7511_INT1_CEC_RX_READY2 BIT(1)
+#define ADV7511_INT1_CEC_RX_READY1 BIT(0)
+
+#define ADV7511_ARC_CTRL_POWER_DOWN BIT(0)
+
+#define ADV7511_CEC_CTRL_POWER_DOWN BIT(0)
+
+#define ADV7511_POWER_POWER_DOWN BIT(6)
+
+#define ADV7511_HDMI_CFG_MODE_MASK 0x2
+#define ADV7511_HDMI_CFG_MODE_DVI 0x0
+#define ADV7511_HDMI_CFG_MODE_HDMI 0x2
+
+#define ADV7511_AUDIO_SELECT_I2C 0x0
+#define ADV7511_AUDIO_SELECT_SPDIF 0x1
+#define ADV7511_AUDIO_SELECT_DSD 0x2
+#define ADV7511_AUDIO_SELECT_HBR 0x3
+#define ADV7511_AUDIO_SELECT_DST 0x4
+
+#define ADV7511_I2S_SAMPLE_LEN_16 0x2
+#define ADV7511_I2S_SAMPLE_LEN_20 0x3
+#define ADV7511_I2S_SAMPLE_LEN_18 0x4
+#define ADV7511_I2S_SAMPLE_LEN_22 0x5
+#define ADV7511_I2S_SAMPLE_LEN_19 0x8
+#define ADV7511_I2S_SAMPLE_LEN_23 0x9
+#define ADV7511_I2S_SAMPLE_LEN_24 0xb
+#define ADV7511_I2S_SAMPLE_LEN_17 0xc
+#define ADV7511_I2S_SAMPLE_LEN_21 0xd
+
+#define ADV7511_SAMPLE_FREQ_44100 0x0
+#define ADV7511_SAMPLE_FREQ_48000 0x2
+#define ADV7511_SAMPLE_FREQ_32000 0x3
+#define ADV7511_SAMPLE_FREQ_88200 0x8
+#define ADV7511_SAMPLE_FREQ_96000 0xa
+#define ADV7511_SAMPLE_FREQ_176400 0xc
+#define ADV7511_SAMPLE_FREQ_192000 0xe
+
+#define ADV7511_STATUS_POWER_DOWN_POLARITY BIT(7)
+#define ADV7511_STATUS_HPD BIT(6)
+#define ADV7511_STATUS_MONITOR_SENSE BIT(5)
+#define ADV7511_STATUS_I2S_32BIT_MODE BIT(3)
+
+#define ADV7511_PACKET_ENABLE_N_CTS BIT(8+6)
+#define ADV7511_PACKET_ENABLE_AUDIO_SAMPLE BIT(8+5)
+#define ADV7511_PACKET_ENABLE_AVI_INFOFRAME BIT(8+4)
+#define ADV7511_PACKET_ENABLE_AUDIO_INFOFRAME BIT(8+3)
+#define ADV7511_PACKET_ENABLE_GC BIT(7)
+#define ADV7511_PACKET_ENABLE_SPD BIT(6)
+#define ADV7511_PACKET_ENABLE_MPEG BIT(5)
+#define ADV7511_PACKET_ENABLE_ACP BIT(4)
+#define ADV7511_PACKET_ENABLE_ISRC BIT(3)
+#define ADV7511_PACKET_ENABLE_GM BIT(2)
+#define ADV7511_PACKET_ENABLE_SPARE2 BIT(1)
+#define ADV7511_PACKET_ENABLE_SPARE1 BIT(0)
+
+#define ADV7511_REG_POWER2_HPD_SRC_MASK 0xc0
+#define ADV7511_REG_POWER2_HPD_SRC_BOTH 0x00
+#define ADV7511_REG_POWER2_HPD_SRC_HPD 0x40
+#define ADV7511_REG_POWER2_HPD_SRC_CEC 0x80
+#define ADV7511_REG_POWER2_HPD_SRC_NONE 0xc0
+#define ADV7511_REG_POWER2_TDMS_ENABLE BIT(4)
+#define ADV7511_REG_POWER2_GATE_INPUT_CLK BIT(0)
+
+#define ADV7511_LOW_REFRESH_RATE_NONE 0x0
+#define ADV7511_LOW_REFRESH_RATE_24HZ 0x1
+#define ADV7511_LOW_REFRESH_RATE_25HZ 0x2
+#define ADV7511_LOW_REFRESH_RATE_30HZ 0x3
+
+#define ADV7511_AUDIO_CFG3_LEN_MASK 0x0f
+#define ADV7511_I2C_FREQ_ID_CFG_RATE_MASK 0xf0
+
+#define ADV7511_AUDIO_SOURCE_I2S 0
+#define ADV7511_AUDIO_SOURCE_SPDIF 1
+
+#define ADV7511_I2S_FORMAT_I2S 0
+#define ADV7511_I2S_FORMAT_RIGHT_J 1
+#define ADV7511_I2S_FORMAT_LEFT_J 2
+
+#define ADV7511_PACKET(p, x) ((p) * 0x20 + (x))
+#define ADV7511_PACKET_SDP(x) ADV7511_PACKET(0, x)
+#define ADV7511_PACKET_MPEG(x) ADV7511_PACKET(1, x)
+#define ADV7511_PACKET_ACP(x) ADV7511_PACKET(2, x)
+#define ADV7511_PACKET_ISRC1(x) ADV7511_PACKET(3, x)
+#define ADV7511_PACKET_ISRC2(x) ADV7511_PACKET(4, x)
+#define ADV7511_PACKET_GM(x) ADV7511_PACKET(5, x)
+#define ADV7511_PACKET_SPARE(x) ADV7511_PACKET(6, x)
+
+enum adv7511_input_clock {
+ ADV7511_INPUT_CLOCK_1X,
+ ADV7511_INPUT_CLOCK_2X,
+ ADV7511_INPUT_CLOCK_DDR,
+};
+
+enum adv7511_input_justification {
+ ADV7511_INPUT_JUSTIFICATION_EVENLY = 0,
+ ADV7511_INPUT_JUSTIFICATION_RIGHT = 1,
+ ADV7511_INPUT_JUSTIFICATION_LEFT = 2,
+};
+
+enum adv7511_input_sync_pulse {
+ ADV7511_INPUT_SYNC_PULSE_DE = 0,
+ ADV7511_INPUT_SYNC_PULSE_HSYNC = 1,
+ ADV7511_INPUT_SYNC_PULSE_VSYNC = 2,
+ ADV7511_INPUT_SYNC_PULSE_NONE = 3,
+};
+
+/**
+ * enum adv7511_sync_polarity - Polarity for the input sync signals
+ * @ADV7511_SYNC_POLARITY_PASSTHROUGH: Sync polarity matches that of
+ * the currently configured mode.
+ * @ADV7511_SYNC_POLARITY_LOW: Sync polarity is low
+ * @ADV7511_SYNC_POLARITY_HIGH: Sync polarity is high
+ *
+ * If the polarity is set to either LOW or HIGH the driver will configure the
+ * ADV7511 to internally invert the sync signal if required to match the sync
+ * polarity setting for the currently selected output mode.
+ *
+ * If the polarity is set to PASSTHROUGH, the ADV7511 will route the signal
+ * unchanged. This is used when the upstream graphics core already generates
+ * the sync signals with the correct polarity.
+ */
+enum adv7511_sync_polarity {
+ ADV7511_SYNC_POLARITY_PASSTHROUGH,
+ ADV7511_SYNC_POLARITY_LOW,
+ ADV7511_SYNC_POLARITY_HIGH,
+};
+
+/**
+ * struct adv7511_link_config - Describes adv7511 hardware configuration
+ * @input_color_depth: Number of bits per color component (8, 10 or 12)
+ * @input_colorspace: The input colorspace (RGB, YUV444, YUV422)
+ * @input_clock: The input video clock style (1x, 2x, DDR)
+ * @input_style: The input component arrangement variant
+ * @input_justification: Video input format bit justification
+ * @clock_delay: Clock delay for the input clock (in ps)
+ * @embedded_sync: Video input uses BT.656-style embedded sync
+ * @sync_pulse: Select the sync pulse
+ * @vsync_polarity: vsync input signal configuration
+ * @hsync_polarity: hsync input signal configuration
+ */
+struct adv7511_link_config {
+ unsigned int input_color_depth;
+ enum hdmi_colorspace input_colorspace;
+ enum adv7511_input_clock input_clock;
+ unsigned int input_style;
+ enum adv7511_input_justification input_justification;
+
+ int clock_delay;
+
+ bool embedded_sync;
+ enum adv7511_input_sync_pulse sync_pulse;
+ enum adv7511_sync_polarity vsync_polarity;
+ enum adv7511_sync_polarity hsync_polarity;
+};
+
+/**
+ * enum adv7511_csc_scaling - Scaling factor for the ADV7511 CSC
+ * @ADV7511_CSC_SCALING_1: CSC results are not scaled
+ * @ADV7511_CSC_SCALING_2: CSC results are scaled by a factor of two
+ * @ADV7511_CSC_SCALING_4: CSC results are scaled by a factor of four
+ */
+enum adv7511_csc_scaling {
+ ADV7511_CSC_SCALING_1 = 0,
+ ADV7511_CSC_SCALING_2 = 1,
+ ADV7511_CSC_SCALING_4 = 2,
+};
+
+/**
+ * struct adv7511_video_config - Describes adv7511 hardware configuration
+ * @csc_enable: Whether to enable color space conversion
+ * @csc_scaling_factor: Color space conversion scaling factor
+ * @csc_coefficents: Color space conversion coefficents
+ * @hdmi_mode: Whether to use HDMI or DVI output mode
+ * @avi_infoframe: HDMI infoframe
+ */
+struct adv7511_video_config {
+ bool csc_enable;
+ enum adv7511_csc_scaling csc_scaling_factor;
+ const uint16_t *csc_coefficents;
+
+ bool hdmi_mode;
+ struct hdmi_avi_infoframe avi_infoframe;
+};
+
+#endif /* __DRM_I2C_ADV7511_H__ */
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index a68eff1fb429..659e9265801a 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -3845,6 +3845,34 @@ static const struct panel_desc_dsi auo_b080uan01 = {
.lanes = 4,
};
+static const struct drm_display_mode auo_b101uan01_mode = {
+ .clock = 154500,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 185,
+ .hsync_end = 1920 + 185,
+ .htotal = 1920 + 185 + 925,
+ .vdisplay = 1200,
+ .vsync_start = 1200 + 3,
+ .vsync_end = 1200 + 3 + 5,
+ .vtotal = 1200 + 3 + 5 + 4,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc_dsi auo_b101uan01 = {
+ .desc = {
+ .modes = &auo_b101uan01_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 108,
+ .height = 272,
+ },
+ },
+ .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
+ .format = MIPI_DSI_FMT_RGB888,
+ .lanes = 4,
+};
+
static const struct drm_display_mode boe_tv080wum_nl0_mode = {
.clock = 160000,
.hdisplay = 1200,
@@ -4024,6 +4052,9 @@ static const struct of_device_id dsi_of_match[] = {
.compatible = "auo,b080uan01",
.data = &auo_b080uan01
}, {
+ .compatible = "auo,b101uan01",
+ .data = &auo_b101uan01
+ }, {
.compatible = "boe,tv080wum-nl0",
.data = &boe_tv080wum_nl0
}, {
diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig
new file mode 100644
index 000000000000..5463d4aa0302
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/Kconfig
@@ -0,0 +1,106 @@
+config DRM_XLNX
+ tristate "Xilinx DRM KMS Driver"
+ depends on DRM && OF
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
+ help
+ Xilinx DRM KMS driver. Choose this option if you have
+ a Xilinx SoC with hardened display pipeline or soft
+ display pipeline using Xilinx IPs in FPGA. This module
+ provides the kernel mode setting functionalities
+ for Xilinx display drivers.
+
+config DRM_XLNX_BRIDGE
+ tristate "Xilinx DRM KMS bridge"
+ depends on DRM_XLNX
+ help
+ Xilinx DRM KMS bridge. This module provides some interfaces
+ to enable inter-module communication. Choose this option
+ from the provider driver when the Xilinx bridge interface is
+ needed.
+
+config DRM_XLNX_BRIDGE_DEBUG_FS
+ bool "Xilinx DRM KMS bridge debugfs"
+ depends on DEBUG_FS && DRM_XLNX_BRIDGE
+ help
+ Enable the debugfs code for Xilinx bridge. The debugfs code
+ enables debugging or testing related features. It exposes some
+ low level controls to the user space to help testing automation,
+ as well as can enable additional diagnostic or statistical
+ information.
+
+config DRM_ZYNQMP_DPSUB
+ tristate "ZynqMP DP Subsystem Driver"
+ depends on ARCH_ZYNQMP && OF && DRM_XLNX && COMMON_CLK
+ select DMADEVICES
+ select XILINX_DMA_ENGINES
+ select XILINX_DPDMA
+ select PHY_XILINX_ZYNQMP
+ help
+ DRM KMS driver for ZynqMP DP Subsystem controller. Choose
+ this option if you have a Xilinx ZynqMP SoC with DisplayPort
+ subsystem. The driver provides the kernel mode setting
+ functionalities for ZynqMP DP subsystem.
+
+config DRM_XLNX_DSI
+ tristate "Xilinx DRM DSI Subsystem Driver"
+ depends on DRM_XLNX
+ select DRM_MIPI_DSI
+ select DRM_PANEL
+ select BACKLIGHT_LCD_SUPPORT
+ select BACKLIGHT_CLASS_DEVICE
+ select DRM_PANEL_SIMPLE
+ help
+ DRM driver for Xilinx MIPI-DSI.
+
+config DRM_XLNX_MIXER
+ tristate "Xilinx DRM Mixer Driver"
+ depends on DRM_XLNX
+ select VIDEOMODE_HELPERS
+ help
+ DRM driver for the Xilinx Mixer.
+
+config DRM_XLNX_PL_DISP
+ tristate "Xilinx DRM PL display driver"
+ depends on DRM_XLNX
+ select VIDEOMODE_HELPERS
+ help
+ DRM driver for Xilinx PL display driver, provides drm
+ crtc and plane object to display pipeline. You need to
+ choose this option if your display pipeline needs one
+ crtc and plane object with single DMA connected.
+
+config DRM_XLNX_SDI
+ tristate "Xilinx DRM SDI Subsystem Driver"
+ depends on DRM_XLNX
+ help
+ DRM driver for Xilinx SDI Tx Subsystem.
+
+config DRM_XLNX_BRIDGE_CSC
+ tristate "Xilinx DRM CSC Driver"
+ depends on DRM_XLNX_BRIDGE
+ help
+ DRM bridge driver for color space converter of VPSS. Choose
+ this option if color space converter is connected to an encoder.
+ The driver provides set/get resolution and color format
+ functionality through bridge layer.
+
+config DRM_XLNX_BRIDGE_SCALER
+ tristate "Xilinx DRM Scaler Driver"
+ depends on DRM_XLNX_BRIDGE
+ help
+ DRM bridge driver for scaler of VPSS. Choose this option
+ if scaler is connected to an encoder. The driver provides
+ upscaling, down scaling and no scaling functionality through
+ bridge layer.
+
+config DRM_XLNX_BRIDGE_VTC
+ tristate "Xilinx DRM VTC Driver"
+ depends on DRM_XLNX_BRIDGE
+ help
+ DRM bridge driver for Xilinx Video Timing Controller. Choose
+ this option to make VTC a part of the CRTC in display pipeline.
+ Currently the support is added to the Xilinx Video Mixer and
+ Xilinx PL display CRTC drivers. This driver provides ability
+ to generate timings through the bridge layer.
diff --git a/drivers/gpu/drm/xlnx/Makefile b/drivers/gpu/drm/xlnx/Makefile
new file mode 100644
index 000000000000..1d80be7d3e70
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/Makefile
@@ -0,0 +1,21 @@
+xlnx_drm-objs += xlnx_crtc.o xlnx_drv.o xlnx_fb.o xlnx_gem.o
+xlnx_drm-$(CONFIG_DRM_XLNX_BRIDGE) += xlnx_bridge.o
+obj-$(CONFIG_DRM_XLNX) += xlnx_drm.o
+
+obj-$(CONFIG_DRM_XLNX_BRIDGE_CSC) += xlnx_csc.o
+
+obj-$(CONFIG_DRM_XLNX_BRIDGE_SCALER) += xlnx_scaler.o
+
+obj-$(CONFIG_DRM_XLNX_BRIDGE_VTC) += xlnx_vtc.o
+
+obj-$(CONFIG_DRM_XLNX_DSI) += xlnx_dsi.o
+
+obj-$(CONFIG_DRM_XLNX_MIXER) += xlnx_mixer.o
+
+obj-$(CONFIG_DRM_XLNX_PL_DISP) += xlnx_pl_disp.o
+
+xlnx-sdi-objs += xlnx_sdi.o xlnx_sdi_timing.o
+obj-$(CONFIG_DRM_XLNX_SDI) += xlnx-sdi.o
+
+zynqmp-dpsub-objs += zynqmp_disp.o zynqmp_dpsub.o zynqmp_dp.o
+obj-$(CONFIG_DRM_ZYNQMP_DPSUB) += zynqmp-dpsub.o
diff --git a/drivers/gpu/drm/xlnx/xlnx_bridge.c b/drivers/gpu/drm/xlnx/xlnx_bridge.c
new file mode 100644
index 000000000000..2f1b64795f04
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_bridge.c
@@ -0,0 +1,563 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM bridge driver
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/list.h>
+
+#include "xlnx_bridge.h"
+#include "xlnx_drv.h"
+
+/*
+ * Overview
+ * --------
+ *
+ * Similar to drm bridge, but this can be used by any DRM driver. There
+ * is no limitation to be used by non DRM drivers as well. No complex topology
+ * is modeled, thus it's assumed that the Xilinx bridge device is directly
+ * attached to client. The client should call Xilinx bridge functions explicitly
+ * where it's needed, as opposed to drm bridge functions which are called
+ * implicitly by DRM core.
+ * One Xlnx bridge can be owned by one driver at a time.
+ */
+
+/**
+ * struct xlnx_bridge_helper - Xilinx bridge helper
+ * @xlnx_bridges: list of Xilinx bridges
+ * @lock: lock to protect @xlnx_bridges
+ * @refcnt: reference count
+ * @error: flag if in error state
+ */
+struct xlnx_bridge_helper {
+ struct list_head xlnx_bridges;
+ struct mutex lock; /* lock for @xlnx_bridges */
+ unsigned int refcnt;
+ bool error;
+};
+
+static struct xlnx_bridge_helper helper;
+
+struct videomode;
+/*
+ * Client functions
+ */
+
+/**
+ * xlnx_bridge_enable - Enable the bridge
+ * @bridge: bridge to enable
+ *
+ * Enable bridge.
+ *
+ * Return: 0 on success. -ENOENT if no callback, -EFAULT in error state,
+ * or return code from callback.
+ */
+int xlnx_bridge_enable(struct xlnx_bridge *bridge)
+{
+ if (!bridge)
+ return 0;
+
+ if (helper.error)
+ return -EFAULT;
+
+ if (bridge->enable)
+ return bridge->enable(bridge);
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(xlnx_bridge_enable);
+
+/**
+ * xlnx_bridge_disable - Disable the bridge
+ * @bridge: bridge to disable
+ *
+ * Disable bridge.
+ */
+void xlnx_bridge_disable(struct xlnx_bridge *bridge)
+{
+ if (!bridge)
+ return;
+
+ if (helper.error)
+ return;
+
+ if (bridge->disable)
+ bridge->disable(bridge);
+}
+EXPORT_SYMBOL(xlnx_bridge_disable);
+
+/**
+ * xlnx_bridge_set_input - Set the input of @bridge
+ * @bridge: bridge to set
+ * @width: width
+ * @height: height
+ * @bus_fmt: bus format (ex, MEDIA_BUS_FMT_*);
+ *
+ * Set the bridge input with height / width / format.
+ *
+ * Return: 0 on success. -ENOENT if no callback, -EFAULT if in error state,
+ * or return code from callback.
+ */
+int xlnx_bridge_set_input(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt)
+{
+ if (!bridge)
+ return 0;
+
+ if (helper.error)
+ return -EFAULT;
+
+ if (bridge->set_input)
+ return bridge->set_input(bridge, width, height, bus_fmt);
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(xlnx_bridge_set_input);
+
+/**
+ * xlnx_bridge_get_input_fmts - Get the supported input formats
+ * @bridge: bridge to set
+ * @fmts: pointer to formats
+ * @count: pointer to format count
+ *
+ * Get the list of supported input bus formats.
+ *
+ * Return: 0 on success. -ENOENT if no callback, -EFAULT if in error state,
+ * or return code from callback.
+ */
+int xlnx_bridge_get_input_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count)
+{
+ if (!bridge)
+ return 0;
+
+ if (helper.error)
+ return -EFAULT;
+
+ if (bridge->get_input_fmts)
+ return bridge->get_input_fmts(bridge, fmts, count);
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(xlnx_bridge_get_input_fmts);
+
+/**
+ * xlnx_bridge_set_output - Set the output of @bridge
+ * @bridge: bridge to set
+ * @width: width
+ * @height: height
+ * @bus_fmt: bus format (ex, MEDIA_BUS_FMT_*);
+ *
+ * Set the bridge output with height / width / format.
+ *
+ * Return: 0 on success. -ENOENT if no callback, -EFAULT if in error state,
+ * or return code from callback.
+ */
+int xlnx_bridge_set_output(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt)
+{
+ if (!bridge)
+ return 0;
+
+ if (helper.error)
+ return -EFAULT;
+
+ if (bridge->set_output)
+ return bridge->set_output(bridge, width, height, bus_fmt);
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(xlnx_bridge_set_output);
+
+/**
+ * xlnx_bridge_get_output_fmts - Get the supported output formats
+ * @bridge: bridge to set
+ * @fmts: pointer to formats
+ * @count: pointer to format count
+ *
+ * Get the list of supported output bus formats.
+ *
+ * Return: 0 on success. -ENOENT if no callback, -EFAULT if in error state,
+ * or return code from callback.
+ */
+int xlnx_bridge_get_output_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count)
+{
+ if (!bridge)
+ return 0;
+
+ if (helper.error)
+ return -EFAULT;
+
+ if (bridge->get_output_fmts)
+ return bridge->get_output_fmts(bridge, fmts, count);
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(xlnx_bridge_get_output_fmts);
+
+/**
+ * xlnx_bridge_set_timing - Set the video timing
+ * @bridge: bridge to set
+ * @vm: Videomode
+ *
+ * Set the video mode so that timing can be generated using this
+ * by the video timing controller.
+ *
+ * Return: 0 on success. -ENOENT if no callback, -EFAULT if in error state,
+ * or return code from callback.
+ */
+int xlnx_bridge_set_timing(struct xlnx_bridge *bridge, struct videomode *vm)
+{
+ if (!bridge)
+ return 0;
+
+ if (helper.error)
+ return -EFAULT;
+
+ if (bridge->set_timing) {
+ bridge->set_timing(bridge, vm);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(xlnx_bridge_set_timing);
+
+/**
+ * of_xlnx_bridge_get - Get the corresponding Xlnx bridge instance
+ * @bridge_np: The device node of the bridge device
+ *
+ * The function walks through the Xlnx bridge list of @drm, and return
+ * if any registered bridge matches the device node. The returned
+ * bridge will not be accessible by others.
+ *
+ * Return: the matching Xlnx bridge instance, or NULL
+ */
+struct xlnx_bridge *of_xlnx_bridge_get(struct device_node *bridge_np)
+{
+ struct xlnx_bridge *found = NULL;
+ struct xlnx_bridge *bridge;
+
+ if (helper.error)
+ return NULL;
+
+ mutex_lock(&helper.lock);
+ list_for_each_entry(bridge, &helper.xlnx_bridges, list) {
+ if (bridge->of_node == bridge_np && !bridge->owned) {
+ found = bridge;
+ bridge->owned = true;
+ break;
+ }
+ }
+ mutex_unlock(&helper.lock);
+
+ return found;
+}
+EXPORT_SYMBOL_GPL(of_xlnx_bridge_get);
+
+/**
+ * of_xlnx_bridge_put - Put the Xlnx bridge instance
+ * @bridge: Xlnx bridge instance to release
+ *
+ * Return the @bridge. After this, the bridge will be available for
+ * other drivers to use.
+ */
+void of_xlnx_bridge_put(struct xlnx_bridge *bridge)
+{
+ if (WARN_ON(helper.error))
+ return;
+
+ mutex_lock(&helper.lock);
+ WARN_ON(!bridge->owned);
+ bridge->owned = false;
+ mutex_unlock(&helper.lock);
+}
+EXPORT_SYMBOL_GPL(of_xlnx_bridge_put);
+
+#ifdef CONFIG_DRM_XLNX_BRIDGE_DEBUG_FS
+
+#include <linux/debugfs.h>
+
+struct xlnx_bridge_debugfs_dir {
+ struct dentry *dir;
+ int ref_cnt;
+};
+
+static struct xlnx_bridge_debugfs_dir *dir;
+
+struct xlnx_bridge_debugfs_file {
+ struct dentry *file;
+ const char *status;
+};
+
+#define XLNX_BRIDGE_DEBUGFS_MAX_BYTES 16
+
+static ssize_t xlnx_bridge_debugfs_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct xlnx_bridge *bridge = f->f_inode->i_private;
+ int ret;
+
+ if (size == 0)
+ return -EINVAL;
+
+ if (*pos != 0)
+ return 0;
+
+ size = min(size, strlen(bridge->debugfs_file->status));
+ ret = copy_to_user(buf, bridge->debugfs_file->status, size);
+ if (ret)
+ return ret;
+
+ *pos = size + 1;
+ return size;
+}
+
+static ssize_t xlnx_bridge_debugfs_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct xlnx_bridge *bridge = f->f_inode->i_private;
+
+ if (*pos != 0 || size == 0)
+ return -EINVAL;
+
+ if (!strncmp(buf, "enable", 6)) {
+ xlnx_bridge_enable(bridge);
+ } else if (!strncmp(buf, "disable", 7)) {
+ xlnx_bridge_disable(bridge);
+ } else if (!strncmp(buf, "set_input", 9)) {
+ char *cmd, **tmp;
+ char *w, *h, *f;
+ u32 width, height, fmt;
+ int ret = -EINVAL;
+
+ cmd = kzalloc(size, GFP_KERNEL);
+ ret = strncpy_from_user(cmd, buf, size);
+ if (ret < 0) {
+ pr_err("%s %d failed to copy the command %s\n",
+ __func__, __LINE__, buf);
+ return ret;
+ }
+
+ tmp = &cmd;
+ strsep(tmp, " ");
+ w = strsep(tmp, " ");
+ h = strsep(tmp, " ");
+ f = strsep(tmp, " ");
+ if (w && h && f) {
+ ret = kstrtouint(w, 0, &width);
+ ret |= kstrtouint(h, 0, &height);
+ ret |= kstrtouint(f, 0, &fmt);
+ }
+
+ kfree(cmd);
+ if (ret) {
+ pr_err("%s %d invalid command: %s\n",
+ __func__, __LINE__, buf);
+ return -EINVAL;
+ }
+ xlnx_bridge_set_input(bridge, width, height, fmt);
+ }
+
+ return size;
+}
+
+static const struct file_operations xlnx_bridge_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .read = xlnx_bridge_debugfs_read,
+ .write = xlnx_bridge_debugfs_write,
+};
+
+static int xlnx_bridge_debugfs_register(struct xlnx_bridge *bridge)
+{
+ struct xlnx_bridge_debugfs_file *file;
+ char file_name[32];
+
+ file = kzalloc(sizeof(*file), GFP_KERNEL);
+ if (!file)
+ return -ENOMEM;
+
+ snprintf(file_name, sizeof(file_name), "xlnx_bridge-%s",
+ bridge->of_node->name);
+ file->file = debugfs_create_file(file_name, 0444, dir->dir, bridge,
+ &xlnx_bridge_debugfs_fops);
+ bridge->debugfs_file = file;
+
+ return 0;
+}
+
+static void xlnx_bridge_debugfs_unregister(struct xlnx_bridge *bridge)
+{
+ debugfs_remove(bridge->debugfs_file->file);
+ kfree(bridge->debugfs_file);
+}
+
+static int xlnx_bridge_debugfs_init(void)
+{
+ if (dir) {
+ dir->ref_cnt++;
+ return 0;
+ }
+
+ dir = kzalloc(sizeof(*dir), GFP_KERNEL);
+ if (!dir)
+ return -ENOMEM;
+
+ dir->dir = debugfs_create_dir("xlnx-bridge", NULL);
+ if (!dir->dir)
+ return -ENODEV;
+ dir->ref_cnt++;
+
+ return 0;
+}
+
+static void xlnx_bridge_debugfs_fini(void)
+{
+ if (--dir->ref_cnt)
+ return;
+
+ debugfs_remove_recursive(dir->dir);
+ dir = NULL;
+}
+
+#else
+
+static int xlnx_bridge_debugfs_register(struct xlnx_bridge *bridge)
+{
+ return 0;
+}
+
+static void xlnx_bridge_debugfs_unregister(struct xlnx_bridge *bridge)
+{
+}
+
+static int xlnx_bridge_debugfs_init(void)
+{
+ return 0;
+}
+
+static void xlnx_bridge_debugfs_fini(void)
+{
+}
+
+#endif
+
+/*
+ * Provider functions
+ */
+
+/**
+ * xlnx_bridge_register - Register the bridge instance
+ * @bridge: Xlnx bridge instance to register
+ *
+ * Register @bridge to be available for clients.
+ *
+ * Return: 0 on success. -EPROBE_DEFER if helper is not initialized, or
+ * -EFAULT if in error state.
+ */
+int xlnx_bridge_register(struct xlnx_bridge *bridge)
+{
+ if (!helper.refcnt)
+ return -EPROBE_DEFER;
+
+ if (helper.error)
+ return -EFAULT;
+
+ mutex_lock(&helper.lock);
+ WARN_ON(!bridge->of_node);
+ bridge->owned = false;
+ xlnx_bridge_debugfs_register(bridge);
+ list_add_tail(&bridge->list, &helper.xlnx_bridges);
+ mutex_unlock(&helper.lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xlnx_bridge_register);
+
+/**
+ * xlnx_bridge_unregister - Unregister the bridge instance
+ * @bridge: Xlnx bridge instance to unregister
+ *
+ * Unregister @bridge. The bridge shouldn't be owned by any client
+ * at this point.
+ */
+void xlnx_bridge_unregister(struct xlnx_bridge *bridge)
+{
+ if (helper.error)
+ return;
+
+ mutex_lock(&helper.lock);
+ WARN_ON(bridge->owned);
+ xlnx_bridge_debugfs_unregister(bridge);
+ list_del(&bridge->list);
+ mutex_unlock(&helper.lock);
+}
+EXPORT_SYMBOL_GPL(xlnx_bridge_unregister);
+
+/*
+ * Internal functions: used by Xlnx DRM
+ */
+
+/**
+ * xlnx_bridge_helper_init - Initialize the bridge helper
+ * @void: No arg
+ *
+ * Initialize the bridge helper or increment the reference count
+ * if already initialized.
+ *
+ * Return: 0 on success, or -EFAULT if in error state.
+ */
+int xlnx_bridge_helper_init(void)
+{
+ if (helper.refcnt++ > 0) {
+ if (helper.error)
+ return -EFAULT;
+ return 0;
+ }
+
+ INIT_LIST_HEAD(&helper.xlnx_bridges);
+ mutex_init(&helper.lock);
+ helper.error = false;
+
+ if (xlnx_bridge_debugfs_init())
+ pr_err("failed to init xlnx bridge debugfs\n");
+
+ return 0;
+}
+
+/**
+ * xlnx_bridge_helper_fini - Release the bridge helper
+ *
+ * Clean up or decrement the reference of the bridge helper.
+ */
+void xlnx_bridge_helper_fini(void)
+{
+ if (--helper.refcnt > 0)
+ return;
+
+ xlnx_bridge_debugfs_fini();
+
+ if (WARN_ON(!list_empty(&helper.xlnx_bridges))) {
+ helper.error = true;
+ pr_err("any further xlnx bridge call will fail\n");
+ }
+
+ mutex_destroy(&helper.lock);
+}
diff --git a/drivers/gpu/drm/xlnx/xlnx_bridge.h b/drivers/gpu/drm/xlnx/xlnx_bridge.h
new file mode 100644
index 000000000000..fb1b4b748346
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_bridge.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx DRM bridge header
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XLNX_BRIDGE_H_
+#define _XLNX_BRIDGE_H_
+
+struct videomode;
+
+struct xlnx_bridge_debugfs_file;
+
+/**
+ * struct xlnx_bridge - Xilinx bridge device
+ * @list: list node for Xilinx bridge device list
+ * @of_node: OF node for the bridge
+ * @owned: flag if the bridge is owned
+ * @enable: callback to enable the bridge
+ * @disable: callback to disable the bridge
+ * @set_input: callback to set the input
+ * @get_input_fmts: callback to get supported input formats.
+ * @set_output: callback to set the output
+ * @get_output_fmts: callback to get supported output formats.
+ * @set_timing: callback to set timing in connected video timing controller.
+ * @debugfs_file: for debugfs support
+ */
+struct xlnx_bridge {
+ struct list_head list;
+ struct device_node *of_node;
+ bool owned;
+ int (*enable)(struct xlnx_bridge *bridge);
+ void (*disable)(struct xlnx_bridge *bridge);
+ int (*set_input)(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt);
+ int (*get_input_fmts)(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count);
+ int (*set_output)(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt);
+ int (*get_output_fmts)(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count);
+ int (*set_timing)(struct xlnx_bridge *bridge, struct videomode *vm);
+ struct xlnx_bridge_debugfs_file *debugfs_file;
+};
+
+#if IS_ENABLED(CONFIG_DRM_XLNX_BRIDGE)
+/*
+ * Helper functions: used within Xlnx DRM
+ */
+
+struct xlnx_bridge_helper;
+
+int xlnx_bridge_helper_init(void);
+void xlnx_bridge_helper_fini(void);
+
+/*
+ * Helper functions: used by client driver
+ */
+
+int xlnx_bridge_enable(struct xlnx_bridge *bridge);
+void xlnx_bridge_disable(struct xlnx_bridge *bridge);
+int xlnx_bridge_set_input(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt);
+int xlnx_bridge_get_input_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count);
+int xlnx_bridge_set_output(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt);
+int xlnx_bridge_get_output_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count);
+int xlnx_bridge_set_timing(struct xlnx_bridge *bridge, struct videomode *vm);
+struct xlnx_bridge *of_xlnx_bridge_get(struct device_node *bridge_np);
+void of_xlnx_bridge_put(struct xlnx_bridge *bridge);
+
+/*
+ * Bridge registration: used by bridge driver
+ */
+
+int xlnx_bridge_register(struct xlnx_bridge *bridge);
+void xlnx_bridge_unregister(struct xlnx_bridge *bridge);
+
+#else /* CONFIG_DRM_XLNX_BRIDGE */
+
+struct xlnx_bridge_helper;
+
+static inline int xlnx_bridge_helper_init(void)
+{
+ return 0;
+}
+
+static inline void xlnx_bridge_helper_fini(void)
+{
+}
+
+static inline int xlnx_bridge_enable(struct xlnx_bridge *bridge)
+{
+ if (bridge)
+ return -ENODEV;
+ return 0;
+}
+
+static inline void xlnx_bridge_disable(struct xlnx_bridge *bridge)
+{
+}
+
+static inline int xlnx_bridge_set_input(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt)
+{
+ if (bridge)
+ return -ENODEV;
+ return 0;
+}
+
+static inline int xlnx_bridge_get_input_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count)
+{
+ if (bridge)
+ return -ENODEV;
+ return 0;
+}
+
+static inline int xlnx_bridge_set_output(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt)
+{
+ if (bridge)
+ return -ENODEV;
+ return 0;
+}
+
+static inline int xlnx_bridge_get_output_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count)
+{
+ if (bridge)
+ return -ENODEV;
+ return 0;
+}
+
+static inline int xlnx_bridge_set_timing(struct xlnx_bridge *bridge,
+ struct videomode *vm)
+{
+ if (bridge)
+ return -ENODEV;
+ return 0;
+}
+
+static inline struct xlnx_bridge *
+of_xlnx_bridge_get(struct device_node *bridge_np)
+{
+ return NULL;
+}
+
+static inline void of_xlnx_bridge_put(struct xlnx_bridge *bridge)
+{
+}
+
+static inline int xlnx_bridge_register(struct xlnx_bridge *bridge)
+{
+ return 0;
+}
+
+static inline void xlnx_bridge_unregister(struct xlnx_bridge *bridge)
+{
+}
+
+#endif /* CONFIG_DRM_XLNX_BRIDGE */
+
+#endif /* _XLNX_BRIDGE_H_ */
diff --git a/drivers/gpu/drm/xlnx/xlnx_crtc.c b/drivers/gpu/drm/xlnx/xlnx_crtc.c
new file mode 100644
index 000000000000..fd718ca5bf04
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_crtc.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM crtc driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drm_drv.h>
+#include <drm/drm_crtc.h>
+
+#include <linux/list.h>
+#include <linux/dma-mapping.h>
+
+#include "xlnx_crtc.h"
+#include "xlnx_drv.h"
+
+/*
+ * Overview
+ * --------
+ *
+ * The Xilinx CRTC layer is to enable the custom interface to CRTC drivers.
+ * The interface is used by Xilinx DRM driver where it needs CRTC
+ * functionality. CRTC drivers should attach the desired callbacks
+ * to struct xlnx_crtc and register the xlnx_crtc with corresponding
+ * drm_device. It's highly recommended CRTC drivers register all callbacks
+ * even though many of them are optional.
+ * The CRTC helper simply walks through the registered CRTC device,
+ * and call the callbacks.
+ */
+
+/**
+ * struct xlnx_crtc_helper - Xilinx CRTC helper
+ * @xlnx_crtcs: list of Xilinx CRTC devices
+ * @lock: lock to protect @xlnx_crtcs
+ * @drm: back pointer to DRM core
+ */
+struct xlnx_crtc_helper {
+ struct list_head xlnx_crtcs;
+ struct mutex lock; /* lock for @xlnx_crtcs */
+ struct drm_device *drm;
+};
+
+#define XLNX_CRTC_MAX_HEIGHT_WIDTH INT_MAX
+
+unsigned int xlnx_crtc_helper_get_align(struct xlnx_crtc_helper *helper)
+{
+ struct xlnx_crtc *crtc;
+ unsigned int align = 1, tmp;
+
+ list_for_each_entry(crtc, &helper->xlnx_crtcs, list) {
+ if (crtc->get_align) {
+ tmp = crtc->get_align(crtc);
+ align = ALIGN(align, tmp);
+ }
+ }
+
+ return align;
+}
+
+u64 xlnx_crtc_helper_get_dma_mask(struct xlnx_crtc_helper *helper)
+{
+ struct xlnx_crtc *crtc;
+ u64 mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8), tmp;
+
+ list_for_each_entry(crtc, &helper->xlnx_crtcs, list) {
+ if (crtc->get_dma_mask) {
+ tmp = crtc->get_dma_mask(crtc);
+ mask = min(mask, tmp);
+ }
+ }
+
+ return mask;
+}
+
+int xlnx_crtc_helper_get_max_width(struct xlnx_crtc_helper *helper)
+{
+ struct xlnx_crtc *crtc;
+ int width = XLNX_CRTC_MAX_HEIGHT_WIDTH, tmp;
+
+ list_for_each_entry(crtc, &helper->xlnx_crtcs, list) {
+ if (crtc->get_max_width) {
+ tmp = crtc->get_max_width(crtc);
+ width = min(width, tmp);
+ }
+ }
+
+ return width;
+}
+
+int xlnx_crtc_helper_get_max_height(struct xlnx_crtc_helper *helper)
+{
+ struct xlnx_crtc *crtc;
+ int height = XLNX_CRTC_MAX_HEIGHT_WIDTH, tmp;
+
+ list_for_each_entry(crtc, &helper->xlnx_crtcs, list) {
+ if (crtc->get_max_height) {
+ tmp = crtc->get_max_height(crtc);
+ height = min(height, tmp);
+ }
+ }
+
+ return height;
+}
+
+uint32_t xlnx_crtc_helper_get_format(struct xlnx_crtc_helper *helper)
+{
+ struct xlnx_crtc *crtc;
+ u32 format = 0, tmp;
+
+ list_for_each_entry(crtc, &helper->xlnx_crtcs, list) {
+ if (crtc->get_format) {
+ tmp = crtc->get_format(crtc);
+ if (format && format != tmp)
+ return 0;
+ format = tmp;
+ }
+ }
+
+ return format;
+}
+
+u32 xlnx_crtc_helper_get_cursor_width(struct xlnx_crtc_helper *helper)
+{
+ struct xlnx_crtc *crtc;
+ u32 width = XLNX_CRTC_MAX_HEIGHT_WIDTH, tmp;
+
+ list_for_each_entry(crtc, &helper->xlnx_crtcs, list) {
+ if (crtc->get_cursor_width) {
+ tmp = crtc->get_cursor_width(crtc);
+ width = min(width, tmp);
+ }
+ }
+
+ return width;
+}
+
+u32 xlnx_crtc_helper_get_cursor_height(struct xlnx_crtc_helper *helper)
+{
+ struct xlnx_crtc *crtc;
+ u32 height = XLNX_CRTC_MAX_HEIGHT_WIDTH, tmp;
+
+ list_for_each_entry(crtc, &helper->xlnx_crtcs, list) {
+ if (crtc->get_cursor_height) {
+ tmp = crtc->get_cursor_height(crtc);
+ height = min(height, tmp);
+ }
+ }
+
+ return height;
+}
+struct xlnx_crtc_helper *xlnx_crtc_helper_init(struct drm_device *drm)
+{
+ struct xlnx_crtc_helper *helper;
+
+ helper = devm_kzalloc(drm->dev, sizeof(*helper), GFP_KERNEL);
+ if (!helper)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&helper->xlnx_crtcs);
+ mutex_init(&helper->lock);
+ helper->drm = drm;
+
+ return helper;
+}
+
+void xlnx_crtc_helper_fini(struct drm_device *drm,
+ struct xlnx_crtc_helper *helper)
+{
+ if (WARN_ON(helper->drm != drm))
+ return;
+
+ if (WARN_ON(!list_empty(&helper->xlnx_crtcs)))
+ return;
+
+ mutex_destroy(&helper->lock);
+ devm_kfree(drm->dev, helper);
+}
+
+void xlnx_crtc_register(struct drm_device *drm, struct xlnx_crtc *crtc)
+{
+ struct xlnx_crtc_helper *helper = xlnx_get_crtc_helper(drm);
+
+ mutex_lock(&helper->lock);
+ list_add_tail(&crtc->list, &helper->xlnx_crtcs);
+ mutex_unlock(&helper->lock);
+}
+EXPORT_SYMBOL_GPL(xlnx_crtc_register);
+
+void xlnx_crtc_unregister(struct drm_device *drm, struct xlnx_crtc *crtc)
+{
+ struct xlnx_crtc_helper *helper = xlnx_get_crtc_helper(drm);
+
+ mutex_lock(&helper->lock);
+ list_del(&crtc->list);
+ mutex_unlock(&helper->lock);
+}
+EXPORT_SYMBOL_GPL(xlnx_crtc_unregister);
diff --git a/drivers/gpu/drm/xlnx/xlnx_crtc.h b/drivers/gpu/drm/xlnx/xlnx_crtc.h
new file mode 100644
index 000000000000..9ab57594aba8
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_crtc.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx DRM crtc header
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XLNX_CRTC_H_
+#define _XLNX_CRTC_H_
+
+/**
+ * struct xlnx_crtc - Xilinx CRTC device
+ * @crtc: DRM CRTC device
+ * @list: list node for Xilinx CRTC device list
+ * @get_align: Get the alignment requirement of CRTC device
+ * @get_dma_mask: Get the dma mask of CRTC device
+ * @get_max_width: Get the maximum supported width
+ * @get_max_height: Get the maximum supported height
+ * @get_format: Get the current format of CRTC device
+ * @get_cursor_width: Get the cursor width
+ * @get_cursor_height: Get the cursor height
+ */
+struct xlnx_crtc {
+ struct drm_crtc crtc;
+ struct list_head list;
+ unsigned int (*get_align)(struct xlnx_crtc *crtc);
+ u64 (*get_dma_mask)(struct xlnx_crtc *crtc);
+ int (*get_max_width)(struct xlnx_crtc *crtc);
+ int (*get_max_height)(struct xlnx_crtc *crtc);
+ uint32_t (*get_format)(struct xlnx_crtc *crtc);
+ uint32_t (*get_cursor_width)(struct xlnx_crtc *crtc);
+ uint32_t (*get_cursor_height)(struct xlnx_crtc *crtc);
+};
+
+/*
+ * Helper functions: used within Xlnx DRM
+ */
+
+struct xlnx_crtc_helper;
+
+unsigned int xlnx_crtc_helper_get_align(struct xlnx_crtc_helper *helper);
+u64 xlnx_crtc_helper_get_dma_mask(struct xlnx_crtc_helper *helper);
+int xlnx_crtc_helper_get_max_width(struct xlnx_crtc_helper *helper);
+int xlnx_crtc_helper_get_max_height(struct xlnx_crtc_helper *helper);
+uint32_t xlnx_crtc_helper_get_format(struct xlnx_crtc_helper *helper);
+u32 xlnx_crtc_helper_get_cursor_width(struct xlnx_crtc_helper *helper);
+u32 xlnx_crtc_helper_get_cursor_height(struct xlnx_crtc_helper *helper);
+
+struct xlnx_crtc_helper *xlnx_crtc_helper_init(struct drm_device *drm);
+void xlnx_crtc_helper_fini(struct drm_device *drm,
+ struct xlnx_crtc_helper *helper);
+
+/*
+ * CRTC registration: used by other sub-driver modules
+ */
+
+static inline struct xlnx_crtc *to_xlnx_crtc(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct xlnx_crtc, crtc);
+}
+
+void xlnx_crtc_register(struct drm_device *drm, struct xlnx_crtc *crtc);
+void xlnx_crtc_unregister(struct drm_device *drm, struct xlnx_crtc *crtc);
+
+#endif /* _XLNX_CRTC_H_ */
diff --git a/drivers/gpu/drm/xlnx/xlnx_csc.c b/drivers/gpu/drm/xlnx/xlnx_csc.c
new file mode 100644
index 000000000000..1d4341dce570
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_csc.c
@@ -0,0 +1,571 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VPSS CSC DRM bridge driver
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ *
+ * Author: Venkateshwar rao G <vgannava@xilinx.com>
+ */
+
+/*
+ * Overview:
+ * This experimental driver works as a bridge driver and
+ * reuses code from the V4L2 driver.
+ * TODO:
+ * Need to implement in a modular approach to share driver code between
+ * V4L2 and DRM frameworks.
+ * Should be integrated with the plane.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <uapi/linux/media-bus-format.h>
+
+#include "xlnx_bridge.h"
+
+/* Register offset */
+#define XV_CSC_AP_CTRL (0x000)
+#define XV_CSC_INVIDEOFORMAT (0x010)
+#define XV_CSC_OUTVIDEOFORMAT (0x018)
+#define XV_CSC_WIDTH (0x020)
+#define XV_CSC_HEIGHT (0x028)
+#define XV_CSC_K11 (0x050)
+#define XV_CSC_K12 (0x058)
+#define XV_CSC_K13 (0x060)
+#define XV_CSC_K21 (0x068)
+#define XV_CSC_K22 (0x070)
+#define XV_CSC_K23 (0x078)
+#define XV_CSC_K31 (0x080)
+#define XV_CSC_K32 (0x088)
+#define XV_CSC_K33 (0x090)
+#define XV_CSC_ROFFSET (0x098)
+#define XV_CSC_GOFFSET (0x0a0)
+#define XV_CSC_BOFFSET (0x0a8)
+#define XV_CSC_CLAMPMIN (0x0b0)
+#define XV_CSC_CLIPMAX (0x0b8)
+#define XV_CSC_SCALE_FACTOR (4096)
+#define XV_CSC_DIVISOR (10000)
+/* Streaming Macros */
+#define XCSC_CLAMP_MIN_ZERO (0)
+#define XCSC_AP_START BIT(0)
+#define XCSC_AP_AUTO_RESTART BIT(7)
+#define XCSC_STREAM_ON (XCSC_AP_START | XCSC_AP_AUTO_RESTART)
+#define XCSC_STREAM_OFF (0)
+/* GPIO Reset Assert/De-assert */
+#define XCSC_RESET_ASSERT (1)
+#define XCSC_RESET_DEASSERT (0)
+
+#define XCSC_MIN_WIDTH (64)
+#define XCSC_MAX_WIDTH (8192)
+#define XCSC_MIN_HEIGHT (64)
+#define XCSC_MAX_HEIGHT (4320)
+
+static const u32 xilinx_csc_video_fmts[] = {
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_VUY8_1X24,
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_VYYUYY8_1X24,
+};
+
+/* vpss_csc_color_fmt - Color format type */
+enum vpss_csc_color_fmt {
+ XVIDC_CSF_RGB = 0,
+ XVIDC_CSF_YCRCB_444,
+ XVIDC_CSF_YCRCB_422,
+ XVIDC_CSF_YCRCB_420,
+};
+
+/**
+ * struct xilinx_csc - Core configuration of csc device structure
+ * @base: pointer to register base address
+ * @dev: device structure
+ * @bridge: xilinx bridge
+ * @cft_in: input color format
+ * @cft_out: output color format
+ * @color_depth: color depth
+ * @k_hw: array of hardware values
+ * @clip_max: clipping maximum value
+ * @width: width of the video
+ * @height: height of video
+ * @max_width: maximum number of pixels in a line
+ * @max_height: maximum number of lines per frame
+ * @rst_gpio: Handle to GPIO specifier to assert/de-assert the reset line
+ * @aclk: IP clock struct
+ */
+struct xilinx_csc {
+ void __iomem *base;
+ struct device *dev;
+ struct xlnx_bridge bridge;
+ enum vpss_csc_color_fmt cft_in;
+ enum vpss_csc_color_fmt cft_out;
+ u32 color_depth;
+ s32 k_hw[3][4];
+ s32 clip_max;
+ u32 width;
+ u32 height;
+ u32 max_width;
+ u32 max_height;
+ struct gpio_desc *rst_gpio;
+ struct clk *aclk;
+};
+
+/* Write a 32-bit value to the CSC core register at @offset from @base. */
+static inline void xilinx_csc_write(void __iomem *base, u32 offset, u32 val)
+{
+	writel(val, base + offset);
+}
+
+/* Read the 32-bit CSC core register at @offset from @base. */
+static inline u32 xilinx_csc_read(void __iomem *base, u32 offset)
+{
+	return readl(base + offset);
+}
+
+/**
+ * bridge_to_layer - Gets the parent structure
+ * @bridge: pointer to the embedded xlnx_bridge member.
+ *
+ * Return: pointer to the xilinx_csc that contains @bridge
+ */
+static inline struct xilinx_csc *bridge_to_layer(struct xlnx_bridge *bridge)
+{
+	return container_of(bridge, struct xilinx_csc, bridge);
+}
+
+/* Program the 3x3 coefficient matrix, row by row (K11..K33). */
+static void xilinx_csc_write_rgb_3x3(struct xilinx_csc *csc)
+{
+	static const u32 reg[3][3] = {
+		{ XV_CSC_K11, XV_CSC_K12, XV_CSC_K13 },
+		{ XV_CSC_K21, XV_CSC_K22, XV_CSC_K23 },
+		{ XV_CSC_K31, XV_CSC_K32, XV_CSC_K33 },
+	};
+	unsigned int row, col;
+
+	/* Same register order as writing K11..K33 one by one */
+	for (row = 0; row < 3; row++)
+		for (col = 0; col < 3; col++)
+			xilinx_csc_write(csc->base, reg[row][col],
+					 csc->k_hw[row][col]);
+}
+
+/* Program the per-channel (R, G, B) offsets from column 3 of k_hw. */
+static void xilinx_csc_write_rgb_offset(struct xilinx_csc *csc)
+{
+	xilinx_csc_write(csc->base, XV_CSC_ROFFSET, csc->k_hw[0][3]);
+	xilinx_csc_write(csc->base, XV_CSC_GOFFSET, csc->k_hw[1][3]);
+	xilinx_csc_write(csc->base, XV_CSC_BOFFSET, csc->k_hw[2][3]);
+}
+
+/* Push the complete coefficient set (3x3 matrix + offsets) to hardware. */
+static void xilinx_csc_write_coeff(struct xilinx_csc *csc)
+{
+	xilinx_csc_write_rgb_3x3(csc);
+	xilinx_csc_write_rgb_offset(csc);
+}
+
+/*
+ * Reset the core to a pass-through default: 4:2:2 in/out with an
+ * identity coefficient matrix, then program the hardware accordingly.
+ */
+static void xcsc_set_default_state(struct xilinx_csc *csc)
+{
+	csc->cft_in = XVIDC_CSF_YCRCB_422;
+	csc->cft_out = XVIDC_CSF_YCRCB_422;
+
+	/* This represents an identity matrix multiplied by 2^12 */
+	csc->k_hw[0][0] = XV_CSC_SCALE_FACTOR;
+	csc->k_hw[0][1] = 0;
+	csc->k_hw[0][2] = 0;
+	csc->k_hw[1][0] = 0;
+	csc->k_hw[1][1] = XV_CSC_SCALE_FACTOR;
+	csc->k_hw[1][2] = 0;
+	csc->k_hw[2][0] = 0;
+	csc->k_hw[2][1] = 0;
+	csc->k_hw[2][2] = XV_CSC_SCALE_FACTOR;
+	csc->k_hw[0][3] = 0;
+	csc->k_hw[1][3] = 0;
+	csc->k_hw[2][3] = 0;
+	/* Clip at the maximum code value for the configured color depth */
+	csc->clip_max = ((1 << csc->color_depth) - 1);
+	xilinx_csc_write(csc->base, XV_CSC_INVIDEOFORMAT, csc->cft_in);
+	xilinx_csc_write(csc->base, XV_CSC_OUTVIDEOFORMAT, csc->cft_out);
+	xilinx_csc_write_coeff(csc);
+	xilinx_csc_write(csc->base, XV_CSC_CLIPMAX, csc->clip_max);
+	xilinx_csc_write(csc->base, XV_CSC_CLAMPMIN, XCSC_CLAMP_MIN_ZERO);
+}
+
+/**
+ * xcsc_ycrcb_to_rgb - Load BT.709 YCrCb-to-RGB conversion coefficients
+ * @csc: Pointer to csc device structure
+ * @clip_max: Updated with the maximum clipping value for the color depth
+ *
+ * Only fills in the k_hw coefficient table; the caller must write the
+ * values to hardware (see xcsc_set_coeff()).
+ */
+static void xcsc_ycrcb_to_rgb(struct xilinx_csc *csc, s32 *clip_max)
+{
+	/* Offsets below are specified for 8 bpc; scale them up */
+	u16 bpc_scale = (1 << (csc->color_depth - 8));
+	/*
+	 * See http://graficaobscura.com/matrix/index.html for
+	 * how these numbers are derived. The VPSS CSC IP is
+	 * derived from this Matrix style algorithm. And the
+	 * 'magic' numbers here are derived from the algorithm.
+	 *
+	 * XV_CSC_DIVISOR is used to help with floating constants
+	 * while performing multiplicative operations.
+	 *
+	 * Coefficients valid only for BT 709
+	 */
+	csc->k_hw[0][0] = 11644 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[0][1] = 0;
+	csc->k_hw[0][2] = 17927 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[1][0] = 11644 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[1][1] = -2132 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[1][2] = -5329 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[2][0] = 11644 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[2][1] = 21124 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[2][2] = 0;
+	csc->k_hw[0][3] = -248 * bpc_scale;
+	csc->k_hw[1][3] = 77 * bpc_scale;
+	csc->k_hw[2][3] = -289 * bpc_scale;
+	*clip_max = ((1 << csc->color_depth) - 1);
+}
+
+/**
+ * xcsc_rgb_to_ycrcb - Load BT.709 RGB-to-YCrCb conversion coefficients
+ * @csc: Pointer to csc device structure
+ * @clip_max: Updated with the maximum clipping value for the color depth
+ *
+ * Only fills in the k_hw coefficient table; the caller must write the
+ * values to hardware (see xcsc_set_coeff()).
+ */
+static void xcsc_rgb_to_ycrcb(struct xilinx_csc *csc, s32 *clip_max)
+{
+	/* Offsets below are specified for 8 bpc; scale them up */
+	u16 bpc_scale = (1 << (csc->color_depth - 8));
+	/*
+	 * See http://graficaobscura.com/matrix/index.html for
+	 * how these numbers are derived. The VPSS CSC IP is
+	 * derived from this Matrix style algorithm. And the
+	 * 'magic' numbers here are derived from the algorithm.
+	 *
+	 * XV_CSC_DIVISOR is used to help with floating constants
+	 * while performing multiplicative operations.
+	 *
+	 * Coefficients valid only for BT 709
+	 */
+	csc->k_hw[0][0] = 1826 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[0][1] = 6142 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[0][2] = 620 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[1][0] = -1006 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[1][1] = -3386 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[1][2] = 4392 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[2][0] = 4392 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[2][1] = -3989 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[2][2] = -403 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR;
+	csc->k_hw[0][3] = 16 * bpc_scale;
+	csc->k_hw[1][3] = 128 * bpc_scale;
+	csc->k_hw[2][3] = 128 * bpc_scale;
+	*clip_max = ((1 << csc->color_depth) - 1);
+}
+
+/**
+ * xcsc_set_coeff - Sets the coefficients
+ * @csc: Pointer to csc device structure
+ *
+ * This function writes the current in/out formats, coefficient matrix,
+ * and clip/clamp limits to the hardware.
+ */
+static void xcsc_set_coeff(struct xilinx_csc *csc)
+{
+	xilinx_csc_write(csc->base, XV_CSC_INVIDEOFORMAT, csc->cft_in);
+	xilinx_csc_write(csc->base, XV_CSC_OUTVIDEOFORMAT, csc->cft_out);
+	xilinx_csc_write_coeff(csc);
+	xilinx_csc_write(csc->base, XV_CSC_CLIPMAX, csc->clip_max);
+	xilinx_csc_write(csc->base, XV_CSC_CLAMPMIN, XCSC_CLAMP_MIN_ZERO);
+}
+
+/**
+ * xilinx_csc_bridge_enable - enables csc core
+ * @bridge: bridge instance
+ *
+ * This function starts the csc core with auto-restart, so the core
+ * keeps processing frames until explicitly stopped.
+ *
+ * Return: 0 on success.
+ */
+static int xilinx_csc_bridge_enable(struct xlnx_bridge *bridge)
+{
+	struct xilinx_csc *csc = bridge_to_layer(bridge);
+
+	xilinx_csc_write(csc->base, XV_CSC_AP_CTRL, XCSC_STREAM_ON);
+
+	return 0;
+}
+
+/**
+ * xilinx_csc_bridge_disable - disables csc core
+ * @bridge: bridge instance
+ *
+ * This function stops streaming and then pulses the global IP reset
+ * line (assert followed by de-assert) through the reset GPIO.
+ */
+static void xilinx_csc_bridge_disable(struct xlnx_bridge *bridge)
+{
+	struct xilinx_csc *csc = bridge_to_layer(bridge);
+
+	xilinx_csc_write(csc->base, XV_CSC_AP_CTRL, XCSC_STREAM_OFF);
+	/* Reset the Global IP Reset through GPIO */
+	gpiod_set_value_cansleep(csc->rst_gpio, XCSC_RESET_ASSERT);
+	gpiod_set_value_cansleep(csc->rst_gpio, XCSC_RESET_DEASSERT);
+}
+
+/**
+ * xilinx_csc_bridge_set_input - Sets the input parameters of csc
+ * @bridge: bridge instance
+ * @width: width of video
+ * @height: height of video
+ * @bus_fmt: video bus format
+ *
+ * This function validates the requested parameters, resets the core to
+ * its default (identity) state, and then programs the input format and
+ * resolution.
+ *
+ * Return: 0 on success. -EINVAL for invalid parameters.
+ */
+static int xilinx_csc_bridge_set_input(struct xlnx_bridge *bridge, u32 width,
+				       u32 height, u32 bus_fmt)
+{
+	struct xilinx_csc *csc = bridge_to_layer(bridge);
+
+	/*
+	 * Validate before touching hardware or driver state, so that a
+	 * rejected request does not clobber the current configuration.
+	 */
+	if (height > csc->max_height || height < XCSC_MIN_HEIGHT)
+		return -EINVAL;
+
+	if (width > csc->max_width || width < XCSC_MIN_WIDTH)
+		return -EINVAL;
+
+	xcsc_set_default_state(csc);
+
+	csc->height = height;
+	csc->width = width;
+
+	switch (bus_fmt) {
+	case MEDIA_BUS_FMT_RGB888_1X24:
+		csc->cft_in = XVIDC_CSF_RGB;
+		break;
+	case MEDIA_BUS_FMT_VUY8_1X24:
+		csc->cft_in = XVIDC_CSF_YCRCB_444;
+		break;
+	case MEDIA_BUS_FMT_UYVY8_1X16:
+		csc->cft_in = XVIDC_CSF_YCRCB_422;
+		break;
+	case MEDIA_BUS_FMT_VYYUYY8_1X24:
+		csc->cft_in = XVIDC_CSF_YCRCB_420;
+		break;
+	default:
+		dev_dbg(csc->dev, "unsupported input video format\n");
+		return -EINVAL;
+	}
+
+	xilinx_csc_write(csc->base, XV_CSC_WIDTH, width);
+	xilinx_csc_write(csc->base, XV_CSC_HEIGHT, height);
+
+	return 0;
+}
+
+/**
+ * xilinx_csc_bridge_get_input_fmts - input formats supported by csc
+ * @bridge: bridge instance
+ * @fmts: Pointer to be updated with the supported media bus formats
+ * @count: Pointer to be updated with the count of video bus formats
+ *
+ * This function provides the input video formats information of the csc.
+ * Return: 0 on success.
+ */
+static int xilinx_csc_bridge_get_input_fmts(struct xlnx_bridge *bridge,
+					    const u32 **fmts, u32 *count)
+{
+	*fmts = xilinx_csc_video_fmts;
+	*count = ARRAY_SIZE(xilinx_csc_video_fmts);
+
+	return 0;
+}
+
+/**
+ * xilinx_csc_bridge_set_output - Sets the output parameters of csc
+ * @bridge: bridge instance
+ * @width: width of video
+ * @height: height of video
+ * @bus_fmt: video bus format
+ *
+ * This function sets the output parameters of csc. The output resolution
+ * must match the input resolution previously configured through
+ * xilinx_csc_bridge_set_input(), and the appropriate BT.709 conversion
+ * coefficients are selected based on the input/output format pair.
+ *
+ * Return: 0 on success. -EINVAL for invalid parameters.
+ */
+static int xilinx_csc_bridge_set_output(struct xlnx_bridge *bridge, u32 width,
+					u32 height, u32 bus_fmt)
+{
+	struct xilinx_csc *csc = bridge_to_layer(bridge);
+
+	if (width != csc->width || height != csc->height)
+		return -EINVAL;
+
+	/*
+	 * cft_in holds enum vpss_csc_color_fmt values (XVIDC_CSF_*), so
+	 * compare against XVIDC_CSF_RGB rather than a MEDIA_BUS_FMT_*
+	 * code, which lives in a different value space and would never
+	 * match.
+	 */
+	switch (bus_fmt) {
+	case MEDIA_BUS_FMT_RGB888_1X24:
+		csc->cft_out = XVIDC_CSF_RGB;
+		dev_dbg(csc->dev, "Media Format Out : RGB");
+		if (csc->cft_in != XVIDC_CSF_RGB)
+			xcsc_ycrcb_to_rgb(csc, &csc->clip_max);
+		break;
+	case MEDIA_BUS_FMT_VUY8_1X24:
+		csc->cft_out = XVIDC_CSF_YCRCB_444;
+		dev_dbg(csc->dev, "Media Format Out : YUV 444");
+		if (csc->cft_in == XVIDC_CSF_RGB)
+			xcsc_rgb_to_ycrcb(csc, &csc->clip_max);
+		break;
+	case MEDIA_BUS_FMT_UYVY8_1X16:
+		csc->cft_out = XVIDC_CSF_YCRCB_422;
+		dev_dbg(csc->dev, "Media Format Out : YUV 422");
+		if (csc->cft_in == XVIDC_CSF_RGB)
+			xcsc_rgb_to_ycrcb(csc, &csc->clip_max);
+		break;
+	case MEDIA_BUS_FMT_VYYUYY8_1X24:
+		csc->cft_out = XVIDC_CSF_YCRCB_420;
+		dev_dbg(csc->dev, "Media Format Out : YUV 420");
+		if (csc->cft_in == XVIDC_CSF_RGB)
+			xcsc_rgb_to_ycrcb(csc, &csc->clip_max);
+		break;
+	default:
+		dev_info(csc->dev, "unsupported output video format\n");
+		return -EINVAL;
+	}
+	xcsc_set_coeff(csc);
+
+	return 0;
+}
+
+/**
+ * xilinx_csc_bridge_get_output_fmts - output formats supported by csc
+ * @bridge: bridge instance
+ * @fmts: Pointer to be updated with the supported media bus formats
+ * @count: Pointer to be updated with the count of video bus formats
+ *
+ * This function provides the output video formats information of the csc.
+ * Return: 0 on success.
+ */
+static int xilinx_csc_bridge_get_output_fmts(struct xlnx_bridge *bridge,
+					     const u32 **fmts, u32 *count)
+{
+	*fmts = xilinx_csc_video_fmts;
+	*count = ARRAY_SIZE(xilinx_csc_video_fmts);
+	return 0;
+}
+
+/**
+ * xcsc_parse_of - Parse the device-tree properties of the CSC node
+ * @csc: Pointer to csc device structure
+ *
+ * Reads the AXI clock, video bit depth, reset GPIO and maximum
+ * resolution from the device tree and validates the ranges.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int xcsc_parse_of(struct xilinx_csc *csc)
+{
+	int ret;
+	struct device_node *node = csc->dev->of_node;
+
+	csc->aclk = devm_clk_get(csc->dev, NULL);
+	if (IS_ERR(csc->aclk)) {
+		ret = PTR_ERR(csc->aclk);
+		dev_err(csc->dev, "failed to get aclk %d\n", ret);
+		return ret;
+	}
+
+	/* Bits per color component (8/10/12/16) */
+	ret = of_property_read_u32(node, "xlnx,video-width",
+				   &csc->color_depth);
+	if (ret < 0) {
+		dev_info(csc->dev, "video width not present in DT\n");
+		return ret;
+	}
+	if (csc->color_depth != 8 && csc->color_depth != 10 &&
+	    csc->color_depth != 12 && csc->color_depth != 16) {
+		dev_err(csc->dev, "Invalid video width in DT\n");
+		return -EINVAL;
+	}
+	/* Reset GPIO; requested asserted so the core starts in reset */
+	csc->rst_gpio = devm_gpiod_get(csc->dev, "reset", GPIOD_OUT_HIGH);
+	if (IS_ERR(csc->rst_gpio)) {
+		if (PTR_ERR(csc->rst_gpio) != -EPROBE_DEFER)
+			dev_err(csc->dev, "Reset GPIO not setup in DT");
+		return PTR_ERR(csc->rst_gpio);
+	}
+
+	ret = of_property_read_u32(node, "xlnx,max-height", &csc->max_height);
+	if (ret < 0) {
+		dev_err(csc->dev, "xlnx,max-height is missing!");
+		return -EINVAL;
+	} else if (csc->max_height > XCSC_MAX_HEIGHT ||
+		   csc->max_height < XCSC_MIN_HEIGHT) {
+		dev_err(csc->dev, "Invalid height in dt");
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32(node, "xlnx,max-width", &csc->max_width);
+	if (ret < 0) {
+		dev_err(csc->dev, "xlnx,max-width is missing!");
+		return -EINVAL;
+	} else if (csc->max_width > XCSC_MAX_WIDTH ||
+		   csc->max_width < XCSC_MIN_WIDTH) {
+		dev_err(csc->dev, "Invalid width in dt");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * xilinx_csc_probe - Probe the CSC bridge device
+ * @pdev: Pointer to the platform device
+ *
+ * Maps the register space, parses DT properties, enables the clock,
+ * releases the reset line, and registers the xlnx bridge callbacks.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int xilinx_csc_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct xilinx_csc *csc;
+	int ret;
+
+	csc = devm_kzalloc(dev, sizeof(*csc), GFP_KERNEL);
+	if (!csc)
+		return -ENOMEM;
+
+	csc->dev = dev;
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	csc->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(csc->base))
+		/* Propagate the real error (-EBUSY/-EINVAL/...), not -ENOMEM */
+		return PTR_ERR(csc->base);
+
+	platform_set_drvdata(pdev, csc);
+	ret = xcsc_parse_of(csc);
+	if (ret < 0)
+		return ret;
+
+	ret = clk_prepare_enable(csc->aclk);
+	if (ret) {
+		dev_err(csc->dev, "failed to enable clock %d\n", ret);
+		return ret;
+	}
+
+	/* Take the core out of reset before exposing the bridge */
+	gpiod_set_value_cansleep(csc->rst_gpio, XCSC_RESET_DEASSERT);
+	csc->bridge.enable = &xilinx_csc_bridge_enable;
+	csc->bridge.disable = &xilinx_csc_bridge_disable;
+	csc->bridge.set_input = &xilinx_csc_bridge_set_input;
+	csc->bridge.get_input_fmts = &xilinx_csc_bridge_get_input_fmts;
+	csc->bridge.set_output = &xilinx_csc_bridge_set_output;
+	csc->bridge.get_output_fmts = &xilinx_csc_bridge_get_output_fmts;
+	csc->bridge.of_node = dev->of_node;
+
+	ret = xlnx_bridge_register(&csc->bridge);
+	if (ret) {
+		dev_info(csc->dev, "Bridge registration failed\n");
+		goto err_clk;
+	}
+
+	dev_info(csc->dev, "Xilinx VPSS CSC DRM experimental driver probed\n");
+
+	return 0;
+
+err_clk:
+	clk_disable_unprepare(csc->aclk);
+	return ret;
+}
+
+/* Unregister the bridge and release the clock on device removal. */
+static int xilinx_csc_remove(struct platform_device *pdev)
+{
+	struct xilinx_csc *csc = platform_get_drvdata(pdev);
+
+	xlnx_bridge_unregister(&csc->bridge);
+	clk_disable_unprepare(csc->aclk);
+
+	return 0;
+}
+
+/* OF match table: binds against the "xlnx,vpss-csc" compatible string */
+static const struct of_device_id xilinx_csc_of_match[] = {
+	{ .compatible = "xlnx,vpss-csc"},
+	{ }
+};
+MODULE_DEVICE_TABLE(of, xilinx_csc_of_match);
+
+static struct platform_driver csc_bridge_driver = {
+	.probe = xilinx_csc_probe,
+	.remove = xilinx_csc_remove,
+	.driver = {
+		.name = "xlnx,csc-bridge",
+		.of_match_table = xilinx_csc_of_match,
+	},
+};
+
+module_platform_driver(csc_bridge_driver);
+
+MODULE_AUTHOR("Venkateshwar Rao <vgannava@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx FPGA CSC Bridge Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/xlnx_drv.c b/drivers/gpu/drm/xlnx/xlnx_drv.c
new file mode 100644
index 000000000000..b6a31ba034c9
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_drv.c
@@ -0,0 +1,542 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM KMS Driver
+ *
+ * Copyright (C) 2013 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drm_drv.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-resv.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+
+#include "xlnx_bridge.h"
+#include "xlnx_crtc.h"
+#include "xlnx_drv.h"
+#include "xlnx_fb.h"
+#include "xlnx_gem.h"
+
+#define DRIVER_NAME "xlnx"
+#define DRIVER_DESC "Xilinx DRM KMS Driver"
+#define DRIVER_DATE "20130509"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+static uint xlnx_fbdev_vres = 2;
+module_param_named(fbdev_vres, xlnx_fbdev_vres, uint, 0444);
+MODULE_PARM_DESC(fbdev_vres,
+ "fbdev virtual resolution multiplier for fb (default: 2)");
+
+/**
+ * struct xlnx_drm - Xilinx DRM private data
+ * @drm: DRM core
+ * @crtc: Xilinx DRM CRTC helper
+ * @fb: DRM fb helper
+ * @master: logical master device for pipeline
+ * @suspend_state: atomic state for suspend / resume
+ * @master_count: Counter to track number of fake master instances
+ *                granted via the CAP_SYS_ADMIN hack in xlnx_drm_open().
+ *                NOTE(review): no locking is visible here - confirm
+ *                open/release serialization covers this counter.
+ */
+struct xlnx_drm {
+	struct drm_device *drm;
+	struct xlnx_crtc_helper *crtc;
+	struct drm_fb_helper *fb;
+	struct platform_device *master;
+	struct drm_atomic_state *suspend_state;
+	u32 master_count;
+};
+
+/**
+ * xlnx_get_crtc_helper - Return the crtc helper instance
+ * @drm: DRM device
+ *
+ * Valid only after xlnx_bind() has set drm->dev_private.
+ *
+ * Return: the crtc helper instance
+ */
+struct xlnx_crtc_helper *xlnx_get_crtc_helper(struct drm_device *drm)
+{
+	struct xlnx_drm *xlnx_drm = drm->dev_private;
+
+	return xlnx_drm->crtc;
+}
+
+/**
+ * xlnx_get_align - Return the align requirement through CRTC helper
+ * @drm: DRM device
+ *
+ * Return: the alignment requirement
+ */
+unsigned int xlnx_get_align(struct drm_device *drm)
+{
+	struct xlnx_drm *xlnx_drm = drm->dev_private;
+
+	return xlnx_crtc_helper_get_align(xlnx_drm->crtc);
+}
+
+/**
+ * xlnx_get_format - Return the current format of CRTC
+ * @drm: DRM device
+ *
+ * Return: the current CRTC format
+ */
+uint32_t xlnx_get_format(struct drm_device *drm)
+{
+	struct xlnx_drm *xlnx_drm = drm->dev_private;
+
+	return xlnx_crtc_helper_get_format(xlnx_drm->crtc);
+}
+
+/* Forward hotplug events to the fbdev helper, if fbdev is enabled. */
+static void xlnx_output_poll_changed(struct drm_device *drm)
+{
+	struct xlnx_drm *xlnx_drm = drm->dev_private;
+
+	if (xlnx_drm->fb)
+		drm_fb_helper_hotplug_event(xlnx_drm->fb);
+}
+
+static const struct drm_mode_config_funcs xlnx_mode_config_funcs = {
+ .fb_create = xlnx_fb_create,
+ .output_poll_changed = xlnx_output_poll_changed,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+/* Populate mode_config limits from the bound CRTC's capabilities. */
+static void xlnx_mode_config_init(struct drm_device *drm)
+{
+	struct xlnx_drm *xlnx_drm = drm->dev_private;
+	struct xlnx_crtc_helper *crtc = xlnx_drm->crtc;
+
+	drm->mode_config.min_width = 0;
+	drm->mode_config.min_height = 0;
+	drm->mode_config.max_width = xlnx_crtc_helper_get_max_width(crtc);
+	drm->mode_config.max_height = xlnx_crtc_helper_get_max_height(crtc);
+	drm->mode_config.cursor_width =
+		xlnx_crtc_helper_get_cursor_width(crtc);
+	drm->mode_config.cursor_height =
+		xlnx_crtc_helper_get_cursor_height(crtc);
+}
+
+static int xlnx_drm_open(struct drm_device *dev, struct drm_file *file)
+{
+	struct xlnx_drm *xlnx_drm = dev->dev_private;
+
+	/*
+	 * This is a hacky way to allow the root user to run as a master.
+	 * NOTE(review): this grants master status to any CAP_SYS_ADMIN
+	 * client while another master already exists, so multiple masters
+	 * may coexist - confirm this is intentional and that master_count
+	 * accounting matches xlnx_drm_release().
+	 */
+	if (!(drm_is_primary_client(file) && !dev->master) &&
+	    !file->is_master && capable(CAP_SYS_ADMIN)) {
+		file->is_master = 1;
+		xlnx_drm->master_count++;
+	}
+
+	return 0;
+}
+
+/*
+ * Undo the fake-master grant from xlnx_drm_open() before handing the
+ * file off to the regular drm_release() path.
+ */
+static int xlnx_drm_release(struct inode *inode, struct file *filp)
+{
+	struct drm_file *file = filp->private_data;
+	struct drm_minor *minor = file->minor;
+	struct drm_device *drm = minor->dev;
+	struct xlnx_drm *xlnx_drm = drm->dev_private;
+
+	if (file->is_master && xlnx_drm->master_count) {
+		xlnx_drm->master_count--;
+		file->is_master = 0;
+	}
+
+	return drm_release(inode, filp);
+}
+
+/* Restore the fbdev mode when the last userspace client closes. */
+static void xlnx_lastclose(struct drm_device *drm)
+{
+	struct xlnx_drm *xlnx_drm = drm->dev_private;
+
+	if (xlnx_drm->fb)
+		drm_fb_helper_restore_fbdev_mode_unlocked(xlnx_drm->fb);
+}
+
+static const struct file_operations xlnx_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = xlnx_drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_gem_cma_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
+static struct drm_driver xlnx_drm_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM |
+ DRIVER_ATOMIC,
+ .open = xlnx_drm_open,
+ .lastclose = xlnx_lastclose,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = xlnx_gem_cma_dumb_create,
+ .dumb_destroy = drm_gem_dumb_destroy,
+
+ .fops = &xlnx_fops,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+};
+
+/**
+ * xlnx_bind - Bind the logical master pipeline device
+ * @dev: logical master device
+ *
+ * Allocates the DRM device, initializes mode config and vblank, binds
+ * all component sub-drivers, and optionally sets up fbdev emulation
+ * before registering the DRM device.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int xlnx_bind(struct device *dev)
+{
+	struct xlnx_drm *xlnx_drm;
+	struct drm_device *drm;
+	const struct drm_format_info *info;
+	struct platform_device *master = to_platform_device(dev);
+	struct platform_device *pdev = to_platform_device(dev->parent);
+	int ret;
+	u32 format;
+
+	drm = drm_dev_alloc(&xlnx_drm_driver, &pdev->dev);
+	if (IS_ERR(drm))
+		return PTR_ERR(drm);
+
+	xlnx_drm = devm_kzalloc(drm->dev, sizeof(*xlnx_drm), GFP_KERNEL);
+	if (!xlnx_drm) {
+		ret = -ENOMEM;
+		goto err_drm;
+	}
+
+	drm_mode_config_init(drm);
+	drm->mode_config.funcs = &xlnx_mode_config_funcs;
+
+	ret = drm_vblank_init(drm, 1);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to initialize vblank\n");
+		goto err_xlnx_drm;
+	}
+
+	drm->irq_enabled = 1;
+	drm->dev_private = xlnx_drm;
+	xlnx_drm->drm = drm;
+	xlnx_drm->master = master;
+	drm_kms_helper_poll_init(drm);
+	platform_set_drvdata(master, xlnx_drm);
+
+	xlnx_drm->crtc = xlnx_crtc_helper_init(drm);
+	if (IS_ERR(xlnx_drm->crtc)) {
+		ret = PTR_ERR(xlnx_drm->crtc);
+		goto err_xlnx_drm;
+	}
+
+	ret = component_bind_all(&master->dev, drm);
+	if (ret)
+		goto err_crtc;
+
+	xlnx_mode_config_init(drm);
+	drm_mode_config_reset(drm);
+	dma_set_mask(drm->dev, xlnx_crtc_helper_get_dma_mask(xlnx_drm->crtc));
+
+	format = xlnx_crtc_helper_get_format(xlnx_drm->crtc);
+	info = drm_format_info(format);
+	if (info && info->depth && info->cpp[0]) {
+		unsigned int align;
+
+		align = xlnx_crtc_helper_get_align(xlnx_drm->crtc);
+		xlnx_drm->fb = xlnx_fb_init(drm, info->cpp[0] * 8, 1, align,
+					    xlnx_fbdev_vres);
+		if (IS_ERR(xlnx_drm->fb)) {
+			dev_err(&pdev->dev,
+				"failed to initialize drm fb\n");
+			xlnx_drm->fb = NULL;
+		}
+	} else {
+		/* fbdev emulation is optional */
+		dev_info(&pdev->dev, "fbdev is not initialized\n");
+	}
+
+	ret = drm_dev_register(drm, 0);
+	if (ret < 0)
+		goto err_fb;
+
+	return 0;
+
+err_fb:
+	if (xlnx_drm->fb)
+		xlnx_fb_fini(xlnx_drm->fb);
+	/*
+	 * Components were bound against &master->dev, so unbind with the
+	 * same device (drm->dev is the parent pdev and would not match;
+	 * this mirrors xlnx_unbind()).
+	 */
+	component_unbind_all(&master->dev, drm);
+err_crtc:
+	xlnx_crtc_helper_fini(drm, xlnx_drm->crtc);
+err_xlnx_drm:
+	drm_mode_config_cleanup(drm);
+err_drm:
+	drm_dev_put(drm);
+	return ret;
+}
+
+/* Tear down the pipeline in the reverse order of xlnx_bind(). */
+static void xlnx_unbind(struct device *dev)
+{
+	struct xlnx_drm *xlnx_drm = dev_get_drvdata(dev);
+	struct drm_device *drm = xlnx_drm->drm;
+
+	drm_dev_unregister(drm);
+	if (xlnx_drm->fb)
+		xlnx_fb_fini(xlnx_drm->fb);
+	component_unbind_all(&xlnx_drm->master->dev, drm);
+	xlnx_crtc_helper_fini(drm, xlnx_drm->crtc);
+	drm_kms_helper_poll_fini(drm);
+	drm_mode_config_cleanup(drm);
+	drm_dev_put(drm);
+}
+
+static const struct component_master_ops xlnx_master_ops = {
+ .bind = xlnx_bind,
+ .unbind = xlnx_unbind,
+};
+
+/**
+ * xlnx_of_component_probe - Build the component match list and add master
+ * @master_dev: the logical master device
+ * @compare_of: component compare callback (matches by of_node)
+ * @m_ops: master bind/unbind ops
+ *
+ * Walks the parent device's "ports" phandles twice: first to add the
+ * port parents themselves, then to add every available remote endpoint
+ * device found through the OF graph. Finally registers the component
+ * master with the accumulated match list.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int xlnx_of_component_probe(struct device *master_dev,
+				   int (*compare_of)(struct device *, void *),
+				   const struct component_master_ops *m_ops)
+{
+	struct device *dev = master_dev->parent;
+	struct device_node *ep, *port, *remote, *parent;
+	struct component_match *match = NULL;
+	int i;
+
+	if (!dev->of_node)
+		return -EINVAL;
+
+	component_match_add(master_dev, &match, compare_of, dev->of_node);
+
+	/* Pass 1: add each available device owning a listed port */
+	for (i = 0; ; i++) {
+		port = of_parse_phandle(dev->of_node, "ports", i);
+		if (!port)
+			break;
+
+		/* Skip an intermediate "ports" container node if present */
+		parent = port->parent;
+		if (!of_node_cmp(parent->name, "ports"))
+			parent = parent->parent;
+		parent = of_node_get(parent);
+
+		if (!of_device_is_available(parent)) {
+			of_node_put(parent);
+			of_node_put(port);
+			continue;
+		}
+
+		component_match_add(master_dev, &match, compare_of, parent);
+		of_node_put(parent);
+		of_node_put(port);
+	}
+
+	/* Pass 2: follow the OF graph out of each port owner */
+	parent = dev->of_node;
+	for (i = 0; ; i++) {
+		parent = of_node_get(parent);
+		if (!of_device_is_available(parent)) {
+			/*
+			 * NOTE(review): 'parent' is not advanced here, so if
+			 * dev->of_node itself is unavailable this loop would
+			 * spin - confirm callers guarantee availability.
+			 */
+			of_node_put(parent);
+			continue;
+		}
+
+		for_each_endpoint_of_node(parent, ep) {
+			remote = of_graph_get_remote_port_parent(ep);
+			if (!remote || !of_device_is_available(remote) ||
+			    remote == dev->of_node) {
+				of_node_put(remote);
+				continue;
+			} else if (!of_device_is_available(remote->parent)) {
+				dev_warn(dev, "parent dev of %s unavailable\n",
+					 remote->full_name);
+				of_node_put(remote);
+				continue;
+			}
+			component_match_add(master_dev, &match, compare_of,
+					    remote);
+			of_node_put(remote);
+		}
+		of_node_put(parent);
+
+		port = of_parse_phandle(dev->of_node, "ports", i);
+		if (!port)
+			break;
+
+		parent = port->parent;
+		if (!of_node_cmp(parent->name, "ports"))
+			parent = parent->parent;
+		of_node_put(port);
+	}
+
+	return component_master_add_with_match(master_dev, m_ops, match);
+}
+
+/* Component compare callback: match a device by its of_node pointer. */
+static int xlnx_compare_of(struct device *dev, void *data)
+{
+	return dev->of_node == data;
+}
+
+/* Build the component match list and register the component master. */
+static int xlnx_platform_probe(struct platform_device *pdev)
+{
+	return xlnx_of_component_probe(&pdev->dev, xlnx_compare_of,
+				       &xlnx_master_ops);
+}
+
+/* Remove the component master registered at probe time. */
+static int xlnx_platform_remove(struct platform_device *pdev)
+{
+	component_master_del(&pdev->dev, &xlnx_master_ops);
+	return 0;
+}
+
+/* Same teardown as remove, run at system shutdown. */
+static void xlnx_platform_shutdown(struct platform_device *pdev)
+{
+	component_master_del(&pdev->dev, &xlnx_master_ops);
+}
+
+/* Save the atomic state and stop output polling across suspend. */
+static int __maybe_unused xlnx_pm_suspend(struct device *dev)
+{
+	struct xlnx_drm *xlnx_drm = dev_get_drvdata(dev);
+	struct drm_device *drm = xlnx_drm->drm;
+
+	drm_kms_helper_poll_disable(drm);
+
+	xlnx_drm->suspend_state = drm_atomic_helper_suspend(drm);
+	if (IS_ERR(xlnx_drm->suspend_state)) {
+		/* Suspend failed; re-enable polling so the device keeps working */
+		drm_kms_helper_poll_enable(drm);
+		return PTR_ERR(xlnx_drm->suspend_state);
+	}
+
+	return 0;
+}
+
+/* Restore the atomic state saved in xlnx_pm_suspend() and resume polling. */
+static int __maybe_unused xlnx_pm_resume(struct device *dev)
+{
+	struct xlnx_drm *xlnx_drm = dev_get_drvdata(dev);
+	struct drm_device *drm = xlnx_drm->drm;
+
+	drm_atomic_helper_resume(drm, xlnx_drm->suspend_state);
+	drm_kms_helper_poll_enable(drm);
+
+	return 0;
+}
+
+static const struct dev_pm_ops xlnx_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xlnx_pm_suspend, xlnx_pm_resume)
+};
+
+static struct platform_driver xlnx_driver = {
+ .probe = xlnx_platform_probe,
+ .remove = xlnx_platform_remove,
+ .shutdown = xlnx_platform_shutdown,
+ .driver = {
+ .name = "xlnx-drm",
+ .pm = &xlnx_pm_ops,
+ },
+};
+
+/* bitmap for master id */
+static u32 xlnx_master_ids = GENMASK(31, 0);
+
+/**
+ * xlnx_drm_pipeline_init - Initialize the drm pipeline for the device
+ * @pdev: The platform device to initialize the drm pipeline device
+ *
+ * This function initializes the drm pipeline device, struct drm_device,
+ * on @pdev by creating a logical master platform device. The logical platform
+ * device acts as a master device to bind slave devices and represents
+ * the entire pipeline.
+ * The logical master uses the port bindings of the calling device to
+ * figure out the pipeline topology.
+ *
+ * Return: the logical master platform device if the drm device is initialized
+ * on @pdev. Error code otherwise.
+ */
+struct platform_device *xlnx_drm_pipeline_init(struct platform_device *pdev)
+{
+	struct platform_device *master;
+	int id, ret;
+
+	/* ffs() is 1-based; 0 means no free id bit remains */
+	id = ffs(xlnx_master_ids);
+	if (!id)
+		return ERR_PTR(-ENOSPC);
+
+	master = platform_device_alloc("xlnx-drm", id - 1);
+	if (!master)
+		return ERR_PTR(-ENOMEM);
+
+	master->dev.parent = &pdev->dev;
+	ret = platform_device_add(master);
+	if (ret)
+		goto err_out;
+
+	WARN_ON(master->id != id - 1);
+	xlnx_master_ids &= ~BIT(master->id);
+	return master;
+
+err_out:
+	/*
+	 * The device was never added: drop the allocation reference with
+	 * platform_device_put(). platform_device_unregister() would call
+	 * platform_device_del() on a device that was not added.
+	 */
+	platform_device_put(master);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(xlnx_drm_pipeline_init);
+
+/**
+ * xlnx_drm_pipeline_exit - Release the drm pipeline for the device
+ * @master: The master pipeline device to release
+ *
+ * Release the logical pipeline device returned by xlnx_drm_pipeline_init()
+ * and return its id bit to the free pool.
+ */
+void xlnx_drm_pipeline_exit(struct platform_device *master)
+{
+	xlnx_master_ids |= BIT(master->id);
+	platform_device_unregister(master);
+}
+EXPORT_SYMBOL_GPL(xlnx_drm_pipeline_exit);
+
+/* Module init: set up the bridge helper, then register the driver. */
+static int __init xlnx_drm_drv_init(void)
+{
+	int ret;
+
+	xlnx_bridge_helper_init();
+	/* Propagate registration failure instead of silently returning 0 */
+	ret = platform_driver_register(&xlnx_driver);
+	if (ret)
+		xlnx_bridge_helper_fini();
+	return ret;
+}
+
+/* Module exit: unregister the driver, then tear down the bridge helper. */
+static void __exit xlnx_drm_drv_exit(void)
+{
+	platform_driver_unregister(&xlnx_driver);
+	xlnx_bridge_helper_fini();
+}
+
+module_init(xlnx_drm_drv_init);
+module_exit(xlnx_drm_drv_exit);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx DRM KMS Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/xlnx_drv.h b/drivers/gpu/drm/xlnx/xlnx_drv.h
new file mode 100644
index 000000000000..0f6595f1bd85
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_drv.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx DRM KMS Header for Xilinx
+ *
+ * Copyright (C) 2013 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XLNX_DRV_H_
+#define _XLNX_DRV_H_
+
+/* Forward declarations keep this header free of heavy includes. */
+struct drm_device;
+struct platform_device;
+struct xlnx_crtc_helper;
+struct xlnx_bridge_helper;
+
+struct platform_device *xlnx_drm_pipeline_init(struct platform_device *parent);
+void xlnx_drm_pipeline_exit(struct platform_device *pipeline);
+
+uint32_t xlnx_get_format(struct drm_device *drm);
+unsigned int xlnx_get_align(struct drm_device *drm);
+struct xlnx_crtc_helper *xlnx_get_crtc_helper(struct drm_device *drm);
+struct xlnx_bridge_helper *xlnx_get_bridge_helper(struct drm_device *drm);
+
+#endif /* _XLNX_DRV_H_ */
diff --git a/drivers/gpu/drm/xlnx/xlnx_dsi.c b/drivers/gpu/drm/xlnx/xlnx_dsi.c
new file mode 100644
index 000000000000..273339ee4719
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_dsi.c
@@ -0,0 +1,1011 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA MIPI DSI Tx Controller driver.
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author : Saurabh Sengar <saurabhs@xilinx.com>
+ * : Siva Rajesh J <siva.rajesh.jarugula@xilinx.com>
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/iopoll.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/phy/phy.h>
+#include <video/mipi_display.h>
+#include <video/videomode.h>
+
+#include "xlnx_bridge.h"
+
+/* DSI Tx IP registers */
+#define XDSI_CCR 0x00
+#define XDSI_CCR_COREENB BIT(0)
+#define XDSI_CCR_SOFTRST BIT(1)
+#define XDSI_CCR_CRREADY BIT(2)
+#define XDSI_CCR_CMDMODE BIT(3)
+#define XDSI_CCR_DFIFORST BIT(4)
+#define XDSI_CCR_CMDFIFORST BIT(5)
+#define XDSI_PCR 0x04
+#define XDSI_PCR_VIDEOMODE(x) (((x) & 0x3) << 3)
+#define XDSI_PCR_VIDEOMODE_MASK (0x3 << 3)
+#define XDSI_PCR_VIDEOMODE_SHIFT 3
+#define XDSI_PCR_BLLPTYPE(x) ((x) << 5)
+#define XDSI_PCR_BLLPMODE(x) ((x) << 6)
+#define XDSI_PCR_EOTPENABLE(x) ((x) << 13)
+#define XDSI_GIER 0x20
+#define XDSI_ISR 0x24
+#define XDSI_IER 0x28
+#define XDSI_STR 0x2C
+#define XDSI_STR_RDY_SHPKT BIT(6)
+#define XDSI_STR_RDY_LNGPKT BIT(7)
+#define XDSI_STR_DFIFO_FULL BIT(8)
+#define XDSI_STR_DFIFO_EMPTY BIT(9)
+#define XDSI_STR_WAITFR_DATA BIT(10)
+#define XDSI_STR_CMD_EXE_PGS BIT(11)
+#define XDSI_STR_CCMD_PROC BIT(12)
+#define XDSI_STR_LPKT_MASK (0x5 << 7)
+#define XDSI_CMD 0x30
+#define XDSI_CMD_QUEUE_PACKET(x) ((x) & GENMASK(23, 0))
+#define XDSI_DFR 0x34
+#define XDSI_TIME1 0x50
+#define XDSI_TIME1_BLLP_BURST(x) ((x) & GENMASK(15, 0))
+#define XDSI_TIME1_HSA(x) (((x) & GENMASK(15, 0)) << 16)
+#define XDSI_TIME2 0x54
+#define XDSI_TIME2_VACT(x) ((x) & GENMASK(15, 0))
+#define XDSI_TIME2_HACT(x) (((x) & GENMASK(15, 0)) << 16)
+#define XDSI_HACT_MULTIPLIER GENMASK(1, 0)
+#define XDSI_TIME3 0x58
+#define XDSI_TIME3_HFP(x) ((x) & GENMASK(15, 0))
+#define XDSI_TIME3_HBP(x) (((x) & GENMASK(15, 0)) << 16)
+#define XDSI_TIME4 0x5c
+#define XDSI_TIME4_VFP(x) ((x) & GENMASK(7, 0))
+#define XDSI_TIME4_VBP(x) (((x) & GENMASK(7, 0)) << 8)
+#define XDSI_TIME4_VSA(x) (((x) & GENMASK(7, 0)) << 16)
+#define XDSI_LTIME 0x60
+#define XDSI_BLLP_TIME 0x64
+/*
+ * XDSI_NUM_DATA_T represents number of data types in the
+ * enum mipi_dsi_pixel_format in the MIPI DSI part of DRM framework.
+ */
+#define XDSI_NUM_DATA_T 4
+#define XDSI_VIDEO_MODE_SYNC_PULSE 0x0
+#define XDSI_VIDEO_MODE_SYNC_EVENT 0x1
+#define XDSI_VIDEO_MODE_BURST 0x2
+
+#define XDSI_DPHY_CLK_MIN 197000000000UL
+#define XDSI_DPHY_CLK_MAX 203000000000UL
+#define XDSI_DPHY_CLK_REQ 200000000000UL
+
+/* command timeout in usec */
+#define XDSI_CMD_TIMEOUT_VAL (3000)
+
+/**
+ * struct xlnx_dsi - Core configuration DSI Tx subsystem device structure
+ * @encoder: DRM encoder structure
+ * @dsi_host: DSI host device
+ * @connector: DRM connector structure
+ * @panel_node: MIPI DSI device panel node
+ * @panel: DRM panel structure
+ * @dev: device structure
+ * @iomem: Base address of DSI subsystem
+ * @lanes: number of active data lanes supported by DSI controller
+ * @cmdmode: command mode support
+ * @mode_flags: DSI operation mode related flags
+ * @format: pixel format for video mode of DSI controller
+ * @vm: videomode data structure
+ * @mul_factor: multiplication factor for HACT timing parameter
+ * @eotp_prop: configurable EoTP DSI parameter
+ * @bllp_mode_prop: configurable BLLP mode DSI parameter
+ * @bllp_type_prop: configurable BLLP type DSI parameter
+ * @video_mode_prop: configurable Video mode DSI parameter
+ * @bllp_burst_time_prop: Configurable BLLP time for burst mode
+ * @cmd_queue_prop: configurable command queue
+ * @eotp_prop_val: configurable EoTP DSI parameter value
+ * @bllp_mode_prop_val: configurable BLLP mode DSI parameter value
+ * @bllp_type_prop_val: configurable BLLP type DSI parameter value
+ * @video_mode_prop_val: configurable Video mode DSI parameter value
+ * @bllp_burst_time_prop_val: Configurable BLLP time for burst mode value
+ * @cmd_queue_prop_val: configurable command queue value
+ * @bridge: bridge structure
+ * @height_out: configurable bridge output height parameter
+ * @height_out_prop_val: configurable bridge output height parameter value
+ * @width_out: configurable bridge output width parameter
+ * @width_out_prop_val: configurable bridge output width parameter value
+ * @in_fmt: configurable bridge input media format
+ * @in_fmt_prop_val: configurable media bus format value
+ * @out_fmt: configurable bridge output media format
+ * @out_fmt_prop_val: configurable media bus format value
+ * @video_aclk: Video clock
+ * @dphy_clk_200M: 200MHz DPHY clock and AXI Lite clock
+ */
+struct xlnx_dsi {
+ struct drm_encoder encoder;
+ struct mipi_dsi_host dsi_host;
+ struct drm_connector connector;
+ struct device_node *panel_node;
+ struct drm_panel *panel;
+ struct device *dev;
+ void __iomem *iomem;
+ u32 lanes;
+ bool cmdmode;
+ u32 mode_flags;
+ enum mipi_dsi_pixel_format format;
+ struct videomode vm;
+ u32 mul_factor;
+ struct drm_property *eotp_prop;
+ struct drm_property *bllp_mode_prop;
+ struct drm_property *bllp_type_prop;
+ struct drm_property *video_mode_prop;
+ struct drm_property *bllp_burst_time_prop;
+ struct drm_property *cmd_queue_prop;
+ bool eotp_prop_val;
+ bool bllp_mode_prop_val;
+ bool bllp_type_prop_val;
+ u32 video_mode_prop_val;
+ u32 bllp_burst_time_prop_val;
+ u32 cmd_queue_prop_val;
+ struct xlnx_bridge *bridge;
+ struct drm_property *height_out;
+ u32 height_out_prop_val;
+ struct drm_property *width_out;
+ u32 width_out_prop_val;
+ struct drm_property *in_fmt;
+ u32 in_fmt_prop_val;
+ struct drm_property *out_fmt;
+ u32 out_fmt_prop_val;
+ /* AXI4-Stream video clock and the 200MHz DPHY/AXI-Lite clock */
+ struct clk *video_aclk;
+ struct clk *dphy_clk_200M;
+};
+
+/* container_of() helpers to recover struct xlnx_dsi from embedded members */
+#define host_to_dsi(host) container_of(host, struct xlnx_dsi, dsi_host)
+#define connector_to_dsi(c) container_of(c, struct xlnx_dsi, connector)
+#define encoder_to_dsi(e) container_of(e, struct xlnx_dsi, encoder)
+
+/* Thin MMIO accessors; @offset is relative to the DSI subsystem base. */
+static inline void xlnx_dsi_writel(void __iomem *base, int offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+static inline u32 xlnx_dsi_readl(void __iomem *base, int offset)
+{
+ return readl(base + offset);
+}
+
+/**
+ * xlnx_dsi_set_config_parameters - Configure DSI Tx registers with parameters
+ * given from user application.
+ * @dsi: DSI structure having the updated user parameters
+ *
+ * This function takes the DSI structure having drm_property parameters
+ * configured from user application and writes them into DSI IP registers.
+ */
+static void xlnx_dsi_set_config_parameters(struct xlnx_dsi *dsi)
+{
+ u32 reg;
+
+ /* fold the EoTP/video-mode/BLLP properties into one PCR write */
+ reg = XDSI_PCR_EOTPENABLE(dsi->eotp_prop_val);
+ reg |= XDSI_PCR_VIDEOMODE(dsi->video_mode_prop_val);
+ reg |= XDSI_PCR_BLLPTYPE(dsi->bllp_type_prop_val);
+ reg |= XDSI_PCR_BLLPMODE(dsi->bllp_mode_prop_val);
+
+ xlnx_dsi_writel(dsi->iomem, XDSI_PCR, reg);
+ /*
+ * Configure the burst time if video mode is burst.
+ * HSA of TIME1 register is ignored in this mode.
+ */
+ if (dsi->video_mode_prop_val == XDSI_VIDEO_MODE_BURST) {
+ reg = XDSI_TIME1_BLLP_BURST(dsi->bllp_burst_time_prop_val);
+ xlnx_dsi_writel(dsi->iomem, XDSI_TIME1, reg);
+ }
+
+ reg = XDSI_CMD_QUEUE_PACKET(dsi->cmd_queue_prop_val);
+ xlnx_dsi_writel(dsi->iomem, XDSI_CMD, reg);
+
+ dev_dbg(dsi->dev, "PCR register value is = %x\n",
+ xlnx_dsi_readl(dsi->iomem, XDSI_PCR));
+}
+
+/**
+ * xlnx_dsi_set_display_mode - Configure DSI timing registers
+ * @dsi: DSI structure having the updated user parameters
+ *
+ * This function writes the timing parameters of DSI IP which are
+ * retrieved from panel timing values.
+ */
+static void xlnx_dsi_set_display_mode(struct xlnx_dsi *dsi)
+{
+ struct videomode *vm = &dsi->vm;
+ u32 reg, video_mode;
+
+ reg = xlnx_dsi_readl(dsi->iomem, XDSI_PCR);
+ video_mode = (reg & XDSI_PCR_VIDEOMODE_MASK) >>
+ XDSI_PCR_VIDEOMODE_SHIFT;
+
+ /* configure the HSA value only if non_burst_sync_pluse video mode */
+ if (!video_mode &&
+ (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)) {
+ reg = XDSI_TIME1_HSA(vm->hsync_len);
+ xlnx_dsi_writel(dsi->iomem, XDSI_TIME1, reg);
+ }
+
+ /* vertical front porch / back porch / sync width */
+ reg = XDSI_TIME4_VFP(vm->vfront_porch) |
+ XDSI_TIME4_VBP(vm->vback_porch) |
+ XDSI_TIME4_VSA(vm->vsync_len);
+ xlnx_dsi_writel(dsi->iomem, XDSI_TIME4, reg);
+
+ reg = XDSI_TIME3_HFP(vm->hfront_porch) |
+ XDSI_TIME3_HBP(vm->hback_porch);
+ xlnx_dsi_writel(dsi->iomem, XDSI_TIME3, reg);
+
+ dev_dbg(dsi->dev, "mul factor for parsed datatype is = %d\n",
+ (dsi->mul_factor) / 100);
+ /*
+ * The HACT parameter received from panel timing values should be
+ * divisible by 4. The reason for this is, the word count given as
+ * input to DSI controller is HACT * mul_factor. The mul_factor is
+ * 3, 2.25, 2.25, 2 respectively for RGB888, RGB666_L, RGB666_P and
+ * RGB565.
+ * e.g. for RGB666_L color format and 1080p, the word count is
+ * 1920*2.25 = 4320 which is divisible by 4 and it is a valid input
+ * to DSI controller. Based on this 2.25 mul factor, we come up with
+ * the division factor of (XDSI_HACT_MULTIPLIER) as 4 for checking
+ */
+ if ((vm->hactive & XDSI_HACT_MULTIPLIER) != 0)
+ dev_warn(dsi->dev, "Incorrect HACT will be programmed\n");
+
+ /* mul_factor is stored x100, hence the /100 after the multiply */
+ reg = XDSI_TIME2_HACT((vm->hactive) * (dsi->mul_factor) / 100) |
+ XDSI_TIME2_VACT(vm->vactive);
+ xlnx_dsi_writel(dsi->iomem, XDSI_TIME2, reg);
+
+ dev_dbg(dsi->dev, "LCD size = %dx%d\n", vm->hactive, vm->vactive);
+}
+
+/**
+ * xlnx_dsi_set_display_enable - Enables the DSI Tx IP core enable
+ * register bit
+ * @dsi: DSI structure having the updated user parameters
+ *
+ * This function takes the DSI structure and enables the core enable bit
+ * of core configuration register.
+ */
+static void xlnx_dsi_set_display_enable(struct xlnx_dsi *dsi)
+{
+ u32 reg;
+
+ /* read-modify-write so other CCR bits are preserved */
+ reg = xlnx_dsi_readl(dsi->iomem, XDSI_CCR);
+ reg |= XDSI_CCR_COREENB;
+
+ xlnx_dsi_writel(dsi->iomem, XDSI_CCR, reg);
+ dev_dbg(dsi->dev, "MIPI DSI Tx controller is enabled.\n");
+}
+
+/**
+ * xlnx_dsi_set_display_disable - Disable the DSI Tx IP core enable
+ * register bit
+ * @dsi: DSI structure having the updated user parameters
+ *
+ * This function takes the DSI structure and disables the core enable bit
+ * of core configuration register.
+ */
+static void xlnx_dsi_set_display_disable(struct xlnx_dsi *dsi)
+{
+ u32 reg;
+
+ /* read-modify-write so other CCR bits are preserved */
+ reg = xlnx_dsi_readl(dsi->iomem, XDSI_CCR);
+ reg &= ~XDSI_CCR_COREENB;
+
+ xlnx_dsi_writel(dsi->iomem, XDSI_CCR, reg);
+ dev_dbg(dsi->dev, "DSI Tx is disabled. reset regs to default values\n");
+}
+
+/**
+ * xlnx_dsi_atomic_set_property - implementation of drm_connector_funcs
+ * set_property invoked by IOCTL call to DRM_IOCTL_MODE_OBJ_SETPROPERTY
+ *
+ * @connector: pointer to Xilinx DSI connector
+ * @state: DRM connector state
+ * @prop: pointer to the drm_property structure
+ * @val: DSI parameter value that is configured from user application
+ *
+ * This function takes a drm_property name and value given from user application
+ * and update the DSI structure property variables with the values.
+ * These values are later used to configure the DSI Tx IP.
+ *
+ * Return: 0 on success OR -EINVAL if setting property fails
+ */
+static int xlnx_dsi_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *prop, u64 val)
+{
+ struct xlnx_dsi *dsi = connector_to_dsi(connector);
+
+ dev_dbg(dsi->dev, "property name = %s, value = %lld\n",
+ prop->name, val);
+
+ if (prop == dsi->eotp_prop)
+ dsi->eotp_prop_val = !!val;
+ else if (prop == dsi->bllp_mode_prop)
+ dsi->bllp_mode_prop_val = !!val;
+ else if (prop == dsi->bllp_type_prop)
+ dsi->bllp_type_prop_val = !!val;
+ else if (prop == dsi->video_mode_prop)
+ dsi->video_mode_prop_val = (unsigned int)val;
+ else if (prop == dsi->bllp_burst_time_prop)
+ dsi->bllp_burst_time_prop_val = (unsigned int)val;
+ else if (prop == dsi->cmd_queue_prop)
+ dsi->cmd_queue_prop_val = (unsigned int)val;
+ else if (prop == dsi->height_out)
+ dsi->height_out_prop_val = (u32)val;
+ else if (prop == dsi->width_out)
+ dsi->width_out_prop_val = (u32)val;
+ else if (prop == dsi->in_fmt)
+ dsi->in_fmt_prop_val = (u32)val;
+ else if (prop == dsi->out_fmt)
+ dsi->out_fmt_prop_val = (u32)val;
+ else
+ return -EINVAL;
+
+ /* NOTE(review): registers are rewritten on every property change,
+ * including bridge-only properties -- confirm this is intended.
+ */
+ xlnx_dsi_set_config_parameters(dsi);
+
+ return 0;
+}
+
+/*
+ * Mirror of xlnx_dsi_atomic_set_property(): report the cached value of
+ * the requested connector property, or -EINVAL for an unknown property.
+ */
+static int
+xlnx_dsi_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *prop, uint64_t *val)
+{
+ struct xlnx_dsi *dsi = connector_to_dsi(connector);
+
+ if (prop == dsi->eotp_prop)
+ *val = dsi->eotp_prop_val;
+ else if (prop == dsi->bllp_mode_prop)
+ *val = dsi->bllp_mode_prop_val;
+ else if (prop == dsi->bllp_type_prop)
+ *val = dsi->bllp_type_prop_val;
+ else if (prop == dsi->video_mode_prop)
+ *val = dsi->video_mode_prop_val;
+ else if (prop == dsi->bllp_burst_time_prop)
+ *val = dsi->bllp_burst_time_prop_val;
+ else if (prop == dsi->cmd_queue_prop)
+ *val = dsi->cmd_queue_prop_val;
+ else if (prop == dsi->height_out)
+ *val = dsi->height_out_prop_val;
+ else if (prop == dsi->width_out)
+ *val = dsi->width_out_prop_val;
+ else if (prop == dsi->in_fmt)
+ *val = dsi->in_fmt_prop_val;
+ else if (prop == dsi->out_fmt)
+ *val = dsi->out_fmt_prop_val;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * xlnx_dsi_host_transfer - transfer command to panel
+ * @host: mipi dsi host structure
+ * @msg: mipi dsi msg with type, length and data
+ *
+ * This function is valid only in command mode.
+ * It checks the command fifo empty status and writes into
+ * data or cmd register and waits for the completion status.
+ *
+ * Return: number of bytes, on success and error number on failure
+ */
+static ssize_t xlnx_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct xlnx_dsi *dsi = host_to_dsi(host);
+ u32 data0, data1, cmd0, status, val, ccr;
+ const char *tx_buf = msg->tx_buf;
+
+ /*
+ * Command transfers need the core enabled AND in command mode.
+ * Testing the OR of the two bits (as before) would wrongly accept
+ * a core that is enabled but still in video mode.
+ */
+ ccr = xlnx_dsi_readl(dsi->iomem, XDSI_CCR);
+ if ((ccr & (XDSI_CCR_COREENB | XDSI_CCR_CMDMODE)) !=
+     (XDSI_CCR_COREENB | XDSI_CCR_CMDMODE)) {
+ dev_err(dsi->dev, "dsi command mode not enabled\n");
+ return -EINVAL;
+ }
+
+ if (msg->type == MIPI_DSI_DCS_LONG_WRITE) {
+ /* this path consumes exactly 6 payload bytes; reject less */
+ if (msg->tx_len < 6)
+ return -EINVAL;
+ status = readl_poll_timeout(dsi->iomem + XDSI_STR, val,
+ ((val & XDSI_STR_LPKT_MASK) ==
+ XDSI_STR_LPKT_MASK), 1,
+ XDSI_CMD_TIMEOUT_VAL);
+ if (status) {
+ dev_err(dsi->dev, "long cmd fifo not empty!\n");
+ return -ETIMEDOUT;
+ }
+ data0 = tx_buf[0] | (tx_buf[1] << 8) | (tx_buf[2] << 16) |
+ (tx_buf[3] << 24);
+ data1 = tx_buf[4] | (tx_buf[5] << 8);
+ /*
+ * Word count of the fixed 6-byte payload. The original code
+ * spelled this as MIPI_DSI_DCS_READ, which equals 0x06 only
+ * by coincidence.
+ */
+ cmd0 = msg->type | (6 << 8);
+
+ xlnx_dsi_writel(dsi->iomem, XDSI_DFR, data0);
+ xlnx_dsi_writel(dsi->iomem, XDSI_DFR, data1);
+ xlnx_dsi_writel(dsi->iomem, XDSI_CMD, cmd0);
+ } else {
+ data0 = tx_buf[0];
+ if (msg->type == MIPI_DSI_DCS_SHORT_WRITE_PARAM)
+ data0 = MIPI_DSI_DCS_SHORT_WRITE_PARAM |
+ (tx_buf[0] << 8) | (tx_buf[1] << 16);
+ else
+ data0 = MIPI_DSI_DCS_SHORT_WRITE | (tx_buf[0] << 8);
+
+ status = readl_poll_timeout(dsi->iomem + XDSI_STR, val,
+ ((val & XDSI_STR_RDY_SHPKT) ==
+ XDSI_STR_RDY_SHPKT), 1,
+ XDSI_CMD_TIMEOUT_VAL);
+ if (status) {
+ dev_err(dsi->dev, "short cmd fifo not empty\n");
+ return -ETIMEDOUT;
+ }
+ xlnx_dsi_writel(dsi->iomem, XDSI_CMD, data0);
+ }
+
+ /* wait until the controller finishes executing the queued command */
+ status = readl_poll_timeout(dsi->iomem + XDSI_STR, val,
+ (!(val & XDSI_STR_CMD_EXE_PGS)), 1,
+ XDSI_CMD_TIMEOUT_VAL);
+ if (status) {
+ dev_err(dsi->dev, "cmd time out\n");
+ return -ETIMEDOUT;
+ }
+
+ return msg->tx_len;
+}
+
+/*
+ * Validate that the attaching panel matches the controller configuration
+ * (lane count and pixel format), remember its OF node for later panel
+ * lookup, and kick an HPD event if the connector is already live.
+ */
+static int xlnx_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ u32 panel_lanes;
+ struct xlnx_dsi *dsi = host_to_dsi(host);
+
+ panel_lanes = device->lanes;
+ dsi->mode_flags = device->mode_flags;
+ dsi->panel_node = device->dev.of_node;
+
+ if (panel_lanes != dsi->lanes) {
+ dev_err(dsi->dev, "Mismatch of lanes. panel = %d, DSI = %d\n",
+ panel_lanes, dsi->lanes);
+ return -EINVAL;
+ }
+
+ /* defensive re-check; parse_dt already validated the range */
+ if (dsi->lanes > 4 || dsi->lanes < 1) {
+ dev_err(dsi->dev, "%d lanes : invalid xlnx,dsi-num-lanes\n",
+ dsi->lanes);
+ return -EINVAL;
+ }
+
+ if (device->format != dsi->format) {
+ dev_err(dsi->dev, "Mismatch of format. panel = %d, DSI = %d\n",
+ device->format, dsi->format);
+ return -EINVAL;
+ }
+
+ if (dsi->connector.dev)
+ drm_helper_hpd_irq_event(dsi->connector.dev);
+
+ return 0;
+}
+
+/* Drop the panel reference and report the disconnect via an HPD event. */
+static int xlnx_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct xlnx_dsi *dsi = host_to_dsi(host);
+
+ dsi->panel = NULL;
+
+ if (dsi->connector.dev)
+ drm_helper_hpd_irq_event(dsi->connector.dev);
+
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops xlnx_dsi_ops = {
+ .attach = xlnx_dsi_host_attach,
+ .detach = xlnx_dsi_host_detach,
+ .transfer = xlnx_dsi_host_transfer,
+};
+
+/*
+ * DPMS hook: prepare/enable the panel for DPMS_ON, disable/unprepare it
+ * for every other state, then delegate to the DRM DPMS helper.
+ */
+static int xlnx_dsi_connector_dpms(struct drm_connector *connector, int mode)
+{
+ struct xlnx_dsi *dsi = connector_to_dsi(connector);
+ int ret;
+
+ dev_dbg(dsi->dev, "connector dpms state: %d\n", mode);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ ret = drm_panel_prepare(dsi->panel);
+ if (ret < 0) {
+ dev_err(dsi->dev, "DRM panel not found\n");
+ return ret;
+ }
+
+ ret = drm_panel_enable(dsi->panel);
+ if (ret < 0) {
+ /* roll back the prepare on enable failure */
+ drm_panel_unprepare(dsi->panel);
+ dev_err(dsi->dev, "DRM panel not enabled\n");
+ return ret;
+ }
+ break;
+ default:
+ drm_panel_disable(dsi->panel);
+ drm_panel_unprepare(dsi->panel);
+ break;
+ }
+
+ return drm_helper_connector_dpms(connector, mode);
+}
+
+/*
+ * Connector detect: lazily look up and attach the panel on first call.
+ * For command-mode panels, the core is temporarily enabled in command
+ * mode so drm_panel_prepare() can send its init commands, then the CCR
+ * is cleared again. When the panel node has gone away, power the panel
+ * down and detach it.
+ */
+static enum drm_connector_status
+xlnx_dsi_detect(struct drm_connector *connector, bool force)
+{
+ struct xlnx_dsi *dsi = connector_to_dsi(connector);
+
+ if (!dsi->panel) {
+ dsi->panel = of_drm_find_panel(dsi->panel_node);
+ if (dsi->panel) {
+ drm_panel_attach(dsi->panel, &dsi->connector);
+ if (dsi->cmdmode) {
+ xlnx_dsi_writel(dsi->iomem, XDSI_CCR,
+ XDSI_CCR_CMDMODE |
+ XDSI_CCR_COREENB);
+ drm_panel_prepare(dsi->panel);
+ xlnx_dsi_writel(dsi->iomem, XDSI_CCR, 0);
+ }
+ }
+ } else if (!dsi->panel_node) {
+ xlnx_dsi_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ drm_panel_detach(dsi->panel);
+ dsi->panel = NULL;
+ }
+
+ if (dsi->panel)
+ return connector_status_connected;
+
+ return connector_status_disconnected;
+}
+
+/* Unregister and clean up the connector; dev is cleared as a use-after marker. */
+static void xlnx_dsi_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ connector->dev = NULL;
+}
+
+static const struct drm_connector_funcs xlnx_dsi_connector_funcs = {
+ .dpms = xlnx_dsi_connector_dpms,
+ .detect = xlnx_dsi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = xlnx_dsi_connector_destroy,
+ .atomic_set_property = xlnx_dsi_atomic_set_property,
+ .atomic_get_property = xlnx_dsi_atomic_get_property,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .reset = drm_atomic_helper_connector_reset,
+};
+
+/* Forward mode enumeration to the attached panel; 0 modes without one. */
+static int xlnx_dsi_get_modes(struct drm_connector *connector)
+{
+ struct xlnx_dsi *dsi = connector_to_dsi(connector);
+
+ if (dsi->panel)
+ return dsi->panel->funcs->get_modes(dsi->panel, connector);
+
+ return 0;
+}
+
+/* There is exactly one encoder per connector in this driver. */
+static struct drm_encoder *
+xlnx_dsi_best_encoder(struct drm_connector *connector)
+{
+ return &(connector_to_dsi(connector)->encoder);
+}
+
+static struct drm_connector_helper_funcs xlnx_dsi_connector_helper_funcs = {
+ .get_modes = xlnx_dsi_get_modes,
+ .best_encoder = xlnx_dsi_best_encoder,
+};
+
+/**
+ * xlnx_dsi_connector_create_property - create DSI connector properties
+ *
+ * @connector: pointer to Xilinx DSI connector
+ *
+ * This function takes the xilinx DSI connector component and defines
+ * the drm_property variables with their default values.
+ *
+ * NOTE(review): drm_property_create_*() may return NULL on allocation
+ * failure; the attach helper tolerates NULL entries, so failures here
+ * silently drop the corresponding property.
+ */
+static void xlnx_dsi_connector_create_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct xlnx_dsi *dsi = connector_to_dsi(connector);
+
+ dsi->eotp_prop = drm_property_create_bool(dev, 0, "eotp");
+ dsi->video_mode_prop = drm_property_create_range(dev, 0, "video_mode",
+ 0, 2);
+ dsi->bllp_mode_prop = drm_property_create_bool(dev, 0, "bllp_mode");
+ dsi->bllp_type_prop = drm_property_create_bool(dev, 0, "bllp_type");
+ dsi->bllp_burst_time_prop =
+ drm_property_create_range(dev, 0, "bllp_burst_time", 0, 0xFFFF);
+ dsi->cmd_queue_prop = drm_property_create_range(dev, 0, "cmd_queue", 0,
+ 0xffffff);
+ dsi->height_out = drm_property_create_range(dev, 0, "height_out",
+ 2, 4096);
+ dsi->width_out = drm_property_create_range(dev, 0, "width_out",
+ 2, 4096);
+ dsi->in_fmt = drm_property_create_range(dev, 0, "in_fmt", 0, 16384);
+ dsi->out_fmt = drm_property_create_range(dev, 0, "out_fmt", 0, 16384);
+}
+
+/**
+ * xlnx_dsi_connector_attach_property - attach DSI connector
+ * properties
+ *
+ * @connector: pointer to Xilinx DSI connector
+ */
+static void xlnx_dsi_connector_attach_property(struct drm_connector *connector)
+{
+ struct xlnx_dsi *dsi = connector_to_dsi(connector);
+ struct drm_mode_object *obj = &connector->base;
+
+ /*
+ * Attach only the properties that were successfully created.
+ * Use the local 'obj' alias consistently (the original mixed
+ * 'obj' and '&connector->base', which refer to the same object).
+ */
+ if (dsi->eotp_prop)
+ drm_object_attach_property(obj, dsi->eotp_prop, 1);
+
+ if (dsi->video_mode_prop)
+ drm_object_attach_property(obj, dsi->video_mode_prop, 0);
+
+ if (dsi->bllp_burst_time_prop)
+ drm_object_attach_property(obj, dsi->bllp_burst_time_prop, 0);
+
+ if (dsi->bllp_mode_prop)
+ drm_object_attach_property(obj, dsi->bllp_mode_prop, 0);
+
+ if (dsi->bllp_type_prop)
+ drm_object_attach_property(obj, dsi->bllp_type_prop, 0);
+
+ if (dsi->cmd_queue_prop)
+ drm_object_attach_property(obj, dsi->cmd_queue_prop, 0);
+
+ if (dsi->height_out)
+ drm_object_attach_property(obj, dsi->height_out, 0);
+
+ if (dsi->width_out)
+ drm_object_attach_property(obj, dsi->width_out, 0);
+
+ if (dsi->in_fmt)
+ drm_object_attach_property(obj, dsi->in_fmt, 0);
+
+ if (dsi->out_fmt)
+ drm_object_attach_property(obj, dsi->out_fmt, 0);
+}
+
+/*
+ * Create, register, and wire up the DSI connector for @encoder,
+ * then create and attach its custom DRM properties.
+ *
+ * Return: 0 on success, negative errno from drm_connector_init() on failure.
+ */
+static int xlnx_dsi_create_connector(struct drm_encoder *encoder)
+{
+ struct xlnx_dsi *dsi = encoder_to_dsi(encoder);
+ struct drm_connector *connector = &dsi->connector;
+ int ret;
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ ret = drm_connector_init(encoder->dev, connector,
+ &xlnx_dsi_connector_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (ret) {
+ dev_err(dsi->dev, "Failed to initialize connector with drm\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(connector, &xlnx_dsi_connector_helper_funcs);
+ drm_connector_register(connector);
+ drm_connector_attach_encoder(connector, encoder);
+ xlnx_dsi_connector_create_property(connector);
+ xlnx_dsi_connector_attach_property(connector);
+
+ return 0;
+}
+
+/**
+ * xlnx_dsi_atomic_mode_set - derive the DSI timing parameters
+ *
+ * @encoder: pointer to Xilinx DRM encoder
+ * @crtc_state: Pointer to drm core crtc state
+ * @connector_state: DSI connector drm state
+ *
+ * This function derives the DSI IP timing parameters from the timing
+ * values given in the attached panel driver.
+ */
+static void
+xlnx_dsi_atomic_mode_set(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *connector_state)
+{
+ struct xlnx_dsi *dsi = encoder_to_dsi(encoder);
+ struct videomode *vm = &dsi->vm;
+ struct drm_display_mode *m = &crtc_state->adjusted_mode;
+
+ /* Set bridge input and output parameters */
+ xlnx_bridge_set_input(dsi->bridge, m->hdisplay, m->vdisplay,
+ dsi->in_fmt_prop_val);
+ xlnx_bridge_set_output(dsi->bridge, dsi->width_out_prop_val,
+ dsi->height_out_prop_val,
+ dsi->out_fmt_prop_val);
+ xlnx_bridge_enable(dsi->bridge);
+
+ /* translate the DRM mode into videomode porch/sync values */
+ vm->hactive = m->hdisplay;
+ vm->vactive = m->vdisplay;
+ vm->vfront_porch = m->vsync_start - m->vdisplay;
+ vm->vback_porch = m->vtotal - m->vsync_end;
+ vm->vsync_len = m->vsync_end - m->vsync_start;
+ vm->hfront_porch = m->hsync_start - m->hdisplay;
+ vm->hback_porch = m->htotal - m->hsync_end;
+ vm->hsync_len = m->hsync_end - m->hsync_start;
+ xlnx_dsi_set_display_mode(dsi);
+}
+
+/* Encoder disable: quiesce the bridge (if any), then the DSI core. */
+static void xlnx_dsi_disable(struct drm_encoder *encoder)
+{
+ struct xlnx_dsi *dsi = encoder_to_dsi(encoder);
+
+ if (dsi->bridge)
+ xlnx_bridge_disable(dsi->bridge);
+
+ xlnx_dsi_set_display_disable(dsi);
+}
+
+/* Encoder enable: set the core enable bit; bridge is enabled in mode_set. */
+static void xlnx_dsi_enable(struct drm_encoder *encoder)
+{
+ struct xlnx_dsi *dsi = encoder_to_dsi(encoder);
+
+ xlnx_dsi_set_display_enable(dsi);
+}
+
+static const struct drm_encoder_helper_funcs xlnx_dsi_encoder_helper_funcs = {
+ .atomic_mode_set = xlnx_dsi_atomic_mode_set,
+ .enable = xlnx_dsi_enable,
+ .disable = xlnx_dsi_disable,
+};
+
+static const struct drm_encoder_funcs xlnx_dsi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+/*
+ * Parse the DSI controller's device-tree properties and clocks:
+ * lane count, data type (pixel format), optional command mode,
+ * and the two required clocks.
+ *
+ * Return: 0 on success, negative errno on missing/invalid properties.
+ */
+static int xlnx_dsi_parse_dt(struct xlnx_dsi *dsi)
+{
+ struct device *dev = dsi->dev;
+ struct device_node *node = dev->of_node;
+ int ret;
+ u32 datatype;
+ static const int xdsi_mul_fact[XDSI_NUM_DATA_T] = {300, 225, 225, 200};
+
+ dsi->dphy_clk_200M = devm_clk_get(dev, "dphy_clk_200M");
+ if (IS_ERR(dsi->dphy_clk_200M)) {
+ ret = PTR_ERR(dsi->dphy_clk_200M);
+ dev_err(dev, "failed to get dphy_clk_200M %d\n", ret);
+ return ret;
+ }
+
+ dsi->video_aclk = devm_clk_get(dev, "s_axis_aclk");
+ if (IS_ERR(dsi->video_aclk)) {
+ ret = PTR_ERR(dsi->video_aclk);
+ dev_err(dev, "failed to get video_clk %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Used as a multiplication factor for HACT based on used
+ * DSI data type.
+ *
+ * e.g. for RGB666_L datatype and 1920x1080 resolution,
+ * the Hact (WC) would be as follows -
+ * 1920 pixels * 18 bits per pixel / 8 bits per byte
+ * = 1920 pixels * 2.25 bytes per pixel = 4320 bytes.
+ *
+ * Data Type - Multiplication factor
+ * RGB888 - 3
+ * RGB666_L - 2.25
+ * RGB666_P - 2.25
+ * RGB565 - 2
+ *
+ * Since the multiplication factor maybe a floating number,
+ * a 100x multiplication factor is used.
+ */
+ ret = of_property_read_u32(node, "xlnx,dsi-num-lanes", &dsi->lanes);
+ if (ret < 0) {
+ dev_err(dsi->dev, "missing xlnx,dsi-num-lanes property\n");
+ return ret;
+ }
+ if (dsi->lanes > 4 || dsi->lanes < 1) {
+ dev_err(dsi->dev, "%d lanes : invalid lanes\n", dsi->lanes);
+ return -EINVAL;
+ }
+ ret = of_property_read_u32(node, "xlnx,dsi-data-type", &datatype);
+ if (ret < 0) {
+ dev_err(dsi->dev, "missing xlnx,dsi-data-type property\n");
+ return ret;
+ }
+ /* NOTE(review): format is assigned before the range check below;
+ * harmless since the error path returns, but worth confirming.
+ */
+ dsi->format = datatype;
+ if (datatype > MIPI_DSI_FMT_RGB565) {
+ dev_err(dsi->dev, "Invalid xlnx,dsi-data-type string\n");
+ return -EINVAL;
+ }
+ dsi->mul_factor = xdsi_mul_fact[datatype];
+
+ dsi->cmdmode = of_property_read_bool(node, "xlnx,dsi-cmd-mode");
+
+ dev_dbg(dsi->dev, "DSI controller num lanes = %d", dsi->lanes);
+ dev_dbg(dsi->dev, "DSI controller datatype = %d\n", datatype);
+ dev_dbg(dsi->dev, "DSI controller cmd mode = %d\n", dsi->cmdmode);
+
+ return 0;
+}
+
+/*
+ * Component bind: initialize the encoder, its connector, and register
+ * the MIPI DSI host, unwinding each step on failure.
+ */
+static int xlnx_dsi_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xlnx_dsi *dsi = dev_get_drvdata(dev);
+ struct drm_encoder *encoder = &dsi->encoder;
+ struct drm_device *drm_dev = data;
+ int ret;
+
+ /*
+ * TODO: The possible CRTCs are 1 now as per current implementation of
+ * DSI tx drivers. DRM framework can support more than one CRTCs and
+ * DSI driver can be enhanced for that.
+ */
+ encoder->possible_crtcs = 1;
+ /* drm_encoder_init() can fail; the original ignored its return */
+ ret = drm_encoder_init(drm_dev, encoder, &xlnx_dsi_encoder_funcs,
+ DRM_MODE_ENCODER_DSI, NULL);
+ if (ret) {
+ dev_err(dsi->dev, "fail initializing encoder, ret = %d\n", ret);
+ return ret;
+ }
+ drm_encoder_helper_add(encoder, &xlnx_dsi_encoder_helper_funcs);
+ ret = xlnx_dsi_create_connector(encoder);
+ if (ret) {
+ dev_err(dsi->dev, "fail creating connector, ret = %d\n", ret);
+ drm_encoder_cleanup(encoder);
+ return ret;
+ }
+ ret = mipi_dsi_host_register(&dsi->dsi_host);
+ if (ret) {
+ xlnx_dsi_connector_destroy(&dsi->connector);
+ drm_encoder_cleanup(encoder);
+ return ret;
+ }
+ return 0;
+}
+
+/* Component unbind: disable the encoder path and unregister the DSI host. */
+static void xlnx_dsi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct xlnx_dsi *dsi = dev_get_drvdata(dev);
+
+ xlnx_dsi_disable(&dsi->encoder);
+ mipi_dsi_host_unregister(&dsi->dsi_host);
+ /* xlnx_dsi_disable() already did this for a non-NULL bridge */
+ xlnx_bridge_disable(dsi->bridge);
+}
+
+static const struct component_ops xlnx_dsi_component_ops = {
+ .bind = xlnx_dsi_bind,
+ .unbind = xlnx_dsi_unbind,
+};
+
+/*
+ * Probe: allocate state, parse DT, map registers, resolve the optional
+ * VPSS bridge, bring up clocks, and register as a component.
+ */
+static int xlnx_dsi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct xlnx_dsi *dsi;
+ struct device_node *vpss_node;
+ int ret;
+ unsigned long rate;
+
+ dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return -ENOMEM;
+
+ dsi->dsi_host.ops = &xlnx_dsi_ops;
+ dsi->dsi_host.dev = dev;
+ dsi->dev = dev;
+
+ ret = xlnx_dsi_parse_dt(dsi);
+ if (ret)
+ return ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dsi->iomem = devm_ioremap_resource(dev, res);
+ if (IS_ERR(dsi->iomem))
+ return PTR_ERR(dsi->iomem);
+
+ platform_set_drvdata(pdev, dsi);
+
+ /* Bridge support */
+ vpss_node = of_parse_phandle(dsi->dev->of_node, "xlnx,vpss", 0);
+ if (vpss_node) {
+ dsi->bridge = of_xlnx_bridge_get(vpss_node);
+ /* of_parse_phandle() takes a node reference; drop it here */
+ of_node_put(vpss_node);
+ if (!dsi->bridge) {
+ dev_info(dsi->dev, "Didn't get bridge instance\n");
+ return -EPROBE_DEFER;
+ }
+ }
+
+ ret = clk_set_rate(dsi->dphy_clk_200M, XDSI_DPHY_CLK_REQ);
+ if (ret) {
+ dev_err(dev, "failed to set dphy clk rate %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dsi->dphy_clk_200M);
+ if (ret) {
+ dev_err(dev, "failed to enable dphy clk %d\n", ret);
+ return ret;
+ }
+
+ rate = clk_get_rate(dsi->dphy_clk_200M);
+ /*
+ * Reject a rate outside the [MIN, MAX] tolerance window.
+ * The original used '&&', which can never be true, so the
+ * check was dead code.
+ */
+ if (rate < XDSI_DPHY_CLK_MIN || rate > XDSI_DPHY_CLK_MAX) {
+ dev_err(dev, "Error DPHY clock = %lu\n", rate);
+ ret = -EINVAL;
+ goto err_disable_dphy_clk;
+ }
+
+ ret = clk_prepare_enable(dsi->video_aclk);
+ if (ret) {
+ dev_err(dev, "failed to enable video clk %d\n", ret);
+ goto err_disable_dphy_clk;
+ }
+
+ ret = component_add(dev, &xlnx_dsi_component_ops);
+ if (ret < 0)
+ goto err_disable_video_clk;
+
+ return ret;
+
+err_disable_video_clk:
+ clk_disable_unprepare(dsi->video_aclk);
+err_disable_dphy_clk:
+ clk_disable_unprepare(dsi->dphy_clk_200M);
+ return ret;
+}
+
+/* Remove: detach from the component framework, then gate both clocks. */
+static int xlnx_dsi_remove(struct platform_device *pdev)
+{
+ struct xlnx_dsi *dsi = platform_get_drvdata(pdev);
+
+ component_del(&pdev->dev, &xlnx_dsi_component_ops);
+ clk_disable_unprepare(dsi->video_aclk);
+ clk_disable_unprepare(dsi->dphy_clk_200M);
+
+ return 0;
+}
+
+static const struct of_device_id xlnx_dsi_of_match[] = {
+ { .compatible = "xlnx,dsi"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, xlnx_dsi_of_match);
+
+static struct platform_driver dsi_driver = {
+ .probe = xlnx_dsi_probe,
+ .remove = xlnx_dsi_remove,
+ .driver = {
+ .name = "xlnx-dsi",
+ .of_match_table = xlnx_dsi_of_match,
+ },
+};
+
+module_platform_driver(dsi_driver);
+
+MODULE_AUTHOR("Siva Rajesh <sivaraj@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx FPGA MIPI DSI Tx Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/xlnx_fb.c b/drivers/gpu/drm/xlnx/xlnx_fb.c
new file mode 100644
index 000000000000..9ccc7fa8cd5b
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_fb.c
@@ -0,0 +1,350 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM KMS Framebuffer helper
+ *
+ * Copyright (C) 2015 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * Based on drm_fb_cma_helper.c
+ *
+ * Copyright (C) 2012 Analog Device Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drm_vblank.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+
+#include "xlnx_crtc.h"
+#include "xlnx_drv.h"
+#include "xlnx_fb.h"
+
+#define XLNX_MAX_PLANES 4
+
+/**
+ * struct xlnx_fbdev - Xilinx fbdev emulation state
+ * @fb_helper: DRM fb helper; to_fbdev() recovers this struct from it
+ * @fb: framebuffer allocated for fbdev emulation
+ * @align: pitch alignment in bytes
+ * @vres_mult: multiplier applied to the vertical resolution
+ */
+struct xlnx_fbdev {
+ struct drm_fb_helper fb_helper;
+ struct drm_framebuffer *fb;
+ unsigned int align;
+ unsigned int vres_mult;
+};
+
+/* Map an embedded drm_fb_helper back to its enclosing xlnx_fbdev */
+static inline struct xlnx_fbdev *to_fbdev(struct drm_fb_helper *fb_helper)
+{
+ return container_of(fb_helper, struct xlnx_fbdev, fb_helper);
+}
+
+/* Framebuffer ops backed entirely by the generic GEM fb helpers */
+static struct drm_framebuffer_funcs xlnx_fb_funcs = {
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+};
+
+/**
+ * xlnx_fb_ioctl - fbdev ioctl handler
+ * @info: fbdev info structure
+ * @cmd: ioctl command
+ * @arg: ioctl argument (unused)
+ *
+ * Implements FBIO_WAITFORVSYNC by waiting for one vblank on each CRTC in
+ * the fb helper's client modeset list. All other commands are rejected.
+ *
+ * Return: 0 on success, the last failing drm_crtc_vblank_get() error, or
+ * -ENOTTY for unsupported commands.
+ */
+static int
+xlnx_fb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_mode_set *mode_set;
+ int ret = 0;
+
+ switch (cmd) {
+ case FBIO_WAITFORVSYNC:
+ drm_client_for_each_modeset(mode_set, &fb_helper->client) {
+ struct drm_crtc *crtc;
+
+ crtc = mode_set->crtc;
+ ret = drm_crtc_vblank_get(crtc);
+ if (!ret) {
+ /* wait one frame, then drop the vblank reference */
+ drm_crtc_wait_one_vblank(crtc);
+ drm_crtc_vblank_put(crtc);
+ }
+ }
+ return ret;
+ default:
+ return -ENOTTY;
+ }
+ /* unreachable 'return 0' after the switch removed: every case returns */
+}
+
+/*
+ * fbdev ops: drawing uses the sys_* variants because the framebuffer is a
+ * kernel-virtual CMA mapping (fbi->screen_base is set from obj->vaddr in
+ * xlnx_fbdev_create()); mode/pan/cmap handling defers to the DRM fb helper.
+ */
+static struct fb_ops xlnx_fbdev_ops = {
+ .owner = THIS_MODULE,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_setcmap = drm_fb_helper_setcmap,
+ .fb_ioctl = xlnx_fb_ioctl,
+};
+
+/**
+ * xlnx_fb_gem_fb_alloc - Allocate and register a framebuffer for GEM objects
+ * @drm: DRM device
+ * @mode_cmd: framebuffer description (size, format, pitches)
+ * @obj: array of backing GEM objects, one per plane
+ * @num_planes: number of entries in @obj
+ * @funcs: framebuffer functions for the new framebuffer
+ *
+ * Return: the initialized framebuffer, or an ERR_PTR on allocation or
+ * registration failure.
+ */
+static struct drm_framebuffer *
+xlnx_fb_gem_fb_alloc(struct drm_device *drm,
+       const struct drm_mode_fb_cmd2 *mode_cmd,
+       struct drm_gem_object **obj, unsigned int num_planes,
+       const struct drm_framebuffer_funcs *funcs)
+{
+ struct drm_framebuffer *fb;
+ unsigned int plane;
+ int err;
+
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb)
+ return ERR_PTR(-ENOMEM);
+
+ /* attach the per-plane backing objects */
+ for (plane = 0; plane < num_planes; plane++)
+ fb->obj[plane] = obj[plane];
+
+ drm_helper_mode_fill_fb_struct(drm, fb, mode_cmd);
+
+ err = drm_framebuffer_init(drm, fb, funcs);
+ if (err) {
+ dev_err(drm->dev, "Failed to init framebuffer: %d\n", err);
+ kfree(fb);
+ return ERR_PTR(err);
+ }
+
+ return fb;
+}
+
+/**
+ * xlnx_fb_gem_fbdev_fb_create - Build a framebuffer for fbdev emulation
+ * @drm: DRM device
+ * @size: surface size requested by the fb helper
+ * @pitch_align: pitch alignment in bytes, 0 for none
+ * @obj: single GEM object backing the whole surface
+ * @funcs: framebuffer functions for the new framebuffer
+ *
+ * Return: the new framebuffer, -EINVAL if @obj is too small for the
+ * computed pitch * height, or an ERR_PTR from xlnx_fb_gem_fb_alloc().
+ */
+static struct drm_framebuffer *
+xlnx_fb_gem_fbdev_fb_create(struct drm_device *drm,
+       struct drm_fb_helper_surface_size *size,
+       unsigned int pitch_align, struct drm_gem_object *obj,
+       const struct drm_framebuffer_funcs *funcs)
+{
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+
+ mode_cmd.width = size->surface_width;
+ mode_cmd.height = size->surface_height;
+ /* bytes per line, optionally rounded up to the hardware pitch */
+ mode_cmd.pitches[0] = size->surface_width *
+ DIV_ROUND_UP(size->surface_bpp, 8);
+ if (pitch_align)
+ mode_cmd.pitches[0] = roundup(mode_cmd.pitches[0],
+ pitch_align);
+ mode_cmd.pixel_format = drm_driver_legacy_fb_format(drm,
+ size->surface_bpp,
+ size->surface_depth);
+ /* the single backing object must hold the full surface */
+ if (obj->size < mode_cmd.pitches[0] * mode_cmd.height)
+ return ERR_PTR(-EINVAL);
+
+ return xlnx_fb_gem_fb_alloc(drm, &mode_cmd, &obj, 1, funcs);
+}
+
+/**
+ * xlnx_fbdev_create - Create the fbdev with a framebuffer
+ * @fb_helper: fb helper structure
+ * @size: framebuffer size info
+ *
+ * This function is based on drm_fbdev_cma_create(). It allocates a CMA
+ * backing object sized for the (possibly height-multiplied) surface,
+ * wraps it in a DRM framebuffer and wires up the fbdev info struct.
+ *
+ * Return: 0 if successful, or the error code.
+ */
+static int xlnx_fbdev_create(struct drm_fb_helper *fb_helper,
+        struct drm_fb_helper_surface_size *size)
+{
+ struct xlnx_fbdev *fbdev = to_fbdev(fb_helper);
+ struct drm_device *drm = fb_helper->dev;
+ struct drm_gem_cma_object *obj;
+ struct drm_framebuffer *fb;
+ unsigned int bytes_per_pixel;
+ unsigned long offset;
+ struct fb_info *fbi;
+ u32 format;
+ const struct drm_format_info *info;
+ size_t bytes;
+ int ret;
+
+ dev_dbg(drm->dev, "surface width(%d), height(%d) and bpp(%d)\n",
+ size->surface_width, size->surface_height, size->surface_bpp);
+
+ /*
+ * Grow the height by the virtual resolution multiplier; the visible
+ * yres is divided back down below (presumably to allow y panning
+ * between multiple buffers - NOTE(review): confirm with callers).
+ */
+ size->surface_height *= fbdev->vres_mult;
+ bytes_per_pixel = DIV_ROUND_UP(size->surface_bpp, 8);
+ bytes = ALIGN(size->surface_width * bytes_per_pixel, fbdev->align);
+ bytes *= size->surface_height;
+
+ obj = drm_gem_cma_create(drm, bytes);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ fbi = framebuffer_alloc(0, drm->dev);
+ if (!fbi) {
+ dev_err(drm->dev, "Failed to allocate framebuffer info.\n");
+ ret = -ENOMEM;
+ goto err_drm_gem_cma_free_object;
+ }
+
+ /* Override the depth given by fb helper with current format value */
+ format = xlnx_get_format(drm);
+ info = drm_format_info(format);
+ /*
+ * NOTE(review): drm_format_info() returns NULL for an unknown format
+ * and 'info' is dereferenced unconditionally - confirm 'format' is
+ * always a valid fourcc here.
+ */
+ if (size->surface_bpp == info->cpp[0] * 8)
+ size->surface_depth = info->depth;
+
+ fbdev->fb = xlnx_fb_gem_fbdev_fb_create(drm, size, fbdev->align,
+        &obj->base, &xlnx_fb_funcs);
+ if (IS_ERR(fbdev->fb)) {
+ dev_err(drm->dev, "Failed to allocate DRM framebuffer.\n");
+ ret = PTR_ERR(fbdev->fb);
+ goto err_framebuffer_release;
+ }
+
+ fb = fbdev->fb;
+ fb_helper->fb = fb;
+ fb_helper->fbdev = fbi;
+ fbi->flags = FBINFO_FLAG_DEFAULT;
+ fbi->fbops = &xlnx_fbdev_ops;
+
+ ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+ if (ret) {
+ dev_err(drm->dev, "Failed to allocate color map.\n");
+ goto err_fb_destroy;
+ }
+
+ drm_fb_helper_fill_info(fbi, fb_helper, size);
+ /* expose only the non-multiplied height as the visible resolution */
+ fbi->var.yres = fb->height / fbdev->vres_mult;
+
+ /* byte offset of the initial pan position inside the CMA buffer */
+ offset = fbi->var.xoffset * bytes_per_pixel;
+ offset += fbi->var.yoffset * fb->pitches[0];
+
+ drm->mode_config.fb_base = (resource_size_t)obj->paddr;
+ fbi->screen_base = (char __iomem *)(obj->vaddr + offset);
+ fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
+ fbi->screen_size = bytes;
+ fbi->fix.smem_len = bytes;
+
+ return 0;
+
+err_fb_destroy:
+ drm_framebuffer_unregister_private(fb);
+ drm_gem_fb_destroy(fb);
+err_framebuffer_release:
+ framebuffer_release(fbi);
+err_drm_gem_cma_free_object:
+ drm_gem_cma_free_object(&obj->base);
+ return ret;
+}
+
+/* fb_probe is invoked by the fb helper to build the initial fbdev */
+static struct drm_fb_helper_funcs xlnx_fb_helper_funcs = {
+ .fb_probe = xlnx_fbdev_create,
+};
+
+/**
+ * xlnx_fb_init - Allocate and initializes the Xilinx framebuffer
+ * @drm: DRM device
+ * @preferred_bpp: preferred bits per pixel for the device
+ * @max_conn_count: maximum number of connectors (NOTE(review): unused by
+ * this implementation - drm_fb_helper_init() no longer takes it; kept
+ * only for interface compatibility)
+ * @align: alignment value for pitch
+ * @vres_mult: multiplier for virtual resolution
+ *
+ * This function is based on drm_fbdev_cma_init().
+ *
+ * Return: a newly allocated drm_fb_helper struct or a ERR_PTR.
+ */
+struct drm_fb_helper *
+xlnx_fb_init(struct drm_device *drm, int preferred_bpp,
+      unsigned int max_conn_count, unsigned int align,
+      unsigned int vres_mult)
+{
+ struct xlnx_fbdev *fbdev;
+ struct drm_fb_helper *fb_helper;
+ int ret;
+
+ fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+ if (!fbdev)
+ return ERR_PTR(-ENOMEM);
+
+ fbdev->vres_mult = vres_mult;
+ fbdev->align = align;
+ fb_helper = &fbdev->fb_helper;
+ drm_fb_helper_prepare(drm, fb_helper, &xlnx_fb_helper_funcs);
+
+ ret = drm_fb_helper_init(drm, fb_helper);
+ if (ret < 0) {
+ dev_err(drm->dev, "Failed to initialize drm fb helper.\n");
+ goto err_free;
+ }
+
+ /* triggers xlnx_fbdev_create() via the fb_probe hook */
+ ret = drm_fb_helper_initial_config(fb_helper, preferred_bpp);
+ if (ret < 0) {
+ dev_err(drm->dev, "Failed to set initial hw configuration.\n");
+ goto err_drm_fb_helper_fini;
+ }
+
+ return fb_helper;
+
+err_drm_fb_helper_fini:
+ drm_fb_helper_fini(fb_helper);
+err_free:
+ kfree(fbdev);
+ return ERR_PTR(ret);
+}
+
+/**
+ * xlnx_fbdev_defio_fini - Free the defio fb
+ * @fbi: fb_info struct
+ *
+ * This function is based on drm_fbdev_cma_defio_fini(). No-op unless
+ * deferred I/O was set up; frees the fbdefio state and the fbops copy
+ * (NOTE(review): the kfree of fbi->fbops presumes the defio setup path
+ * allocated a private fb_ops copy - that setup is not in this file).
+ */
+static void xlnx_fbdev_defio_fini(struct fb_info *fbi)
+{
+ if (!fbi->fbdefio)
+ return;
+
+ fb_deferred_io_cleanup(fbi);
+ kfree(fbi->fbdefio);
+ kfree(fbi->fbops);
+}
+
+/**
+ * xlnx_fb_fini - Free the Xilinx framebuffer
+ * @fb_helper: drm_fb_helper struct
+ *
+ * This function is based on drm_fbdev_cma_fini(). Unregisters the fbdev,
+ * cleans up any deferred I/O state, removes the framebuffer and frees the
+ * xlnx_fbdev container allocated by xlnx_fb_init().
+ */
+void xlnx_fb_fini(struct drm_fb_helper *fb_helper)
+{
+ struct xlnx_fbdev *fbdev = to_fbdev(fb_helper);
+
+ drm_fb_helper_unregister_fbi(&fbdev->fb_helper);
+ if (fbdev->fb_helper.fbdev)
+ xlnx_fbdev_defio_fini(fbdev->fb_helper.fbdev);
+
+ if (fbdev->fb_helper.fb)
+ drm_framebuffer_remove(fbdev->fb_helper.fb);
+
+ drm_fb_helper_fini(&fbdev->fb_helper);
+ kfree(fbdev);
+}
+
+/**
+ * xlnx_fb_create - (struct drm_mode_config_funcs *)->fb_create callback
+ * @drm: DRM device
+ * @file_priv: drm file private data
+ * @mode_cmd: mode command for fb creation
+ *
+ * This function creates a drm_framebuffer with xlnx_fb_funcs for given mode
+ * @mode_cmd. This function is intended to be used for the fb_create callback
+ * function of drm_mode_config_funcs.
+ *
+ * Return: a drm_framebuffer object if successful, or
+ * ERR_PTR from drm_gem_fb_create_with_funcs().
+ */
+struct drm_framebuffer *
+xlnx_fb_create(struct drm_device *drm, struct drm_file *file_priv,
+        const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ return drm_gem_fb_create_with_funcs(drm, file_priv, mode_cmd,
+         &xlnx_fb_funcs);
+}
diff --git a/drivers/gpu/drm/xlnx/xlnx_fb.h b/drivers/gpu/drm/xlnx/xlnx_fb.h
new file mode 100644
index 000000000000..6efc985f2fb3
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_fb.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx DRM KMS Framebuffer helper header
+ *
+ * Copyright (C) 2015 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XLNX_FB_H_
+#define _XLNX_FB_H_
+
+struct drm_fb_helper;
+
+/* .fb_create hook for drm_mode_config_funcs */
+struct drm_framebuffer *
+xlnx_fb_create(struct drm_device *drm, struct drm_file *file_priv,
+        const struct drm_mode_fb_cmd2 *mode_cmd);
+/* allocate the fbdev emulation; undo with xlnx_fb_fini() */
+struct drm_fb_helper *
+xlnx_fb_init(struct drm_device *drm, int preferred_bpp,
+      unsigned int max_conn_count, unsigned int align,
+      unsigned int vres_mult);
+void xlnx_fb_fini(struct drm_fb_helper *fb_helper);
+
+#endif /* _XLNX_FB_H_ */
diff --git a/drivers/gpu/drm/xlnx/xlnx_gem.c b/drivers/gpu/drm/xlnx/xlnx_gem.c
new file mode 100644
index 000000000000..609bc0cffd07
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_gem.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM KMS GEM helper
+ *
+ * Copyright (C) 2015 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drm_gem_cma_helper.h>
+
+#include "xlnx_drv.h"
+#include "xlnx_gem.h"
+
+/**
+ * xlnx_gem_cma_dumb_create - (struct drm_driver)->dumb_create callback
+ * @file_priv: drm_file object
+ * @drm: DRM object
+ * @args: info for dumb scanout buffer creation
+ *
+ * This function is for dumb_create callback of drm_driver struct. Simply
+ * it wraps around drm_gem_cma_dumb_create_internal() and sets the pitch
+ * value by retrieving the alignment from the device.
+ *
+ * Return: The return value from drm_gem_cma_dumb_create_internal()
+ */
+int xlnx_gem_cma_dumb_create(struct drm_file *file_priv, struct drm_device *drm,
+        struct drm_mode_create_dumb *args)
+{
+ int pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ unsigned int align = xlnx_get_align(drm);
+
+ /* honor a caller-supplied pitch only if it is already aligned */
+ if (!args->pitch || !IS_ALIGNED(args->pitch, align))
+ args->pitch = ALIGN(pitch, align);
+
+ return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
+}
diff --git a/drivers/gpu/drm/xlnx/xlnx_gem.h b/drivers/gpu/drm/xlnx/xlnx_gem.h
new file mode 100644
index 000000000000..f380de916379
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_gem.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx DRM KMS GEM helper header
+ *
+ * Copyright (C) 2015 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XLNX_GEM_H_
+#define _XLNX_GEM_H_
+
+/* dumb_create callback enforcing the device's pitch alignment */
+int xlnx_gem_cma_dumb_create(struct drm_file *file_priv,
+        struct drm_device *drm,
+        struct drm_mode_create_dumb *args);
+
+#endif /* _XLNX_GEM_H_ */
diff --git a/drivers/gpu/drm/xlnx/xlnx_mixer.c b/drivers/gpu/drm/xlnx/xlnx_mixer.c
new file mode 100644
index 000000000000..b21d13888e7a
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_mixer.c
@@ -0,0 +1,3040 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx logicore video mixer driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Saurabh Sengar <saurabhs@xilinx.com>
+ * : Jeffrey Mouroux <jmouroux@xilinx.com>
+ */
+
+#include <drm/drm_vblank.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/dma/xilinx_frmbuf.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_dma.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/dmaengine.h>
+#include <video/videomode.h>
+#include "xlnx_bridge.h"
+#include "xlnx_crtc.h"
+#include "xlnx_drv.h"
+
+/**************************** Register Data **********************************/
+#define XVMIX_AP_CTRL 0x00000
+#define XVMIX_GIE 0x00004
+#define XVMIX_IER 0x00008
+#define XVMIX_ISR 0x0000c
+#define XVMIX_WIDTH_DATA 0x00010
+#define XVMIX_HEIGHT_DATA 0x00018
+#define XVMIX_BACKGROUND_Y_R_DATA 0x00028
+#define XVMIX_BACKGROUND_U_G_DATA 0x00030
+#define XVMIX_BACKGROUND_V_B_DATA 0x00038
+#define XVMIX_LAYERENABLE_DATA 0x00040
+#define XVMIX_K00_1 0x00048
+#define XVMIX_K01_1 0x00050
+#define XVMIX_K02_1 0x00058
+#define XVMIX_K10_1 0x00060
+#define XVMIX_K11_1 0x00068
+#define XVMIX_K12_1 0x00070
+#define XVMIX_K20_1 0x00078
+#define XVMIX_K21_1 0x00080
+#define XVMIX_K22_1 0x00088
+#define XVMIX_Y_DATA 0x00090
+#define XVMIX_U_DATA 0x00098
+#define XVMIX_V_DATA 0x000A0
+#define XVMIX_LAYERALPHA_0_DATA 0x00100
+#define XVMIX_LAYERSTARTX_0_DATA 0x00108
+#define XVMIX_LAYERSTARTY_0_DATA 0x00110
+#define XVMIX_LAYERWIDTH_0_DATA 0x00118
+#define XVMIX_LAYERSTRIDE_0_DATA 0x00120
+#define XVMIX_LAYERHEIGHT_0_DATA 0x00128
+#define XVMIX_LAYERSCALE_0_DATA 0x00130
+#define XVMIX_LAYERVIDEOFORMAT_0_DATA 0x00138
+#define XVMIX_K00_2 0x00140
+#define XVMIX_K01_2 0x00148
+#define XVMIX_K02_2 0x00150
+#define XVMIX_K10_2 0x00158
+#define XVMIX_K11_2 0x00160
+#define XVMIX_K12_2 0x00168
+#define XVMIX_K20_2 0x00170
+#define XVMIX_K21_2 0x00178
+#define XVMIX_K22_2 0x00180
+#define XVMIX_R_DATA 0x00188
+#define XVMIX_G_DATA 0x00190
+#define XVMIX_B_DATA 0x00198
+#define XVMIX_LAYER1_BUF1_V_DATA 0x00240
+#define XVMIX_LAYER1_BUF2_V_DATA 0x0024c
+#define XVMIX_LOGOSTARTX_DATA 0x01000
+#define XVMIX_LOGOSTARTY_DATA 0x01008
+#define XVMIX_LOGOWIDTH_DATA 0x01010
+#define XVMIX_LOGOHEIGHT_DATA 0x01018
+#define XVMIX_LOGOSCALEFACTOR_DATA 0x01020
+#define XVMIX_LOGOALPHA_DATA 0x01028
+#define XVMIX_LOGOCLRKEYMIN_R_DATA 0x01030
+#define XVMIX_LOGOCLRKEYMIN_G_DATA 0x01038
+#define XVMIX_LOGOCLRKEYMIN_B_DATA 0x01040
+#define XVMIX_LOGOCLRKEYMAX_R_DATA 0x01048
+#define XVMIX_LOGOCLRKEYMAX_G_DATA 0x01050
+#define XVMIX_LOGOCLRKEYMAX_B_DATA 0x01058
+#define XVMIX_LOGOR_V_BASE 0x10000
+#define XVMIX_LOGOR_V_HIGH 0x10fff
+#define XVMIX_LOGOG_V_BASE 0x20000
+#define XVMIX_LOGOG_V_HIGH 0x20fff
+#define XVMIX_LOGOB_V_BASE 0x30000
+#define XVMIX_LOGOB_V_HIGH 0x30fff
+#define XVMIX_LOGOA_V_BASE 0x40000
+#define XVMIX_LOGOA_V_HIGH 0x40fff
+
+/************************** Constant Definitions *****************************/
+#define XVMIX_LOGO_OFFSET 0x1000
+#define XVMIX_MASK_DISABLE_ALL_LAYERS 0x0
+#define XVMIX_REG_OFFSET 0x100
+#define XVMIX_MASTER_LAYER_IDX 0x0
+#define XVMIX_LOGO_LAYER_IDX 0x1
+#define XVMIX_DISP_MAX_WIDTH 4096
+#define XVMIX_DISP_MAX_HEIGHT 2160
+#define XVMIX_MAX_OVERLAY_LAYERS 16
+#define XVMIX_MAX_BPC 16
+#define XVMIX_ALPHA_MIN 0
+#define XVMIX_ALPHA_MAX 256
+#define XVMIX_LAYER_WIDTH_MIN 64
+#define XVMIX_LAYER_HEIGHT_MIN 64
+#define XVMIX_LOGO_LAYER_WIDTH_MIN 32
+#define XVMIX_LOGO_LAYER_HEIGHT_MIN 32
+#define XVMIX_LOGO_LAYER_WIDTH_MAX 256
+#define XVMIX_LOGO_LAYER_HEIGHT_MAX 256
+#define XVMIX_IRQ_DONE_MASK BIT(0)
+#define XVMIX_GIE_EN_MASK BIT(0)
+#define XVMIX_AP_EN_MASK BIT(0)
+#define XVMIX_AP_RST_MASK BIT(7)
+#define XVMIX_MAX_NUM_SUB_PLANES 4
+#define XVMIX_SCALE_FACTOR_1X 0
+#define XVMIX_SCALE_FACTOR_2X 1
+#define XVMIX_SCALE_FACTOR_4X 2
+#define XVMIX_SCALE_FACTOR_INVALID 3
+#define XVMIX_BASE_ALIGN 8
+#define XVMIX_CSC_MAX_ROWS (3)
+#define XVMIX_CSC_MAX_COLS (3)
+#define XVMIX_CSC_MATRIX_SIZE (XVMIX_CSC_MAX_ROWS * XVMIX_CSC_MAX_COLS)
+#define XVMIX_CSC_COEFF_SIZE (12)
+#define XVMIX_CSC_SCALE_FACTOR (4096)
+#define XVMIX_CSC_DIVISOR (10000)
+
+/*************************** STATIC DATA ************************************/
+static const s16
+xlnx_mix_yuv2rgb_coeffs[][DRM_COLOR_ENCODING_MAX][XVMIX_CSC_COEFF_SIZE] = {
+ [DRM_COLOR_YCBCR_BT601][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
+ 10000, 0, 13669,
+ 10000, -3367, -6986,
+ 10000, 17335, 0,
+ -175, 132, -222
+ },
+ [DRM_COLOR_YCBCR_BT601][DRM_COLOR_YCBCR_FULL_RANGE] = {
+ 10479, 0, 13979,
+ 10479, -3443, -7145,
+ 10479, 17729, 0,
+ -179, 136, -227
+ },
+ [DRM_COLOR_YCBCR_BT709][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
+ 10000, 0, 15406,
+ 10000, -1832, -4579,
+ 10000, 18153, 0,
+ -197, 82, -232
+ },
+ [DRM_COLOR_YCBCR_BT709][DRM_COLOR_YCBCR_FULL_RANGE] = {
+ 10233, 0, 15756,
+ 10233, -1873, -4683,
+ 10233, 18566, 0,
+ -202, 84, -238
+ },
+ [DRM_COLOR_YCBCR_BT2020][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
+ 10000, 0, 14426,
+ 10000, -1609, -5589,
+ 10000, 18406, 0,
+ -185, 92, -236
+ },
+ [DRM_COLOR_YCBCR_BT2020][DRM_COLOR_YCBCR_FULL_RANGE] = {
+ 10233, 0, 14754,
+ 10233, -1646, -5716,
+ 10233, 18824, 0,
+ -189, 94, -241
+ }
+};
+
+static const s16
+xlnx_mix_rgb2yuv_coeffs[][DRM_COLOR_ENCODING_MAX][XVMIX_CSC_COEFF_SIZE] = {
+ [DRM_COLOR_YCBCR_BT601][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
+ 2990, 5870, 1440,
+ -1720, -3390, 5110,
+ 5110, -4280, -830,
+ 0, 128, 128
+ },
+ [DRM_COLOR_YCBCR_BT601][DRM_COLOR_YCBCR_FULL_RANGE] = {
+ 2921, 5735, 1113,
+ -1686, -3310, 4393,
+ 4393, -4184, -812,
+ 0, 128, 128
+ },
+ [DRM_COLOR_YCBCR_BT709][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
+ 2120, 7150, 720,
+ -1170, -3940, 5110,
+ 5110, -4640, -470,
+ 0, 128, 128
+ },
+ [DRM_COLOR_YCBCR_BT709][DRM_COLOR_YCBCR_FULL_RANGE] = {
+ 2077, 6988, 705,
+ -1144, -3582, 4997,
+ 4997, -4538, -458,
+ 0, 128, 128
+ },
+ [DRM_COLOR_YCBCR_BT2020][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
+ 2625, 6775, 592,
+ -1427, -3684, 5110,
+ 5110, -4699, -410,
+ 0, 128, 128
+ },
+ [DRM_COLOR_YCBCR_BT2020][DRM_COLOR_YCBCR_FULL_RANGE] = {
+ 2566, 6625, 579,
+ -1396, -3602, 4997,
+ 4997, -4595, -401,
+ 0, 128, 128
+ }
+};
+
+static const u32 color_table[] = {
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_AYUV,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_Y8,
+ DRM_FORMAT_Y10,
+ DRM_FORMAT_XVUY2101010,
+ DRM_FORMAT_VUY888,
+ DRM_FORMAT_XVUY8888,
+ DRM_FORMAT_XV15,
+ DRM_FORMAT_XV20,
+};
+
+/*********************** Inline Functions/Macros *****************************/
+#define to_mixer_hw(p) (&((p)->mixer->mixer_hw))
+#define to_xlnx_crtc(x) container_of(x, struct xlnx_crtc, crtc)
+#define to_xlnx_plane(x) container_of(x, struct xlnx_mix_plane, base)
+#define to_xlnx_mixer(x) container_of(x, struct xlnx_mix, crtc)
+
+/**
+ * enum xlnx_mix_layer_id - Describes the layer by index to be acted upon
+ * @XVMIX_LAYER_MASTER: Master layer
+ * @XVMIX_LAYER_1: Layer 1
+ * @XVMIX_LAYER_2: Layer 2
+ * @XVMIX_LAYER_3: Layer 3
+ * @XVMIX_LAYER_4: Layer 4
+ * @XVMIX_LAYER_5: Layer 5
+ * @XVMIX_LAYER_6: Layer 6
+ * @XVMIX_LAYER_7: Layer 7
+ * @XVMIX_LAYER_8: Layer 8
+ * @XVMIX_LAYER_9: Layer 9
+ * @XVMIX_LAYER_10: Layer 10
+ * @XVMIX_LAYER_11: Layer 11
+ * @XVMIX_LAYER_12: Layer 12
+ * @XVMIX_LAYER_13: Layer 13
+ * @XVMIX_LAYER_14: Layer 14
+ * @XVMIX_LAYER_15: Layer 15
+ * @XVMIX_LAYER_16: Layer 16
+ */
+enum xlnx_mix_layer_id {
+ XVMIX_LAYER_MASTER = 0,
+ XVMIX_LAYER_1,
+ XVMIX_LAYER_2,
+ XVMIX_LAYER_3,
+ XVMIX_LAYER_4,
+ XVMIX_LAYER_5,
+ XVMIX_LAYER_6,
+ XVMIX_LAYER_7,
+ XVMIX_LAYER_8,
+ XVMIX_LAYER_9,
+ XVMIX_LAYER_10,
+ XVMIX_LAYER_11,
+ XVMIX_LAYER_12,
+ XVMIX_LAYER_13,
+ XVMIX_LAYER_14,
+ XVMIX_LAYER_15,
+ XVMIX_LAYER_16
+};
+
+/**
+ * struct xlnx_mix_layer_data - Describes the hardware configuration of a given
+ * mixer layer
+ * @hw_config: struct specifying the IP hardware constraints for this layer
+ * @vid_fmt: DRM format for this layer
+ * @can_alpha: Indicates that layer alpha is enabled for this layer
+ * @can_scale: Indicates that layer scaling is enabled for this layer
+ * @is_streaming: Indicates layer is not using mixer DMA but streaming from
+ * external DMA
+ * @max_width: Max possible pixel width
+ * @max_height: Max possible pixel height
+ * @min_width: Min possible pixel width
+ * @min_height: Min possible pixel height
+ * @layer_regs: struct containing current cached register values
+ * @buff_addr: Current physical address of image buffer
+ * @x_pos: Current CRTC x offset
+ * @y_pos: Current CRTC y offset
+ * @width: Current width in pixels
+ * @height: Current hight in pixels
+ * @stride: Current stride (when Mixer is performing DMA)
+ * @alpha: Current alpha setting
+ * @is_active: Logical flag indicating layer in use. If false, calls to
+ * enable layer will be ignored.
+ * @scale_fact: Current scaling factor applied to layer
+ * @id: The logical layer id identifies which layer this struct describes
+ * (e.g. 0 = master, 1-15 = overlay).
+ *
+ * All mixer layers are reprsented by an instance of this struct:
+ * output streaming, overlay, logo.
+ * Current layer-specific register state is stored in the layer_regs struct.
+ * The hardware configuration is stored in struct hw_config.
+ *
+ * Note:
+ * Some properties of the logo layer are unique and not described in this
+ * struct. Those properites are part of the xlnx_mix struct as global
+ * properties.
+ */
+struct xlnx_mix_layer_data {
+ struct {
+ u32 vid_fmt;
+ bool can_alpha;
+ bool can_scale;
+ bool is_streaming;
+ u32 max_width;
+ u32 max_height;
+ u32 min_width;
+ u32 min_height;
+ } hw_config;
+
+ struct {
+ u64 buff_addr1;
+ u64 buff_addr2;
+ u32 x_pos;
+ u32 y_pos;
+ u32 width;
+ u32 height;
+ u32 stride;
+ u32 alpha;
+ bool is_active;
+ u32 scale_fact;
+ } layer_regs;
+
+ enum xlnx_mix_layer_id id;
+};
+
+/**
+ * struct xlnx_mix_hw - Describes a mixer IP block instance within the design
+ * @base: Base physical address of Mixer IP in memory map
+ * @logo_layer_en: Indicates logo layer is enabled in hardware
+ * @logo_pixel_alpha_enabled: Indicates that per-pixel alpha supported for logo
+ * layer
+ * @csc_enabled: Indicates that colorimetry coefficients are programmable
+ * @max_layer_width: Max possible width for any layer on this Mixer
+ * @max_layer_height: Max possible height for any layer on this Mixer
+ * @max_logo_layer_width: Min possible width for any layer on this Mixer
+ * @max_logo_layer_height: Min possible height for any layer on this Mixer
+ * @num_layers: Max number of layers (excl: logo)
+ * @bg_layer_bpc: Bits per component for the background streaming layer
+ * @dma_addr_size: dma address size in bits
+ * @ppc: Pixels per component
+ * @irq: Interrupt request number assigned
+ * @bg_color: Current RGB color value for internal background color generator
+ * @layer_data: Array of layer data
+ * @layer_cnt: Layer data array count
+ * @max_layers: Maximum number of layers supported by hardware
+ * @logo_layer_id: Index of logo layer
+ * @logo_en_mask: Mask used to enable logo layer
+ * @enable_all_mask: Mask used to enable all layers
+ * @reset_gpio: GPIO line used to reset IP between modesetting operations
+ * @intrpt_handler_fn: Interrupt handler function called when frame is completed
+ * @intrpt_data: Data pointer passed to interrupt handler
+ *
+ * Used as the primary data structure for many L2 driver functions. Logo layer
+ * data, if enabled within the IP, is described in this structure. All other
+ * layers are described by an instance of xlnx_mix_layer_data referenced by this
+ * struct.
+ *
+ */
+struct xlnx_mix_hw {
+ void __iomem *base;
+ bool logo_layer_en;
+ bool logo_pixel_alpha_enabled;
+ u32 csc_enabled;
+ u32 max_layer_width;
+ u32 max_layer_height;
+ u32 max_logo_layer_width;
+ u32 max_logo_layer_height;
+ u32 num_layers;
+ u32 bg_layer_bpc;
+ u32 dma_addr_size;
+ u32 ppc;
+ int irq;
+ u64 bg_color;
+ struct xlnx_mix_layer_data *layer_data;
+ u32 layer_cnt;
+ u32 max_layers;
+ u32 logo_layer_id;
+ u32 logo_en_mask;
+ u32 enable_all_mask;
+ struct gpio_desc *reset_gpio;
+ void (*intrpt_handler_fn)(void *);
+ void *intrpt_data;
+};
+
+/**
+ * struct xlnx_mix - Container for interfacing DRM driver to mixer
+ * @mixer_hw: Object representing actual hardware state of mixer
+ * @master: Logical master device from xlnx drm
+ * @crtc: Xilinx DRM driver crtc object
+ * @drm_primary_layer: Hardware layer serving as logical DRM primary layer
+ * @hw_master_layer: Base video streaming layer
+ * @hw_logo_layer: Hardware logo layer
+ * @planes: Mixer overlay layers
+ * @num_planes : number of planes
+ * @max_width : maximum width of plane
+ * @max_height : maximum height of plane
+ * @max_cursor_width : maximum cursor width
+ * @max_cursor_height: maximum cursor height
+ * @alpha_prop: Global layer alpha property
+ * @scale_prop: Layer scale property (1x, 2x or 4x)
+ * @bg_color: Background color property for primary layer
+ * @drm: core drm object
+ * @pixel_clock: pixel clock for mixer
+ * @pixel_clock_enabled: pixel clock status
+ * @dpms: mixer drm state
+ * @event: vblank pending event
+ * @vtc_bridge: vtc_bridge structure
+ *
+ * Contains pointers to logical constructions such as the DRM plane manager as
+ * well as pointers to distinquish the mixer layer serving as the DRM "primary"
+ * plane from the actual mixer layer which serves as the background layer in
+ * hardware.
+ *
+ */
+struct xlnx_mix {
+ struct xlnx_mix_hw mixer_hw;
+ struct platform_device *master;
+ struct xlnx_crtc crtc;
+ struct xlnx_mix_plane *drm_primary_layer;
+ struct xlnx_mix_plane *hw_master_layer;
+ struct xlnx_mix_plane *hw_logo_layer;
+ struct xlnx_mix_plane *planes;
+ u32 num_planes;
+ u32 max_width;
+ u32 max_height;
+ u32 max_cursor_width;
+ u32 max_cursor_height;
+ struct drm_property *alpha_prop;
+ struct drm_property *scale_prop;
+ struct drm_property *bg_color;
+ struct drm_device *drm;
+ struct clk *pixel_clock;
+ bool pixel_clock_enabled;
+ int dpms;
+ struct drm_pending_vblank_event *event;
+ struct xlnx_bridge *vtc_bridge;
+};
+
+/**
+ * struct xlnx_mix_plane_dma - Xilinx drm plane VDMA object
+ *
+ * @chan: dma channel
+ * @xt: dma interleaved configuration template
+ * @sgl: data chunk for dma_interleaved_template
+ * @is_active: flag if the DMA is active
+ */
+struct xlnx_mix_plane_dma {
+ struct dma_chan *chan;
+ struct dma_interleaved_template xt;
+ struct data_chunk sgl[1];
+ bool is_active;
+};
+
+/**
+ * struct xlnx_mix_plane - Xilinx drm plane object
+ *
+ * @base: base drm plane object
+ * @mixer_layer: video mixer hardware layer data instance
+ * @mixer: mixer DRM object
+ * @dma: dma object
+ * @id: plane id
+ * @dpms: current dpms level
+ * @format: pixel format
+ */
+struct xlnx_mix_plane {
+ struct drm_plane base;
+ struct xlnx_mix_layer_data *mixer_layer;
+ struct xlnx_mix *mixer;
+ struct xlnx_mix_plane_dma dma[XVMIX_MAX_NUM_SUB_PLANES];
+ int id;
+ int dpms;
+ u32 format;
+};
+
+/* 32-bit MMIO register write */
+static inline void reg_writel(void __iomem *base, int offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+/* 64-bit MMIO value written as two 32-bit writes, low word first */
+static inline void reg_writeq(void __iomem *base, int offset, u64 val)
+{
+ writel(lower_32_bits(val), base + offset);
+ writel(upper_32_bits(val), base + offset + 4);
+}
+
+/* 32-bit MMIO register read */
+static inline u32 reg_readl(void __iomem *base, int offset)
+{
+ return readl(base + offset);
+}
+
+/**
+ * xlnx_mix_intrpt_enable_done - Enables interrupts
+ * @mixer: instance of mixer IP core
+ *
+ * Enables the frame-done interrupt in the mixer core: sets the done bit
+ * in the IER (preserving other enables) and turns on the global enable.
+ */
+static void xlnx_mix_intrpt_enable_done(struct xlnx_mix_hw *mixer)
+{
+ u32 curr_val = reg_readl(mixer->base, XVMIX_IER);
+
+ /* Enable Interrupts */
+ reg_writel(mixer->base, XVMIX_IER, curr_val | XVMIX_IRQ_DONE_MASK);
+ reg_writel(mixer->base, XVMIX_GIE, XVMIX_GIE_EN_MASK);
+}
+
+/**
+ * xlnx_mix_intrpt_disable - Disable interrupts
+ * @mixer: instance of mixer IP core
+ *
+ * Disables interrupts in the mixer core: clears only the frame-done
+ * enable in the IER, then drops the global interrupt enable entirely.
+ */
+static void xlnx_mix_intrpt_disable(struct xlnx_mix_hw *mixer)
+{
+ u32 curr_val = reg_readl(mixer->base, XVMIX_IER);
+
+ reg_writel(mixer->base, XVMIX_IER, curr_val & (~XVMIX_IRQ_DONE_MASK));
+ reg_writel(mixer->base, XVMIX_GIE, 0);
+}
+
+/**
+ * xlnx_mix_start - Start the mixer core video generator
+ * @mixer: Mixer core instance for which to start video output
+ *
+ * Starts the core to generate a video frame.
+ */
+static void xlnx_mix_start(struct xlnx_mix_hw *mixer)
+{
+ u32 val;
+
+ /*
+ * ap_start plus BIT(7) - presumably the HLS auto-restart bit so the
+ * core re-runs on every frame; TODO confirm against the IP register
+ * specification.
+ */
+ val = XVMIX_AP_RST_MASK | XVMIX_AP_EN_MASK;
+ reg_writel(mixer->base, XVMIX_AP_CTRL, val);
+}
+
+/**
+ * xlnx_mix_stop - Stop the mixer core video generator
+ * @mixer: Mixer core instance for which to stop video output
+ *
+ * Stops the core from generating video frames by clearing the control
+ * register (including the auto-restart/enable bits).
+ */
+static void xlnx_mix_stop(struct xlnx_mix_hw *mixer)
+{
+ reg_writel(mixer->base, XVMIX_AP_CTRL, 0);
+}
+
+/* Read the frame-done bit from the interrupt status register */
+static inline uint32_t xlnx_mix_get_intr_status(struct xlnx_mix_hw *mixer)
+{
+ return reg_readl(mixer->base, XVMIX_ISR) & XVMIX_IRQ_DONE_MASK;
+}
+
+/* Acknowledge the given bits in the ISR (write-to-clear presumed -
+ * NOTE(review): confirm ISR W1C semantics in the IP documentation)
+ */
+static inline void xlnx_mix_clear_intr_status(struct xlnx_mix_hw *mixer,
+           uint32_t intr)
+{
+ reg_writel(mixer->base, XVMIX_ISR, intr);
+}
+
+/**
+ * xlnx_mix_set_yuv2_rgb_coeff - Programs yuv to rgb coeffiecients
+ * @plane: Xilinx drm plane object
+ * @enc: Colorimetry encoding scheme
+ * @range: Colorimetry range
+ * Programs the colorimetry coefficients required for yuv to rgb
+ * conversion. The first 9 values are the 3x3 matrix rescaled from the
+ * table's x10000 fixed point to the hardware's x4096 fixed point; the
+ * remaining 3 are offsets scaled up for bit depths above 8 bpc.
+ */
+static void xlnx_mix_set_yuv2_rgb_coeff(struct xlnx_mix_plane *plane,
+     enum drm_color_encoding enc,
+     enum drm_color_range range)
+{
+ struct xlnx_mix *mixer = plane->mixer;
+ u32 i;
+ u32 bpc_scale = 1 << (mixer->mixer_hw.bg_layer_bpc - 8);
+
+ /* 3x3 matrix coefficients (registers are 8 bytes apart) */
+ for (i = 0; i < XVMIX_CSC_MATRIX_SIZE; i++)
+ reg_writel(mixer->mixer_hw.base, XVMIX_K00_1 + i * 8,
+     xlnx_mix_yuv2rgb_coeffs[enc][range][i] *
+     XVMIX_CSC_SCALE_FACTOR / XVMIX_CSC_DIVISOR);
+
+ /* offset terms, scaled by bits-per-component above 8 */
+ for (i = XVMIX_CSC_MATRIX_SIZE; i < XVMIX_CSC_COEFF_SIZE; i++)
+ reg_writel(mixer->mixer_hw.base, XVMIX_K00_1 + i * 8,
+     (xlnx_mix_yuv2rgb_coeffs[enc][range][i] *
+      bpc_scale));
+}
+
+/**
+ * xlnx_mix_set_rgb2_yuv_coeff - Programs rgb to yuv coefficients
+ * @plane: Xilinx drm plane object
+ * @enc: Colorimetry encoding scheme
+ * @range: Colorimetry range
+ * Programs the colorimetry coefficients required for rgb to yuv
+ * conversion.
+ */
+static void xlnx_mix_set_rgb2_yuv_coeff(struct xlnx_mix_plane *plane,
+ enum drm_color_encoding enc,
+ enum drm_color_range range)
+{
+ struct xlnx_mix *mixer = plane->mixer;
+ u32 i;
+ /* Offsets in the coefficient table are for 8 bpc; scale up for the
+ * configured background layer bit depth.
+ */
+ u32 bpc_scale = 1 << (mixer->mixer_hw.bg_layer_bpc - 8);
+
+ /* 3x3 matrix coefficients in fixed point; registers are spaced
+ * 8 bytes apart starting at XVMIX_K00_2.
+ */
+ for (i = 0; i < XVMIX_CSC_MATRIX_SIZE; i++)
+ reg_writel(mixer->mixer_hw.base, XVMIX_K00_2 + i * 8,
+ xlnx_mix_rgb2yuv_coeffs[enc][range][i] *
+ XVMIX_CSC_SCALE_FACTOR / XVMIX_CSC_DIVISOR);
+
+ /* Remaining entries: offset terms, scaled by bit depth only */
+ for (i = XVMIX_CSC_MATRIX_SIZE; i < XVMIX_CSC_COEFF_SIZE; i++)
+ reg_writel(mixer->mixer_hw.base, XVMIX_K00_2 + i * 8,
+ (xlnx_mix_rgb2yuv_coeffs[enc][range][i] *
+ bpc_scale));
+}
+
+/**
+ * xlnx_mix_get_layer_data - Retrieve current hardware and register
+ * values for a logical video layer
+ * @mixer: Mixer instance to interrogate
+ * @id: Id of layer for which data is requested
+ *
+ * Linearly scans the mixer's layer table for a matching logical id.
+ *
+ * Return:
+ * Structure containing layer-specific data; NULL upon failure
+ */
+static struct xlnx_mix_layer_data *
+xlnx_mix_get_layer_data(struct xlnx_mix_hw *mixer, enum xlnx_mix_layer_id id)
+{
+ u32 i;
+ struct xlnx_mix_layer_data *layer_data;
+
+ /* Use i < layer_cnt: the former (i <= layer_cnt - 1) wraps around
+ * when the unsigned layer_cnt is 0 and would overrun the table.
+ */
+ for (i = 0; i < mixer->layer_cnt; i++) {
+ layer_data = &mixer->layer_data[i];
+ if (layer_data->id == id)
+ return layer_data;
+ }
+ return NULL;
+}
+
+/**
+ * xlnx_mix_set_active_area - Sets the number of active horizontal and
+ * vertical scan lines for the mixer background layer.
+ * @mixer: Mixer instance for which to set a new viewable area
+ * @hactive: Width of new background image dimension
+ * @vactive: Height of new background image dimension
+ *
+ * Minimum values are 64x64 with maximum values determined by the IP hardware
+ * design.
+ *
+ * Return:
+ * Zero on success, -EINVAL on failure
+ */
+static int xlnx_mix_set_active_area(struct xlnx_mix_hw *mixer,
+ u32 hactive, u32 vactive)
+{
+ struct xlnx_mix_layer_data *ld =
+ xlnx_mix_get_layer_data(mixer, XVMIX_LAYER_MASTER);
+
+ /* Master layer lookup can fail; do not dereference a NULL entry */
+ if (!ld) {
+ DRM_ERROR("Master layer data not found\n");
+ return -EINVAL;
+ }
+ if (hactive > ld->hw_config.max_width ||
+ vactive > ld->hw_config.max_height) {
+ DRM_ERROR("Invalid layer dimension\n");
+ return -EINVAL;
+ }
+ /* set resolution */
+ reg_writel(mixer->base, XVMIX_HEIGHT_DATA, vactive);
+ reg_writel(mixer->base, XVMIX_WIDTH_DATA, hactive);
+ ld->layer_regs.width = hactive;
+ ld->layer_regs.height = vactive;
+
+ return 0;
+}
+
+/**
+ * is_window_valid - Validate requested plane dimensions
+ * @mixer: Mixer core instance for which to stop video output
+ * @x_pos: x position requested for start of plane
+ * @y_pos: y position requested for start of plane
+ * @width: width of plane
+ * @height: height of plane
+ * @scale: scale factor of plane
+ *
+ * Validates if the requested window is within the frame boundary
+ *
+ * Return:
+ * true on success, false on failure
+ */
+static bool is_window_valid(struct xlnx_mix_hw *mixer, u32 x_pos, u32 y_pos,
+ u32 width, u32 height, u32 scale)
+{
+ struct xlnx_mix_layer_data *master_layer;
+ int scale_factor[3] = {1, 2, 4};
+
+ master_layer = xlnx_mix_get_layer_data(mixer, XVMIX_LAYER_MASTER);
+ /* Without master layer data there is no active area to check
+ * against; reject the window rather than dereference NULL.
+ */
+ if (!master_layer) {
+ DRM_ERROR("Master layer data not found\n");
+ return false;
+ }
+
+ /* Check if window scale factor is set */
+ if (scale < XVMIX_SCALE_FACTOR_INVALID) {
+ width *= scale_factor[scale];
+ height *= scale_factor[scale];
+ }
+
+ /* verify overlay falls within currently active background area */
+ if (((x_pos + width) <= master_layer->layer_regs.width) &&
+ ((y_pos + height) <= master_layer->layer_regs.height))
+ return true;
+
+ DRM_ERROR("Requested plane dimensions can't be set\n");
+ return false;
+}
+
+/**
+ * xlnx_mix_layer_enable - Enables the requested layers
+ * @mixer: Mixer instance in which to enable a video layer
+ * @id: Logical id (e.g. 16 = logo layer) to enable
+ *
+ * Enables (permit video output) for layers in mixer
+ * Enables the layer denoted by id in the IP core.
+ * Layer 0 will indicate the background layer and layer 8 the logo
+ * layer. Passing max layers value will enable all
+ */
+static void xlnx_mix_layer_enable(struct xlnx_mix_hw *mixer,
+ enum xlnx_mix_layer_id id)
+{
+ struct xlnx_mix_layer_data *layer_data;
+ u32 curr_state;
+
+ /* Ensure layer is marked as 'active' by application before
+ * turning on in hardware. In some cases, layer register data
+ * may be written to otherwise inactive layers in lieu of, eventually,
+ * turning them on.
+ */
+ layer_data = xlnx_mix_get_layer_data(mixer, id);
+ if (!layer_data) {
+ DRM_ERROR("Invalid layer id %d\n", id);
+ return;
+ }
+ if (!layer_data->layer_regs.is_active)
+ return; /* for inactive layers silently return */
+
+ /* Check if request is to enable all layers or single layer */
+ if (id == mixer->max_layers) {
+ reg_writel(mixer->base, XVMIX_LAYERENABLE_DATA,
+ mixer->enable_all_mask);
+
+ } else if ((id < mixer->layer_cnt) || ((id == mixer->logo_layer_id) &&
+ mixer->logo_layer_en)) {
+ /* Read-modify-write: set only this layer's enable bit. The
+ * logo layer uses a dedicated mask rather than BIT(id).
+ */
+ curr_state = reg_readl(mixer->base, XVMIX_LAYERENABLE_DATA);
+ if (id == mixer->logo_layer_id)
+ curr_state |= mixer->logo_en_mask;
+ else
+ curr_state |= BIT(id);
+ reg_writel(mixer->base, XVMIX_LAYERENABLE_DATA, curr_state);
+ } else {
+ DRM_ERROR("Can't enable requested layer %d\n", id);
+ }
+}
+
+/**
+ * xlnx_mix_disp_layer_enable - Enables video output represented by the
+ * plane object
+ * @plane: Drm plane object describing video layer to enable
+ *
+ */
+static void xlnx_mix_disp_layer_enable(struct xlnx_mix_plane *plane)
+{
+ struct xlnx_mix_hw *mixer_hw;
+ struct xlnx_mix_layer_data *l_data;
+ u32 id;
+
+ if (!plane)
+ return;
+ mixer_hw = to_mixer_hw(plane);
+ l_data = plane->mixer_layer;
+ id = l_data->id;
+ /* NOTE(review): if XVMIX_LAYER_MASTER is 0, the lower-bound test on
+ * the unsigned id is vacuous - confirm the enum's first value.
+ */
+ if (id < XVMIX_LAYER_MASTER || id > mixer_hw->logo_layer_id) {
+ DRM_DEBUG_KMS("Attempt to activate invalid layer: %d\n", id);
+ return;
+ }
+ /* A non-streaming master (memory) layer is not enabled here */
+ if (id == XVMIX_LAYER_MASTER && !l_data->hw_config.is_streaming)
+ return;
+
+ xlnx_mix_layer_enable(mixer_hw, id);
+}
+
+/**
+ * xlnx_mix_layer_disable - Disables the requested layer
+ * @mixer: Mixer for which the layer will be disabled
+ * @id: Logical id of the layer to be disabled (0-16)
+ *
+ * Disables the layer denoted by layer_id in the IP core.
+ * Layer 0 will indicate the background layer and layer 16 the logo
+ * layer. Passing the value of max layers will disable all
+ * layers.
+ */
+static void xlnx_mix_layer_disable(struct xlnx_mix_hw *mixer,
+ enum xlnx_mix_layer_id id)
+{
+ u32 num_layers, curr_state;
+
+ num_layers = mixer->layer_cnt;
+
+ if (id == mixer->max_layers) {
+ /* Disable every layer in one write */
+ reg_writel(mixer->base, XVMIX_LAYERENABLE_DATA,
+ XVMIX_MASK_DISABLE_ALL_LAYERS);
+ } else if ((id < num_layers) ||
+ ((id == mixer->logo_layer_id) && (mixer->logo_layer_en))) {
+ /* Read-modify-write: clear only this layer's enable bit */
+ curr_state = reg_readl(mixer->base, XVMIX_LAYERENABLE_DATA);
+ if (id == mixer->logo_layer_id)
+ curr_state &= ~(mixer->logo_en_mask);
+ else
+ curr_state &= ~(BIT(id));
+ reg_writel(mixer->base, XVMIX_LAYERENABLE_DATA, curr_state);
+ } else {
+ DRM_ERROR("Can't disable requested layer %d\n", id);
+ }
+}
+
+/**
+ * xlnx_mix_disp_layer_disable - Disables video output represented by the
+ * plane object
+ * @plane: Drm plane object describing video layer to disable
+ *
+ */
+static void xlnx_mix_disp_layer_disable(struct xlnx_mix_plane *plane)
+{
+ struct xlnx_mix_hw *mixer_hw;
+ u32 layer_id;
+
+ /* Nothing to do without a plane */
+ if (!plane)
+ return;
+
+ mixer_hw = to_mixer_hw(plane);
+ layer_id = plane->mixer_layer->id;
+
+ /* Ignore ids outside the master..logo range */
+ if (layer_id < XVMIX_LAYER_MASTER ||
+ layer_id > mixer_hw->logo_layer_id)
+ return;
+
+ xlnx_mix_layer_disable(mixer_hw, layer_id);
+}
+
+/* Flag the plane's backing mixer layer as inactive; -ENODEV if absent */
+static int xlnx_mix_mark_layer_inactive(struct xlnx_mix_plane *plane)
+{
+ struct xlnx_mix_layer_data *layer;
+
+ if (!plane)
+ return -ENODEV;
+
+ layer = plane->mixer_layer;
+ if (!layer)
+ return -ENODEV;
+
+ layer->layer_regs.is_active = false;
+ return 0;
+}
+
+/* apply mode to plane pipe */
+static void xlnx_mix_plane_commit(struct drm_plane *base_plane)
+{
+ struct xlnx_mix_plane *plane = to_xlnx_plane(base_plane);
+ struct dma_async_tx_descriptor *desc;
+ enum dma_ctrl_flags flags;
+ unsigned int i;
+
+ /* for xlnx video framebuffer dma, if used */
+ xilinx_xdma_drm_config(plane->dma[0].chan, plane->format);
+ /* Prepare, submit and kick an interleaved transfer on every active
+ * sub-plane channel (multi-planar formats use more than one).
+ */
+ for (i = 0; i < XVMIX_MAX_NUM_SUB_PLANES; i++) {
+ struct xlnx_mix_plane_dma *dma = &plane->dma[i];
+
+ if (dma->chan && dma->is_active) {
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ desc = dmaengine_prep_interleaved_dma(dma->chan,
+ &dma->xt,
+ flags);
+ if (!desc) {
+ DRM_ERROR("failed to prepare DMA descriptor\n");
+ return;
+ }
+ dmaengine_submit(desc);
+ dma_async_issue_pending(dma->chan);
+ }
+ }
+}
+
+/* Maximum output width supported by the mixer backing this plane */
+static int xlnx_mix_plane_get_max_width(struct drm_plane *base_plane)
+{
+ return to_xlnx_plane(base_plane)->mixer->max_width;
+}
+
+/* Maximum output height supported by the mixer backing this plane */
+static int xlnx_mix_plane_get_max_height(struct drm_plane *base_plane)
+{
+ return to_xlnx_plane(base_plane)->mixer->max_height;
+}
+
+/* Maximum cursor width supported by the mixer backing this plane */
+static int xlnx_mix_plane_get_max_cursor_width(struct drm_plane *base_plane)
+{
+ return to_xlnx_plane(base_plane)->mixer->max_cursor_width;
+}
+
+/* Maximum cursor height supported by the mixer backing this plane */
+static int xlnx_mix_plane_get_max_cursor_height(struct drm_plane *base_plane)
+{
+ return to_xlnx_plane(base_plane)->mixer->max_cursor_height;
+}
+
+/* CRTC-level width limit, sourced from the primary plane's mixer */
+static int xlnx_mix_crtc_get_max_width(struct xlnx_crtc *crtc)
+{
+ struct drm_plane *primary = crtc->crtc.primary;
+
+ return xlnx_mix_plane_get_max_width(primary);
+}
+
+/* CRTC-level height limit, sourced from the primary plane's mixer */
+static int xlnx_mix_crtc_get_max_height(struct xlnx_crtc *crtc)
+{
+ struct drm_plane *primary = crtc->crtc.primary;
+
+ return xlnx_mix_plane_get_max_height(primary);
+}
+
+/* CRTC-level cursor width limit from the primary plane's mixer */
+static unsigned int xlnx_mix_crtc_get_max_cursor_width(struct xlnx_crtc *crtc)
+{
+ struct drm_plane *primary = crtc->crtc.primary;
+
+ return xlnx_mix_plane_get_max_cursor_width(primary);
+}
+
+/* CRTC-level cursor height limit from the primary plane's mixer */
+static unsigned int xlnx_mix_crtc_get_max_cursor_height(struct xlnx_crtc *crtc)
+{
+ struct drm_plane *primary = crtc->crtc.primary;
+
+ return xlnx_mix_plane_get_max_cursor_height(primary);
+}
+
+/**
+ * xlnx_mix_crtc_get_format - Get the current device format
+ * @crtc: xlnx crtc object
+ *
+ * The pipeline format is tracked on the primary plane.
+ *
+ * Return: the corresponding DRM_FORMAT_XXX
+ */
+static uint32_t xlnx_mix_crtc_get_format(struct xlnx_crtc *crtc)
+{
+ return to_xlnx_plane(crtc->crtc.primary)->format;
+}
+
+/**
+ * xlnx_mix_crtc_get_align - Get the alignment value for pitch
+ * @crtc: xlnx crtc object
+ *
+ * Pitch must be a multiple of the base alignment scaled by the core's
+ * pixels-per-clock configuration.
+ *
+ * Return: The alignment value if successful, or the error code.
+ */
+static unsigned int xlnx_mix_crtc_get_align(struct xlnx_crtc *crtc)
+{
+ struct xlnx_mix *mix = to_xlnx_plane(crtc->crtc.primary)->mixer;
+
+ return XVMIX_BASE_ALIGN * mix->mixer_hw.ppc;
+}
+
+/**
+ * xlnx_mix_attach_plane_prop - Attach mixer-specific drm property to
+ * the given plane
+ * @plane: Xilinx drm plane object to inspect and attach appropriate
+ * properties to
+ *
+ * The linked mixer layer will be inspected to see what capabilities it offers
+ * (e.g. global layer alpha; scaling) and drm property objects that indicate
+ * those capabilities will then be attached and initialized to default values.
+ */
+static void xlnx_mix_attach_plane_prop(struct xlnx_mix_plane *plane)
+{
+ struct drm_mode_object *base = &plane->base.base;
+ struct xlnx_mix *mixer = plane->mixer;
+
+ /* Defaults: 1x scale, fully opaque */
+ if (plane->mixer_layer->hw_config.can_scale)
+ drm_object_attach_property(base, mixer->scale_prop,
+ XVMIX_SCALE_FACTOR_1X);
+ if (plane->mixer_layer->hw_config.can_alpha)
+ drm_object_attach_property(base, mixer->alpha_prop,
+ XVMIX_ALPHA_MAX);
+ /* With CSC in the IP, expose standard DRM color encoding/range
+ * properties so userspace can select the conversion matrix.
+ */
+ if (mixer->mixer_hw.csc_enabled) {
+ u32 supported_encodings = BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709) |
+ BIT(DRM_COLOR_YCBCR_BT2020);
+ u32 supported_ranges = BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE);
+ enum drm_color_encoding encoding = DRM_COLOR_YCBCR_BT709;
+ enum drm_color_range range = DRM_COLOR_YCBCR_LIMITED_RANGE;
+
+ drm_plane_create_color_properties(&plane->base,
+ supported_encodings,
+ supported_ranges,
+ encoding, range);
+ }
+}
+
+/* Flag the plane's backing mixer layer as active; -ENODEV if absent.
+ * Also rejects a NULL plane, matching xlnx_mix_mark_layer_inactive().
+ */
+static int xlnx_mix_mark_layer_active(struct xlnx_mix_plane *plane)
+{
+ if (!plane || !plane->mixer_layer)
+ return -ENODEV;
+ plane->mixer_layer->layer_regs.is_active = true;
+
+ return 0;
+}
+
+/* True if @format appears in the driver's supported color table */
+static bool xlnx_mix_isfmt_support(u32 format)
+{
+ const u32 *entry = color_table;
+ const u32 *end = color_table + ARRAY_SIZE(color_table);
+
+ while (entry < end) {
+ if (*entry == format)
+ return true;
+ entry++;
+ }
+ return false;
+}
+
+/*************** DISPLAY ************/
+
+/**
+ * xlnx_mix_get_layer_scaling - Get layer scaling factor
+ * @mixer: Mixer instance to program with new background color
+ * @id: Plane id
+ *
+ * Applicable only for overlay layers
+ *
+ * Return:
+ * scaling factor of the specified layer; 0 (1x) if the layer has no
+ * layer data or does not support scaling
+ */
+static int xlnx_mix_get_layer_scaling(struct xlnx_mix_hw *mixer,
+ enum xlnx_mix_layer_id id)
+{
+ int scale_factor = 0;
+ u32 reg;
+ struct xlnx_mix_layer_data *l_data = xlnx_mix_get_layer_data(mixer, id);
+
+ /* Layer lookup can fail; avoid dereferencing NULL below */
+ if (!l_data)
+ return scale_factor;
+
+ if (id == mixer->logo_layer_id) {
+ if (mixer->logo_layer_en) {
+ /* Logo registers sit at an extra offset when the IP
+ * is built with more than the overlay-layer maximum.
+ */
+ if (mixer->max_layers > XVMIX_MAX_OVERLAY_LAYERS)
+ reg = XVMIX_LOGOSCALEFACTOR_DATA +
+ XVMIX_LOGO_OFFSET;
+ else
+ reg = XVMIX_LOGOSCALEFACTOR_DATA;
+ scale_factor = reg_readl(mixer->base, reg);
+ l_data->layer_regs.scale_fact = scale_factor;
+ }
+ } else {
+ /*Layer0-Layer15*/
+ if (id < mixer->logo_layer_id && l_data->hw_config.can_scale) {
+ reg = XVMIX_LAYERSCALE_0_DATA + (id * XVMIX_REG_OFFSET);
+ scale_factor = reg_readl(mixer->base, reg);
+ l_data->layer_regs.scale_fact = scale_factor;
+ }
+ }
+ return scale_factor;
+}
+
+/**
+ * xlnx_mix_set_layer_window - Sets the position of an overlay layer
+ * @mixer: Specific mixer object instance controlling the video
+ * @id: Logical layer id (1-15) to be positioned
+ * @x_pos: new: Column to start display of overlay layer
+ * @y_pos: new: Row to start display of overlay layer
+ * @width: Number of active columns to display for overlay layer
+ * @height: Number of active columns to display for overlay layer
+ * @stride: Width in bytes of overlay memory buffer (memory layer only)
+ *
+ * Sets the position of an overlay layer over the background layer (layer 0)
+ * Applicable only for layers 1-15 or the logo layer
+ *
+ * Return:
+ * Zero on success, -EINVAL if position is invalid or -ENODEV if layer
+ */
+static int xlnx_mix_set_layer_window(struct xlnx_mix_hw *mixer,
+ enum xlnx_mix_layer_id id, u32 x_pos,
+ u32 y_pos, u32 width, u32 height,
+ u32 stride)
+{
+ struct xlnx_mix_layer_data *l_data;
+ u32 scale = 0;
+ int status = -EINVAL;
+ u32 x_reg, y_reg, w_reg, h_reg, s_reg;
+ u32 off;
+
+ l_data = xlnx_mix_get_layer_data(mixer, id);
+ if (!l_data)
+ return status;
+
+ /* Validate against the active background area, accounting for the
+ * layer's current scale factor.
+ */
+ scale = xlnx_mix_get_layer_scaling(mixer, id);
+ if (!is_window_valid(mixer, x_pos, y_pos, width, height, scale))
+ return status;
+
+ if (id == mixer->logo_layer_id) {
+ if (!(mixer->logo_layer_en &&
+ width <= l_data->hw_config.max_width &&
+ height <= l_data->hw_config.max_height &&
+ height >= l_data->hw_config.min_height &&
+ width >= l_data->hw_config.min_width))
+ return status;
+
+ /* Logo registers sit at an extra offset when the IP is
+ * built with more than the overlay-layer maximum.
+ */
+ if (mixer->max_layers > XVMIX_MAX_OVERLAY_LAYERS) {
+ x_reg = XVMIX_LOGOSTARTX_DATA + XVMIX_LOGO_OFFSET;
+ y_reg = XVMIX_LOGOSTARTY_DATA + XVMIX_LOGO_OFFSET;
+ w_reg = XVMIX_LOGOWIDTH_DATA + XVMIX_LOGO_OFFSET;
+ h_reg = XVMIX_LOGOHEIGHT_DATA + XVMIX_LOGO_OFFSET;
+ } else {
+ x_reg = XVMIX_LOGOSTARTX_DATA;
+ y_reg = XVMIX_LOGOSTARTY_DATA;
+ w_reg = XVMIX_LOGOWIDTH_DATA;
+ h_reg = XVMIX_LOGOHEIGHT_DATA;
+ }
+ reg_writel(mixer->base, x_reg, x_pos);
+ reg_writel(mixer->base, y_reg, y_pos);
+ reg_writel(mixer->base, w_reg, width);
+ reg_writel(mixer->base, h_reg, height);
+ l_data->layer_regs.x_pos = x_pos;
+ l_data->layer_regs.y_pos = y_pos;
+ l_data->layer_regs.width = width;
+ l_data->layer_regs.height = height;
+ status = 0;
+ } else {
+ /*Layer1-Layer15*/
+
+ if (!(id < mixer->layer_cnt &&
+ width <= l_data->hw_config.max_width &&
+ width >= l_data->hw_config.min_width))
+ return status;
+ /* Per-layer register banks are XVMIX_REG_OFFSET apart */
+ x_reg = XVMIX_LAYERSTARTX_0_DATA;
+ y_reg = XVMIX_LAYERSTARTY_0_DATA;
+ w_reg = XVMIX_LAYERWIDTH_0_DATA;
+ h_reg = XVMIX_LAYERHEIGHT_0_DATA;
+ s_reg = XVMIX_LAYERSTRIDE_0_DATA;
+
+ off = id * XVMIX_REG_OFFSET;
+ reg_writel(mixer->base, (x_reg + off), x_pos);
+ reg_writel(mixer->base, (y_reg + off), y_pos);
+ reg_writel(mixer->base, (w_reg + off), width);
+ reg_writel(mixer->base, (h_reg + off), height);
+ l_data->layer_regs.x_pos = x_pos;
+ l_data->layer_regs.y_pos = y_pos;
+ l_data->layer_regs.width = width;
+ l_data->layer_regs.height = height;
+
+ /* Stride applies only to memory (non-streaming) layers */
+ if (!l_data->hw_config.is_streaming)
+ reg_writel(mixer->base, (s_reg + off), stride);
+ status = 0;
+ }
+ return status;
+}
+
+/**
+ * xlnx_mix_set_layer_dimensions - Set layer dimensions
+ * @plane: Drm plane object describing video layer to reposition
+ * @crtc_x: New horizontal anchor position from which to begin rendering
+ * @crtc_y: New vertical anchor position from which to begin rendering
+ * @width: Width, in pixels, to render from stream or memory buffer
+ * @height: Height, in pixels, to render from stream or memory buffer
+ * @stride: Width, in bytes, of a memory buffer. Used only for
+ * memory layers. Use 0 for streaming layers.
+ *
+ * Establishes new coordinates and dimensions for a video plane layer
+ * New size and coordinates of window must fit within the currently active
+ * area of the crtc (e.g. the background resolution)
+ *
+ * Return: 0 if successful; Either -EINVAL if coordinate data is invalid
+ * or -ENODEV if layer data not present
+ */
+static int xlnx_mix_set_layer_dimensions(struct xlnx_mix_plane *plane,
+ u32 crtc_x, u32 crtc_y,
+ u32 width, u32 height, u32 stride)
+{
+ struct xlnx_mix *mixer = plane->mixer;
+ struct xlnx_mix_hw *mixer_hw = to_mixer_hw(plane);
+ struct xlnx_mix_layer_data *layer_data;
+ enum xlnx_mix_layer_id layer_id;
+ int ret = 0;
+
+ layer_data = plane->mixer_layer;
+ layer_id = layer_data->id;
+ /* Resizing requires the layer (and, for the primary plane, the
+ * master layer) to be disabled before reprogramming.
+ */
+ if (layer_data->layer_regs.height != height ||
+ layer_data->layer_regs.width != width) {
+ if (mixer->drm_primary_layer == plane)
+ xlnx_mix_layer_disable(mixer_hw, XVMIX_LAYER_MASTER);
+
+ xlnx_mix_layer_disable(mixer_hw, layer_id);
+ }
+ /* The primary plane defines the active background area and is
+ * always anchored at the origin.
+ */
+ if (mixer->drm_primary_layer == plane) {
+ crtc_x = 0;
+ crtc_y = 0;
+ ret = xlnx_mix_set_active_area(mixer_hw, width, height);
+ if (ret)
+ return ret;
+ xlnx_mix_layer_enable(mixer_hw, XVMIX_LAYER_MASTER);
+ }
+ if (layer_id != XVMIX_LAYER_MASTER && layer_id < mixer_hw->max_layers) {
+ ret = xlnx_mix_set_layer_window(mixer_hw, layer_id, crtc_x,
+ crtc_y, width, height, stride);
+ if (ret)
+ return ret;
+ xlnx_mix_disp_layer_enable(plane);
+ }
+ return ret;
+}
+
+/**
+ * xlnx_mix_set_layer_scaling - Sets scaling factor
+ * @mixer: Instance of mixer to be subject of scaling request
+ * @id: Logical id of video layer subject to new scale setting
+ * @scale: scale Factor (1x, 2x or 4x) for horiz. and vert. dimensions
+ *
+ * Sets the scaling factor for the specified video layer
+ * Not applicable to background stream layer (layer 0)
+ *
+ * Return:
+ * Zero on success, -EINVAL on failure to set scale for layer (likely
+ * returned if resulting size of layer exceeds dimensions of active
+ * display area or no layer data exists for @id)
+ */
+static int xlnx_mix_set_layer_scaling(struct xlnx_mix_hw *mixer,
+ enum xlnx_mix_layer_id id, u32 scale)
+{
+ void __iomem *reg = mixer->base;
+ struct xlnx_mix_layer_data *l_data;
+ int status = 0;
+ u32 x_pos, y_pos, width, height, offset;
+
+ l_data = xlnx_mix_get_layer_data(mixer, id);
+ /* Layer lookup can fail; avoid dereferencing NULL below */
+ if (!l_data)
+ return -EINVAL;
+
+ x_pos = l_data->layer_regs.x_pos;
+ y_pos = l_data->layer_regs.y_pos;
+ width = l_data->layer_regs.width;
+ height = l_data->layer_regs.height;
+
+ /* Reject scales that would push the window past the active area */
+ if (!is_window_valid(mixer, x_pos, y_pos, width, height, scale))
+ return -EINVAL;
+
+ if (id == mixer->logo_layer_id) {
+ if (mixer->logo_layer_en) {
+ /* Logo registers sit at an extra offset when the IP
+ * is built with more than the overlay-layer maximum.
+ */
+ if (mixer->max_layers > XVMIX_MAX_OVERLAY_LAYERS)
+ reg_writel(reg, XVMIX_LOGOSCALEFACTOR_DATA +
+ XVMIX_LOGO_OFFSET, scale);
+ else
+ reg_writel(reg, XVMIX_LOGOSCALEFACTOR_DATA,
+ scale);
+ l_data->layer_regs.scale_fact = scale;
+ status = 0;
+ }
+ } else {
+ /* Layer0-Layer15 */
+ if (id < mixer->layer_cnt && l_data->hw_config.can_scale) {
+ offset = id * XVMIX_REG_OFFSET;
+
+ reg_writel(reg, (XVMIX_LAYERSCALE_0_DATA + offset),
+ scale);
+ l_data->layer_regs.scale_fact = scale;
+ status = 0;
+ }
+ }
+ return status;
+}
+
+/**
+ * xlnx_mix_set_layer_scale - Change video scale factor for video plane
+ * @plane: Drm plane object describing layer to be modified
+ * @val: Index of scale factor to use:
+ * 0 = 1x
+ * 1 = 2x
+ * 2 = 4x
+ *
+ * Return:
+ * Zero on success, either -EINVAL if scale value is illegal or
+ * -ENODEV if layer does not exist (null)
+ */
+static int xlnx_mix_set_layer_scale(struct xlnx_mix_plane *plane,
+ uint64_t val)
+{
+ struct xlnx_mix_hw *mixer_hw = to_mixer_hw(plane);
+ struct xlnx_mix_layer_data *layer = plane->mixer_layer;
+ int ret;
+
+ if (!layer || !layer->hw_config.can_scale)
+ return -ENODEV;
+ if (val > XVMIX_SCALE_FACTOR_4X || val < XVMIX_SCALE_FACTOR_1X) {
+ DRM_ERROR("Mixer layer scale value illegal.\n");
+ return -EINVAL;
+ }
+ /* Disable the layer while reprogramming its scale factor.
+ * NOTE(review): the 50 ms delay looks empirical - presumably it
+ * lets the in-flight frame drain before the write; confirm.
+ */
+ xlnx_mix_disp_layer_disable(plane);
+ msleep(50);
+ ret = xlnx_mix_set_layer_scaling(mixer_hw, layer->id, val);
+ xlnx_mix_disp_layer_enable(plane);
+
+ return ret;
+}
+
+/**
+ * xlnx_mix_set_layer_alpha - Set the alpha value
+ * @mixer: Instance of mixer controlling layer to modify
+ * @layer_id: Logical id of video overlay to adjust alpha setting
+ * @alpha: Desired alpha setting (0-255) for layer specified
+ * 255 = completely opaque
+ * 0 = fully transparent
+ *
+ * Set the layer global transparency for a video overlay
+ * Not applicable to background streaming layer
+ *
+ * Return:
+ * Zero on success, -EINVAL on failure
+ */
+static int xlnx_mix_set_layer_alpha(struct xlnx_mix_hw *mixer,
+ enum xlnx_mix_layer_id layer_id, u32 alpha)
+{
+ struct xlnx_mix_layer_data *layer_data;
+ u32 reg;
+ int status = -EINVAL;
+
+ layer_data = xlnx_mix_get_layer_data(mixer, layer_id);
+ /* Layer lookup can fail; avoid dereferencing NULL below */
+ if (!layer_data)
+ return status;
+
+ if (layer_id == mixer->logo_layer_id) {
+ if (mixer->logo_layer_en) {
+ /* Logo registers sit at an extra offset when the IP
+ * is built with more than the overlay-layer maximum.
+ */
+ if (mixer->max_layers > XVMIX_MAX_OVERLAY_LAYERS)
+ reg = XVMIX_LOGOALPHA_DATA + XVMIX_LOGO_OFFSET;
+ else
+ reg = XVMIX_LOGOALPHA_DATA;
+ reg_writel(mixer->base, reg, alpha);
+ layer_data->layer_regs.alpha = alpha;
+ status = 0;
+ }
+ } else {
+ /*Layer1-Layer15*/
+ if (layer_id < mixer->layer_cnt &&
+ layer_data->hw_config.can_alpha) {
+ u32 offset = layer_id * XVMIX_REG_OFFSET;
+
+ reg = XVMIX_LAYERALPHA_0_DATA;
+ reg_writel(mixer->base, (reg + offset), alpha);
+ layer_data->layer_regs.alpha = alpha;
+ status = 0;
+ }
+ }
+ return status;
+}
+
+/**
+ * xlnx_mix_disp_set_layer_alpha - Change the transparency of an entire plane
+ * @plane: Video layer affected by new alpha setting
+ * @val: Value of transparency setting (0-255) with 255 being opaque
+ * 0 being fully transparent
+ *
+ * Return:
+ * Zero on success, -EINVAL on failure
+ */
+static int xlnx_mix_disp_set_layer_alpha(struct xlnx_mix_plane *plane,
+ uint64_t val)
+{
+ struct xlnx_mix_layer_data *layer = plane->mixer_layer;
+ struct xlnx_mix_hw *mixer_hw = to_mixer_hw(plane);
+
+ /* Layer must exist and advertise alpha support */
+ if (!layer)
+ return -ENODEV;
+ if (!layer->hw_config.can_alpha)
+ return -ENODEV;
+
+ /* Alpha must stay within the IP's configured range */
+ if (val < XVMIX_ALPHA_MIN || val > XVMIX_ALPHA_MAX) {
+ DRM_ERROR("Mixer layer alpha dts value illegal.\n");
+ return -EINVAL;
+ }
+ return xlnx_mix_set_layer_alpha(mixer_hw, layer->id, val);
+}
+
+/**
+ * xlnx_mix_set_layer_buff_addr - Set buff addr for layer
+ * @mixer: Instance of mixer controlling layer to modify
+ * @id: Logical id of video overlay to adjust alpha setting
+ * @luma_addr: Start address of plane 1 of frame buffer for layer 1
+ * @chroma_addr: Start address of plane 2 of frame buffer for layer 1
+ *
+ * Sets the buffer address of the specified layer
+ * Return:
+ * Zero on success, -EINVAL on failure
+ */
+static int xlnx_mix_set_layer_buff_addr(struct xlnx_mix_hw *mixer,
+ enum xlnx_mix_layer_id id,
+ dma_addr_t luma_addr,
+ dma_addr_t chroma_addr)
+{
+ struct xlnx_mix_layer_data *layer_data;
+ u32 align, offset;
+ u32 reg1, reg2;
+
+ if (id >= mixer->layer_cnt)
+ return -EINVAL;
+
+ /* Check if addr is aligned to aximm width (PPC * 64-bits) */
+ align = mixer->ppc * 8;
+ if ((luma_addr % align) != 0 || (chroma_addr % align) != 0)
+ return -EINVAL;
+
+ /* Buffer registers start at layer 1; id 0 is the streaming master.
+ * NOTE(review): an id of 0 would underflow (id - 1) - presumably
+ * callers only pass memory-layer ids >= 1; confirm.
+ */
+ offset = (id - 1) * XVMIX_REG_OFFSET;
+ reg1 = XVMIX_LAYER1_BUF1_V_DATA + offset;
+ reg2 = XVMIX_LAYER1_BUF2_V_DATA + offset;
+ layer_data = &mixer->layer_data[id];
+ /* Use 64-bit writes only when both the IP and the kernel's
+ * dma_addr_t carry 64-bit addresses.
+ */
+ if (mixer->dma_addr_size == 64 && sizeof(dma_addr_t) == 8) {
+ reg_writeq(mixer->base, reg1, luma_addr);
+ reg_writeq(mixer->base, reg2, chroma_addr);
+ } else {
+ reg_writel(mixer->base, reg1, (u32)luma_addr);
+ reg_writel(mixer->base, reg2, (u32)chroma_addr);
+ }
+ layer_data->layer_regs.buff_addr1 = luma_addr;
+ layer_data->layer_regs.buff_addr2 = chroma_addr;
+
+ return 0;
+}
+
+/**
+ * xlnx_mix_hw_plane_dpms - Implementation of display power management
+ * system call (dpms).
+ * @plane: Plane/mixer layer to enable/disable (based on dpms value)
+ * @dpms: Display power management state to act upon
+ *
+ * Designed to disable and turn off a plane and restore all attached drm
+ * properties to their initial values. Alternatively, if dpms is "on", will
+ * enable a layer.
+ */
+
+static void
+xlnx_mix_hw_plane_dpms(struct xlnx_mix_plane *plane, int dpms)
+{
+ struct xlnx_mix *mixer;
+
+ if (!plane->mixer)
+ return;
+ mixer = plane->mixer;
+ plane->dpms = dpms;
+
+ switch (dpms) {
+ case DRM_MODE_DPMS_ON:
+ xlnx_mix_disp_layer_enable(plane);
+ break;
+ default:
+ /* Any non-ON state: mark inactive, disable the layer and
+ * reset attached properties to their defaults.
+ */
+ xlnx_mix_mark_layer_inactive(plane);
+ xlnx_mix_disp_layer_disable(plane);
+ /* restore to default property values */
+ if (mixer->alpha_prop)
+ xlnx_mix_disp_set_layer_alpha(plane, XVMIX_ALPHA_MAX);
+ if (mixer->scale_prop)
+ xlnx_mix_set_layer_scale(plane, XVMIX_SCALE_FACTOR_1X);
+ }
+}
+
+/* Plane-level dpms: manage the DMA engines around the hardware dpms */
+static void xlnx_mix_plane_dpms(struct drm_plane *base_plane, int dpms)
+{
+ struct xlnx_mix_plane *plane = to_xlnx_plane(base_plane);
+ unsigned int i;
+
+ DRM_DEBUG_KMS("plane->id: %d\n", plane->id);
+ DRM_DEBUG_KMS("dpms: %d -> %d\n", plane->dpms, dpms);
+
+ if (plane->dpms == dpms)
+ return;
+ plane->dpms = dpms;
+ switch (dpms) {
+ case DRM_MODE_DPMS_ON:
+ /* start dma engine */
+ for (i = 0; i < XVMIX_MAX_NUM_SUB_PLANES; i++)
+ if (plane->dma[i].chan && plane->dma[i].is_active)
+ dma_async_issue_pending(plane->dma[i].chan);
+ xlnx_mix_hw_plane_dpms(plane, dpms);
+ break;
+ default:
+ /* Disable the layer first, then tear down its DMA */
+ xlnx_mix_hw_plane_dpms(plane, dpms);
+ /* stop dma engine and release descriptors */
+ for (i = 0; i < XVMIX_MAX_NUM_SUB_PLANES; i++) {
+ if (plane->dma[i].chan && plane->dma[i].is_active) {
+ dmaengine_terminate_sync(plane->dma[i].chan);
+ plane->dma[i].is_active = false;
+ }
+ }
+ break;
+ }
+}
+
+/* Atomic hook dispatching mixer-specific plane properties (alpha and
+ * scale); unknown properties yield -EINVAL.
+ */
+static int
+xlnx_mix_disp_plane_atomic_set_property(struct drm_plane *base_plane,
+ struct drm_plane_state *state,
+ struct drm_property *property, u64 val)
+{
+ struct xlnx_mix_plane *plane = to_xlnx_plane(base_plane);
+ struct xlnx_mix *mixer = plane->mixer;
+
+ if (property == mixer->alpha_prop)
+ return xlnx_mix_disp_set_layer_alpha(plane, val);
+ if (property == mixer->scale_prop)
+ return xlnx_mix_set_layer_scale(plane, val);
+
+ /* Not one of ours (the previous trailing return 0 was unreachable) */
+ return -EINVAL;
+}
+
+/* Atomic hook reading back mixer-specific plane properties from the
+ * cached layer register state.
+ * NOTE(review): layer_data is indexed directly by the layer id here -
+ * confirm the logo layer id is a valid index into that array.
+ */
+static int
+xlnx_mix_disp_plane_atomic_get_property(struct drm_plane *base_plane,
+ const struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct xlnx_mix_plane *plane = to_xlnx_plane(base_plane);
+ struct xlnx_mix *mixer = plane->mixer;
+ struct xlnx_mix_hw *mixer_hw = to_mixer_hw(plane);
+ u32 layer_id = plane->mixer_layer->id;
+
+ if (property == mixer->alpha_prop)
+ *val = mixer_hw->layer_data[layer_id].layer_regs.alpha;
+ else if (property == mixer->scale_prop)
+ *val = mixer_hw->layer_data[layer_id].layer_regs.scale_fact;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * xlnx_mix_disp_plane_atomic_update_plane - plane update using atomic
+ * @plane: plane object to update
+ * @crtc: owning CRTC of owning plane
+ * @fb: framebuffer to flip onto plane
+ * @crtc_x: x offset of primary plane on crtc
+ * @crtc_y: y offset of primary plane on crtc
+ * @crtc_w: width of primary plane rectangle on crtc
+ * @crtc_h: height of primary plane rectangle on crtc
+ * @src_x: x offset of @fb for panning
+ * @src_y: y offset of @fb for panning
+ * @src_w: width of source rectangle in @fb
+ * @src_h: height of source rectangle in @fb
+ * @ctx: lock acquire context
+ *
+ * Provides a default plane update handler using the atomic driver interface.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+static int
+xlnx_mix_disp_plane_atomic_update_plane(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w,
+ unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_atomic_state *state;
+ struct drm_plane_state *plane_state;
+ int ret = 0;
+
+ state = drm_atomic_state_alloc(plane->dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = ctx;
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto fail;
+ }
+
+ ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+ if (ret != 0)
+ goto fail;
+
+ drm_atomic_set_fb_for_plane(plane_state, fb);
+ plane_state->crtc_x = crtc_x;
+ plane_state->crtc_y = crtc_y;
+ plane_state->crtc_w = crtc_w;
+ plane_state->crtc_h = crtc_h;
+ plane_state->src_x = src_x;
+ plane_state->src_y = src_y;
+ plane_state->src_w = src_w;
+ plane_state->src_h = src_h;
+
+ if (plane == crtc->cursor)
+ state->legacy_cursor_update = true;
+
+ /* Do async-update if possible */
+ state->async_update = !drm_atomic_helper_async_check(plane->dev, state);
+
+ ret = drm_atomic_commit(state);
+
+fail:
+ /* Drops the state reference on both success and failure paths */
+ drm_atomic_state_put(state);
+ return ret;
+}
+
+/* DRM plane function table: mixer-specific update/property hooks plus
+ * stock atomic helpers for the rest.
+ * NOTE(review): this could likely be const - confirm nothing writes it.
+ */
+static struct drm_plane_funcs xlnx_mix_plane_funcs = {
+ .update_plane = xlnx_mix_disp_plane_atomic_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .atomic_set_property = xlnx_mix_disp_plane_atomic_set_property,
+ .atomic_get_property = xlnx_mix_disp_plane_atomic_get_property,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+/**
+ * xlnx_mix_logo_load - Loads mixer's internal bram
+ * @mixer: Mixer instance to act upon
+ * @logo_w: Width of logo in pixels
+ * @logo_h: Height of logo in pixels
+ * @r_buf: Pointer to byte buffer array of R data values
+ * @g_buf: Pointer to byte buffer array of G data values
+ * @b_buf: Pointer to byte buffer array of B data values
+ * @a_buf: Pointer to byte buffer array of A data values
+ *
+ * Loads mixer's internal bram with planar R, G, B and A data
+ *
+ * Return:
+ * Zero on success, -ENODEV if logo layer not enabled; -EINVAL otherwise
+ */
+static int xlnx_mix_logo_load(struct xlnx_mix_hw *mixer, u32 logo_w, u32 logo_h,
+ u8 *r_buf, u8 *g_buf, u8 *b_buf, u8 *a_buf)
+{
+ void __iomem *reg = mixer->base;
+ struct xlnx_mix_layer_data *layer_data;
+
+ int x;
+ u32 shift;
+ u32 rword, gword, bword, aword;
+ u32 pixel_cnt = logo_w * logo_h;
+ u32 unaligned_pix_cnt = pixel_cnt % 4;
+ u32 width, height, curr_x_pos, curr_y_pos;
+ u32 rbase_addr, gbase_addr, bbase_addr, abase_addr;
+
+ layer_data = xlnx_mix_get_layer_data(mixer, mixer->logo_layer_id);
+ rword = 0;
+ gword = 0;
+ bword = 0;
+ aword = 0;
+
+ if (!layer_data)
+ return -ENODEV;
+
+ /* RGBA data should be 32-bit word aligned */
+ if (unaligned_pix_cnt && mixer->logo_pixel_alpha_enabled)
+ return -EINVAL;
+
+ if (!(mixer->logo_layer_en &&
+ logo_w <= layer_data->hw_config.max_width &&
+ logo_h <= layer_data->hw_config.max_height))
+ return -EINVAL;
+
+ width = logo_w;
+ height = logo_h;
+ rbase_addr = XVMIX_LOGOR_V_BASE;
+ gbase_addr = XVMIX_LOGOG_V_BASE;
+ bbase_addr = XVMIX_LOGOB_V_BASE;
+ abase_addr = XVMIX_LOGOA_V_BASE;
+
+ /* Pack four consecutive 8-bit samples per color into one 32-bit
+ * word (LSB first) and write it once every fourth pixel; (x - 3)
+ * is the byte offset of the word's first pixel.
+ * NOTE(review): any trailing partial word (pixel_cnt % 4 != 0,
+ * allowed when alpha is disabled) is never flushed - confirm this
+ * is intended.
+ */
+ for (x = 0; x < pixel_cnt; x++) {
+ shift = (x % 4) * 8;
+ rword |= r_buf[x] << shift;
+ gword |= g_buf[x] << shift;
+ bword |= b_buf[x] << shift;
+ if (mixer->logo_pixel_alpha_enabled)
+ aword |= a_buf[x] << shift;
+
+ if (x % 4 == 3) {
+ reg_writel(reg, (rbase_addr + (x - 3)), rword);
+ reg_writel(reg, (gbase_addr + (x - 3)), gword);
+ reg_writel(reg, (bbase_addr + (x - 3)), bword);
+ if (mixer->logo_pixel_alpha_enabled)
+ reg_writel(reg, (abase_addr + (x - 3)), aword);
+ }
+ }
+
+ /* Re-apply the window so the new logo dimensions take effect */
+ curr_x_pos = layer_data->layer_regs.x_pos;
+ curr_y_pos = layer_data->layer_regs.y_pos;
+ return xlnx_mix_set_layer_window(mixer, mixer->logo_layer_id,
+ curr_x_pos, curr_y_pos,
+ logo_w, logo_h, 0);
+}
+
+static int xlnx_mix_update_logo_img(struct xlnx_mix_plane *plane,
+ struct drm_gem_cma_object *buffer,
+ u32 src_w, u32 src_h)
+{
+ struct xlnx_mix_layer_data *logo_layer = plane->mixer_layer;
+ struct xlnx_mix_hw *mixer = to_mixer_hw(plane);
+ size_t pixel_cnt = src_h * src_w;
+ /* color comp defaults to offset in RG24 buffer */
+ u32 pix_cmp_cnt;
+ u32 logo_cmp_cnt;
+ bool per_pixel_alpha = false;
+ u32 max_width = logo_layer->hw_config.max_width;
+ u32 max_height = logo_layer->hw_config.max_height;
+ u32 min_width = logo_layer->hw_config.min_width;
+ u32 min_height = logo_layer->hw_config.min_height;
+ u8 *r_data = NULL;
+ u8 *g_data = NULL;
+ u8 *b_data = NULL;
+ u8 *a_data = NULL;
+ size_t el_size = sizeof(u8);
+ u8 *pixel_mem_data;
+ int ret, i, j;
+
+ /* ensure valid conditions for update */
+ if (logo_layer->id != mixer->logo_layer_id)
+ return 0;
+
+ if (src_h > max_height || src_w > max_width ||
+ src_h < min_height || src_w < min_width) {
+ DRM_ERROR("Mixer logo/cursor layer dimensions illegal.\n");
+ return -EINVAL;
+ }
+
+ if (!xlnx_mix_isfmt_support(plane->mixer_layer->hw_config.vid_fmt)) {
+ DRM_ERROR("DRM color format not supported for logo layer\n");
+ return -EINVAL;
+ }
+ per_pixel_alpha = (logo_layer->hw_config.vid_fmt ==
+ DRM_FORMAT_RGBA8888) ? true : false;
+ r_data = kcalloc(pixel_cnt, el_size, GFP_KERNEL);
+ g_data = kcalloc(pixel_cnt, el_size, GFP_KERNEL);
+ b_data = kcalloc(pixel_cnt, el_size, GFP_KERNEL);
+ if (per_pixel_alpha)
+ a_data = kcalloc(pixel_cnt, el_size, GFP_KERNEL);
+
+ if (!r_data || !g_data || !b_data || (per_pixel_alpha && !a_data)) {
+ DRM_ERROR("Unable to allocate memory for logo layer data\n");
+ ret = -ENOMEM;
+ goto free;
+ }
+ pix_cmp_cnt = per_pixel_alpha ? 4 : 3;
+ logo_cmp_cnt = pixel_cnt * pix_cmp_cnt;
+ /* ensure buffer attributes have changed to indicate new logo
+ * has been created
+ */
+ if ((phys_addr_t)buffer->vaddr == logo_layer->layer_regs.buff_addr1 &&
+ src_w == logo_layer->layer_regs.width &&
+ src_h == logo_layer->layer_regs.height)
+ return 0;
+
+ /* cache buffer address for future comparison */
+ logo_layer->layer_regs.buff_addr1 = (phys_addr_t)buffer->vaddr;
+ pixel_mem_data = (u8 *)(buffer->vaddr);
+ for (i = 0, j = 0; j < pixel_cnt; j++) {
+ if (per_pixel_alpha && a_data)
+ a_data[j] = pixel_mem_data[i++];
+
+ b_data[j] = pixel_mem_data[i++];
+ g_data[j] = pixel_mem_data[i++];
+ r_data[j] = pixel_mem_data[i++];
+ }
+ ret = xlnx_mix_logo_load(to_mixer_hw(plane), src_w, src_h, r_data,
+ g_data, b_data,
+ per_pixel_alpha ? a_data : NULL);
+free:
+ kfree(r_data);
+ kfree(g_data);
+ kfree(b_data);
+ kfree(a_data);
+
+ return ret;
+}
+
/**
 * xlnx_mix_set_plane - Implementation of DRM plane_update callback
 * @plane: xlnx_mix_plane object containing references to
 *  the base plane and mixer
 * @fb: Framebuffer descriptor
 * @crtc_x: X position of layer on crtc. Note, if the plane represents either
 *  the master hardware layer (video0) or the layer representing the DRM primary
 *  layer, the crtc x/y coordinates are either ignored and/or set to 0/0
 *  respectively.
 * @crtc_y: Y position of layer. See description of crtc_x handling
 *  for more information.
 * @src_x: x-offset in memory buffer from which to start reading
 * @src_y: y-offset in memory buffer from which to start reading
 * @src_w: Number of horizontal pixels to read from memory per row
 * @src_h: Number of rows of video data to read from memory
 *
 * Configures a mixer layer to comply with user space SET_PLANE ioctl
 * call.
 *
 * Return:
 * Zero on success, non-zero linux error code otherwise.
 */
static int xlnx_mix_set_plane(struct xlnx_mix_plane *plane,
			      struct drm_framebuffer *fb,
			      int crtc_x, int crtc_y,
			      u32 src_x, u32 src_y,
			      u32 src_w, u32 src_h)
{
	struct xlnx_mix_hw *mixer_hw;
	struct xlnx_mix *mixer;
	struct drm_gem_cma_object *luma_buffer;
	u32 luma_stride = fb->pitches[0];
	dma_addr_t luma_addr, chroma_addr = 0;
	u32 active_area_width;
	u32 active_area_height;
	enum xlnx_mix_layer_id layer_id;
	int ret;
	const struct drm_format_info *info = fb->format;

	mixer = plane->mixer;
	mixer_hw = &mixer->mixer_hw;
	layer_id = plane->mixer_layer->id;
	/* the DRM primary layer's current window defines the active area */
	active_area_width =
		mixer->drm_primary_layer->mixer_layer->layer_regs.width;
	active_area_height =
		mixer->drm_primary_layer->mixer_layer->layer_regs.height;
	/* compute memory data */
	luma_buffer = drm_fb_cma_get_gem_obj(fb, 0);
	luma_addr = drm_fb_cma_get_gem_addr(fb, plane->base.state, 0);
	if (!luma_addr) {
		DRM_ERROR("%s failed to get luma paddr\n", __func__);
		return -EINVAL;
	}

	/* multi-planar formats carry chroma in a second gem plane */
	if (info->num_planes > 1) {
		chroma_addr = drm_fb_cma_get_gem_addr(fb, plane->base.state, 1);
		if (!chroma_addr) {
			DRM_ERROR("failed to get chroma paddr\n");
			return -EINVAL;
		}
	}
	ret = xlnx_mix_mark_layer_active(plane);
	if (ret)
		return ret;

	switch (layer_id) {
	case XVMIX_LAYER_MASTER:
		if (!plane->mixer_layer->hw_config.is_streaming)
			xlnx_mix_mark_layer_inactive(plane);
		/*
		 * When the hw master layer is also the DRM primary layer its
		 * dimensions set the mixer's active area; the layer must be
		 * disabled while the active area registers are reprogrammed.
		 */
		if (mixer->drm_primary_layer == mixer->hw_master_layer) {
			xlnx_mix_layer_disable(mixer_hw, layer_id);
			ret = xlnx_mix_set_active_area(mixer_hw, src_w, src_h);
			if (ret)
				return ret;
			xlnx_mix_layer_enable(mixer_hw, layer_id);

		} else if (src_w != active_area_width ||
			   src_h != active_area_height) {
			/* master layer must match the primary layer's window */
			DRM_ERROR("Invalid dimensions for mixer layer 0.\n");
			return -EINVAL;
		}
		break;

	default:
		ret = xlnx_mix_set_layer_dimensions(plane, crtc_x, crtc_y,
						    src_w, src_h, luma_stride);
		if (ret)
			break;
		if (layer_id == mixer_hw->logo_layer_id) {
			ret = xlnx_mix_update_logo_img(plane, luma_buffer,
						       src_w, src_h);
		} else {
			/* memory (non-streaming) layers need buffer addrs */
			if (!plane->mixer_layer->hw_config.is_streaming)
				ret = xlnx_mix_set_layer_buff_addr
					(mixer_hw, plane->mixer_layer->id,
					 luma_addr, chroma_addr);
		}
	}
	return ret;
}
+
/*
 * Mode-set a plane: program the per-subplane DMA descriptors, optionally
 * the CSC coefficient tables, then hand off to xlnx_mix_set_plane() to
 * configure the mixer layer itself.
 */
static int xlnx_mix_plane_mode_set(struct drm_plane *base_plane,
				   struct drm_framebuffer *fb,
				   int crtc_x, int crtc_y,
				   unsigned int crtc_w, unsigned int crtc_h,
				   u32 src_x, uint32_t src_y,
				   u32 src_w, uint32_t src_h)
{
	struct xlnx_mix_plane *plane = to_xlnx_plane(base_plane);
	struct xlnx_mix_hw *mixer_hw = to_mixer_hw(plane);
	const struct drm_format_info *info = fb->format;
	size_t i = 0;
	dma_addr_t luma_paddr;
	int ret;
	u32 stride;

	/* JPM TODO begin start of code to extract into prep-interleaved*/
	DRM_DEBUG_KMS("plane->id: %d\n", plane->id);
	DRM_DEBUG_KMS("h: %d(%d), v: %d(%d)\n", src_w, crtc_x, src_h, crtc_y);

	/* We have multiple dma channels. Set each per video plane */
	for (; i < info->num_planes; i++) {
		/* chroma subplanes are subsampled by hsub/vsub */
		unsigned int width = src_w / (i ? info->hsub : 1);
		unsigned int height = src_h / (i ? info->vsub : 1);

		luma_paddr = drm_fb_cma_get_gem_addr(fb, base_plane->state, i);
		if (!luma_paddr) {
			DRM_ERROR("%s failed to get luma paddr\n", __func__);
			return -EINVAL;
		}

		plane->dma[i].xt.numf = height;
		/*
		 * NOTE(review): plane index 0 is used here for every
		 * subplane (both the width-bytes lookup and fb->pitches[0]).
		 * This happens to work for NV12-style layouts where the
		 * chroma plane has double cpp at half width and an equal
		 * pitch - confirm it is intentional for all supported
		 * formats rather than using index i.
		 */
		plane->dma[i].sgl[0].size =
			drm_format_plane_width_bytes(info, 0, width);
		plane->dma[i].sgl[0].icg = fb->pitches[0] -
					   plane->dma[i].sgl[0].size;
		plane->dma[i].xt.src_start = luma_paddr;
		plane->dma[i].xt.frame_size = info->num_planes;
		plane->dma[i].xt.dir = DMA_MEM_TO_DEV;
		plane->dma[i].xt.src_sgl = true;
		plane->dma[i].xt.dst_sgl = false;
		plane->dma[i].is_active = true;
	}

	for (; i < XVMIX_MAX_NUM_SUB_PLANES; i++)
		plane->dma[i].is_active = false;
	/* Do we have a video format aware dma channel?
	 * If so, modify descriptor accordingly
	 */
	if (plane->dma[0].chan && !plane->dma[1].chan && info->num_planes > 1) {
		/* single channel carries both planes: set inter-plane gap */
		stride = plane->dma[0].sgl[0].size + plane->dma[0].sgl[0].icg;
		plane->dma[0].sgl[0].src_icg = plane->dma[1].xt.src_start -
					       plane->dma[0].xt.src_start -
					       (plane->dma[0].xt.numf * stride);
	}

	if (mixer_hw->csc_enabled) {
		/**
		 * magic numbers of coefficient table for colorimetry
		 * and range are derived from the following references:
		 * [1] Rec. ITU-R BT.601-6
		 * [2] Rec. ITU-R BT.709-5
		 * [3] Rec. ITU-R BT.2020
		 * [4] http://en.wikipedia.org/wiki/YCbCr
		 * coefficient table supports BT601 / BT709 / BT2020 encoding
		 * schemes and 16-235(limited) / 16-240(full) range.
		 */
		xlnx_mix_set_yuv2_rgb_coeff(plane,
					    base_plane->state->color_encoding,
					    base_plane->state->color_range);
		xlnx_mix_set_rgb2_yuv_coeff(plane,
					    base_plane->state->color_encoding,
					    base_plane->state->color_range);
	}

	ret = xlnx_mix_set_plane(plane, fb, crtc_x, crtc_y, src_x, src_y,
				 src_w, src_h);
	return ret;
}
+
/*
 * Intentional no-op: no driver-specific framebuffer preparation is done.
 * NOTE(review): presumably CMA-backed buffers are already DMA-visible
 * (the file uses the drm_fb_cma helpers) - confirm no fence handling is
 * required here.
 */
static int xlnx_mix_plane_prepare_fb(struct drm_plane *plane,
				     struct drm_plane_state *new_state)
{
	return 0;
}
+
/* Intentional no-op counterpart to xlnx_mix_plane_prepare_fb(). */
static void xlnx_mix_plane_cleanup_fb(struct drm_plane *plane,
				      struct drm_plane_state *old_state)
{
}
+
+static int xlnx_mix_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ int scale;
+ struct xlnx_mix_plane *mix_plane = to_xlnx_plane(plane);
+ struct xlnx_mix_hw *mixer_hw = to_mixer_hw(mix_plane);
+ struct xlnx_mix *mix;
+
+ /* No check required for the drm_primary_plane */
+ mix = container_of(mixer_hw, struct xlnx_mix, mixer_hw);
+ if (mix->drm_primary_layer == mix_plane)
+ return 0;
+
+ scale = xlnx_mix_get_layer_scaling(mixer_hw,
+ mix_plane->mixer_layer->id);
+ if (is_window_valid(mixer_hw, state->crtc_x, state->crtc_y,
+ state->src_w >> 16, state->src_h >> 16, scale))
+ return 0;
+
+ return -EINVAL;
+}
+
/*
 * Apply the new plane state: reprogram DMA and mixer registers, commit the
 * new framebuffer address and make sure the plane is powered on.
 */
static void xlnx_mix_plane_atomic_update(struct drm_plane *plane,
					 struct drm_plane_state *old_state)
{
	int ret;

	/* nothing to do for a plane that is not on a CRTC with an fb */
	if (!plane->state->crtc || !plane->state->fb)
		return;

	/* a format change requires a full stop before reprogramming */
	if (old_state->fb &&
	    old_state->fb->format->format != plane->state->fb->format->format)
		xlnx_mix_plane_dpms(plane, DRM_MODE_DPMS_OFF);

	/* src_* are 16.16 fixed point; convert to whole pixels */
	ret = xlnx_mix_plane_mode_set(plane, plane->state->fb,
				      plane->state->crtc_x,
				      plane->state->crtc_y,
				      plane->state->crtc_w,
				      plane->state->crtc_h,
				      plane->state->src_x >> 16,
				      plane->state->src_y >> 16,
				      plane->state->src_w >> 16,
				      plane->state->src_h >> 16);
	if (ret) {
		DRM_ERROR("failed to mode-set a plane\n");
		return;
	}
	/* apply the new fb addr */
	xlnx_mix_plane_commit(plane);
	/* make sure a plane is on */
	xlnx_mix_plane_dpms(plane, DRM_MODE_DPMS_ON);
}
+
+static void xlnx_mix_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ xlnx_mix_plane_dpms(plane, DRM_MODE_DPMS_OFF);
+}
+
/*
 * Unconditionally approve async (cursor-style) plane updates.
 * NOTE(review): no window/format re-validation happens on the async path,
 * so the checks in xlnx_mix_plane_atomic_check() are bypassed - confirm
 * this is safe for every layer type.
 */
static int xlnx_mix_plane_atomic_async_check(struct drm_plane *plane,
					     struct drm_plane_state *state)
{
	return 0;
}
+
/*
 * Async plane update: copy the new configuration straight into the
 * current plane state, then run the normal atomic_update path against it.
 */
static void
xlnx_mix_plane_atomic_async_update(struct drm_plane *plane,
				   struct drm_plane_state *new_state)
{
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(new_state->state, plane);

	/* Update the current state with new configurations */
	drm_atomic_set_fb_for_plane(plane->state, new_state->fb);
	plane->state->crtc = new_state->crtc;
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->crtc_w = new_state->crtc_w;
	plane->state->crtc_h = new_state->crtc_h;
	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->src_w = new_state->src_w;
	plane->state->src_h = new_state->src_h;
	/*
	 * NOTE(review): pointing plane->state->state at the transient
	 * atomic state looks suspicious (it is freed when the commit
	 * completes) - confirm nothing dereferences it afterwards.
	 */
	plane->state->state = new_state->state;

	xlnx_mix_plane_atomic_update(plane, old_state);
}
+
/*
 * Atomic helper callbacks for mixer planes.  prepare_fb/cleanup_fb are
 * deliberate no-ops and atomic_async_check accepts every async update
 * (both return 0 unconditionally above).
 */
static const struct drm_plane_helper_funcs xlnx_mix_plane_helper_funcs = {
	.prepare_fb = xlnx_mix_plane_prepare_fb,
	.cleanup_fb = xlnx_mix_plane_cleanup_fb,
	.atomic_check = xlnx_mix_plane_atomic_check,
	.atomic_update = xlnx_mix_plane_atomic_update,
	.atomic_disable = xlnx_mix_plane_atomic_disable,
	.atomic_async_check = xlnx_mix_plane_atomic_async_check,
	.atomic_async_update = xlnx_mix_plane_atomic_async_update,
};
+
/*
 * Request the layer's DMA channels, validate its video format and register
 * the DRM plane.  Consumes the @layer_node reference: of_node_put() is
 * called on both the success and all error paths, so callers must not put
 * the node again.
 */
static int xlnx_mix_init_plane(struct xlnx_mix_plane *plane,
			       unsigned int poss_crtcs,
			       struct device_node *layer_node)
{
	struct xlnx_mix *mixer = plane->mixer;
	char name[16];
	enum drm_plane_type type;
	int ret, i;

	plane->dpms = DRM_MODE_DPMS_OFF;
	type = DRM_PLANE_TYPE_OVERLAY;

	for (i = 0; i < XVMIX_MAX_NUM_SUB_PLANES; i++) {
		snprintf(name, sizeof(name), "dma%d", i);
		plane->dma[i].chan = of_dma_request_slave_channel(layer_node,
								  name);
		/* a missing channel (-ENODEV) is fine: streaming layers */
		if (PTR_ERR(plane->dma[i].chan) == -ENODEV) {
			plane->dma[i].chan = NULL;
			continue;
		}
		if (IS_ERR(plane->dma[i].chan)) {
			DRM_ERROR("failed to request dma channel\n");
			ret = PTR_ERR(plane->dma[i].chan);
			/* clear before the cleanup loop releases channels */
			plane->dma[i].chan = NULL;
			goto err_dma;
		}
	}
	if (!xlnx_mix_isfmt_support(plane->mixer_layer->hw_config.vid_fmt)) {
		DRM_ERROR("DRM color format not supported by mixer\n");
		ret = -ENODEV;
		goto err_init;
	}
	plane->format = plane->mixer_layer->hw_config.vid_fmt;
	/* logo layer doubles as the DRM cursor plane */
	if (plane == mixer->hw_logo_layer)
		type = DRM_PLANE_TYPE_CURSOR;
	if (plane == mixer->drm_primary_layer)
		type = DRM_PLANE_TYPE_PRIMARY;

	/* initialize drm plane */
	ret = drm_universal_plane_init(mixer->drm, &plane->base,
				       poss_crtcs, &xlnx_mix_plane_funcs,
				       &plane->format,
				       1, NULL, type, NULL);

	if (ret) {
		DRM_ERROR("failed to initialize plane\n");
		goto err_init;
	}
	drm_plane_helper_add(&plane->base, &xlnx_mix_plane_helper_funcs);
	of_node_put(layer_node);

	return 0;

err_init:
	xlnx_mix_disp_layer_disable(plane);
err_dma:
	for (i = 0; i < XVMIX_MAX_NUM_SUB_PLANES; i++)
		if (plane->dma[i].chan)
			dma_release_channel(plane->dma[i].chan);

	of_node_put(layer_node);
	return ret;
}
+
+static int xlnx_mix_parse_dt_bg_video_fmt(struct device_node *node,
+ struct xlnx_mix_hw *mixer_hw)
+{
+ struct device_node *layer_node;
+ struct xlnx_mix_layer_data *layer;
+ const char *vformat;
+
+ layer_node = of_get_child_by_name(node, "layer_0");
+ layer = &mixer_hw->layer_data[XVMIX_MASTER_LAYER_IDX];
+
+ /* Set default values */
+ layer->hw_config.can_alpha = false;
+ layer->hw_config.can_scale = false;
+ layer->hw_config.min_width = XVMIX_LAYER_WIDTH_MIN;
+ layer->hw_config.min_height = XVMIX_LAYER_HEIGHT_MIN;
+
+ if (of_property_read_string(layer_node, "xlnx,vformat",
+ &vformat)) {
+ DRM_ERROR("No xlnx,vformat value for layer 0 in dts\n");
+ return -EINVAL;
+ }
+ strcpy((char *)&layer->hw_config.vid_fmt, vformat);
+ layer->hw_config.is_streaming =
+ of_property_read_bool(layer_node, "xlnx,layer-streaming");
+ if (of_property_read_u32(node, "xlnx,bpc", &mixer_hw->bg_layer_bpc)) {
+ DRM_ERROR("Failed to get bits per component (bpc) prop\n");
+ return -EINVAL;
+ }
+ if (of_property_read_u32(layer_node, "xlnx,layer-max-width",
+ &layer->hw_config.max_width)) {
+ DRM_ERROR("Failed to get screen width prop\n");
+ return -EINVAL;
+ }
+ mixer_hw->max_layer_width = layer->hw_config.max_width;
+ if (of_property_read_u32(layer_node, "xlnx,layer-max-height",
+ &layer->hw_config.max_height)) {
+ DRM_ERROR("Failed to get screen height prop\n");
+ return -EINVAL;
+ }
+ mixer_hw->max_layer_height = layer->hw_config.max_height;
+ layer->id = XVMIX_LAYER_MASTER;
+
+ return 0;
+}
+
+static int xlnx_mix_parse_dt_logo_data(struct device_node *node,
+ struct xlnx_mix_hw *mixer_hw)
+{
+ struct xlnx_mix_layer_data *layer_data;
+ struct device_node *logo_node;
+ u32 max_width, max_height;
+
+ logo_node = of_get_child_by_name(node, "logo");
+ if (!logo_node) {
+ DRM_ERROR("No logo node specified in device tree.\n");
+ return -EINVAL;
+ }
+
+ layer_data = &mixer_hw->layer_data[XVMIX_LOGO_LAYER_IDX];
+
+ /* set defaults for logo layer */
+ layer_data->hw_config.min_height = XVMIX_LOGO_LAYER_HEIGHT_MIN;
+ layer_data->hw_config.min_width = XVMIX_LOGO_LAYER_WIDTH_MIN;
+ layer_data->hw_config.is_streaming = false;
+ layer_data->hw_config.vid_fmt = DRM_FORMAT_RGB888;
+ layer_data->hw_config.can_alpha = true;
+ layer_data->hw_config.can_scale = true;
+ layer_data->layer_regs.buff_addr1 = 0;
+ layer_data->layer_regs.buff_addr2 = 0;
+ layer_data->id = mixer_hw->logo_layer_id;
+
+ if (of_property_read_u32(logo_node, "xlnx,logo-width", &max_width)) {
+ DRM_ERROR("Failed to get logo width prop\n");
+ return -EINVAL;
+ }
+ if (max_width > XVMIX_LOGO_LAYER_WIDTH_MAX ||
+ max_width < XVMIX_LOGO_LAYER_WIDTH_MIN) {
+ DRM_ERROR("Illegal mixer logo layer width.\n");
+ return -EINVAL;
+ }
+ layer_data->hw_config.max_width = max_width;
+ mixer_hw->max_logo_layer_width = layer_data->hw_config.max_width;
+
+ if (of_property_read_u32(logo_node, "xlnx,logo-height", &max_height)) {
+ DRM_ERROR("Failed to get logo height prop\n");
+ return -EINVAL;
+ }
+ if (max_height > XVMIX_LOGO_LAYER_HEIGHT_MAX ||
+ max_height < XVMIX_LOGO_LAYER_HEIGHT_MIN) {
+ DRM_ERROR("Illegal mixer logo layer height.\n");
+ return -EINVAL;
+ }
+ layer_data->hw_config.max_height = max_height;
+ mixer_hw->max_logo_layer_height = layer_data->hw_config.max_height;
+ mixer_hw->logo_pixel_alpha_enabled =
+ of_property_read_bool(logo_node, "xlnx,logo-pixel-alpha");
+ if (mixer_hw->logo_pixel_alpha_enabled)
+ layer_data->hw_config.vid_fmt = DRM_FORMAT_RGBA8888;
+
+ return 0;
+}
+
+static int xlnx_mix_dt_parse(struct device *dev, struct xlnx_mix *mixer)
+{
+ struct xlnx_mix_plane *planes;
+ struct xlnx_mix_hw *mixer_hw;
+ struct device_node *node, *vtc_node;
+ struct xlnx_mix_layer_data *l_data;
+ struct resource res;
+ int ret, l_cnt, i;
+
+ node = dev->of_node;
+ mixer_hw = &mixer->mixer_hw;
+ mixer->dpms = DRM_MODE_DPMS_OFF;
+
+ mixer_hw->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(mixer_hw->reset_gpio)) {
+ ret = PTR_ERR(mixer_hw->reset_gpio);
+ if (ret == -EPROBE_DEFER)
+ dev_dbg(dev, "No gpio probed for mixer. Deferring\n");
+ else
+ dev_err(dev, "No reset gpio info from dts for mixer\n");
+ return ret;
+ }
+ gpiod_set_raw_value(mixer_hw->reset_gpio, 0);
+ gpiod_set_raw_value(mixer_hw->reset_gpio, 1);
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(dev, "Invalid memory address for mixer %d\n", ret);
+ return ret;
+ }
+ /* Read in mandatory global dts properties */
+ mixer_hw->base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(mixer_hw->base)) {
+ dev_err(dev, "Failed to map io mem space for mixer\n");
+ return PTR_ERR(mixer_hw->base);
+ }
+ if (of_device_is_compatible(dev->of_node, "xlnx,mixer-4.0") ||
+ of_device_is_compatible(dev->of_node, "xlnx,mixer-5.0")) {
+ mixer_hw->max_layers = 18;
+ mixer_hw->logo_en_mask = BIT(23);
+ mixer_hw->enable_all_mask = (GENMASK(16, 0) |
+ mixer_hw->logo_en_mask);
+ } else {
+ mixer_hw->max_layers = 10;
+ mixer_hw->logo_en_mask = BIT(15);
+ mixer_hw->enable_all_mask = (GENMASK(8, 0) |
+ mixer_hw->logo_en_mask);
+ }
+ if (of_device_is_compatible(dev->of_node, "xlnx,mixer-5.0")) {
+ const char *prop_name = "xlnx,enable-csc-coefficient-register";
+
+ mixer_hw->csc_enabled = of_property_read_bool(node, prop_name);
+ }
+
+ ret = of_property_read_u32(node, "xlnx,num-layers",
+ &mixer_hw->num_layers);
+ if (ret) {
+ dev_err(dev, "No xlnx,num-layers dts prop for mixer node\n");
+ return ret;
+ }
+ mixer_hw->logo_layer_id = mixer_hw->max_layers - 1;
+ if (mixer_hw->num_layers > mixer_hw->max_layers) {
+ dev_err(dev, "Num layer nodes in device tree > mixer max\n");
+ return -EINVAL;
+ }
+ ret = of_property_read_u32(node, "xlnx,dma-addr-width",
+ &mixer_hw->dma_addr_size);
+ if (ret) {
+ dev_err(dev, "missing addr-width dts prop\n");
+ return ret;
+ }
+ if (mixer_hw->dma_addr_size != 32 && mixer_hw->dma_addr_size != 64) {
+ dev_err(dev, "invalid addr-width dts prop\n");
+ return -EINVAL;
+ }
+
+ /* VTC Bridge support */
+ vtc_node = of_parse_phandle(node, "xlnx,bridge", 0);
+ if (vtc_node) {
+ mixer->vtc_bridge = of_xlnx_bridge_get(vtc_node);
+ if (!mixer->vtc_bridge) {
+ dev_info(dev, "Didn't get vtc bridge instance\n");
+ return -EPROBE_DEFER;
+ }
+ } else {
+ dev_info(dev, "vtc bridge property not present\n");
+ }
+
+ mixer_hw->logo_layer_en = of_property_read_bool(node,
+ "xlnx,logo-layer");
+ l_cnt = mixer_hw->num_layers + (mixer_hw->logo_layer_en ? 1 : 0);
+ mixer_hw->layer_cnt = l_cnt;
+
+ l_data = devm_kzalloc(dev, sizeof(*l_data) * l_cnt, GFP_KERNEL);
+ if (!l_data)
+ return -ENOMEM;
+ mixer_hw->layer_data = l_data;
+ /* init DRM planes */
+ planes = devm_kzalloc(dev, sizeof(*planes) * l_cnt, GFP_KERNEL);
+ if (!planes)
+ return -ENOMEM;
+ mixer->planes = planes;
+ mixer->num_planes = l_cnt;
+ for (i = 0; i < mixer->num_planes; i++)
+ mixer->planes[i].mixer = mixer;
+
+ /* establish background layer video properties from dts */
+ ret = xlnx_mix_parse_dt_bg_video_fmt(node, mixer_hw);
+ if (ret)
+ return ret;
+ if (mixer_hw->logo_layer_en) {
+ /* read logo data from dts */
+ ret = xlnx_mix_parse_dt_logo_data(node, mixer_hw);
+ return ret;
+ }
+ return 0;
+}
+
+static int xlnx_mix_of_init_layer(struct device *dev, struct device_node *node,
+ char *name, struct xlnx_mix_layer_data *layer,
+ u32 max_width, struct xlnx_mix *mixer, int id)
+{
+ struct device_node *layer_node;
+ const char *vformat;
+ int ret;
+
+ layer_node = of_get_child_by_name(node, name);
+ if (!layer_node)
+ return -EINVAL;
+
+ /* Set default values */
+ layer->hw_config.can_alpha = false;
+ layer->hw_config.can_scale = false;
+ layer->hw_config.is_streaming = false;
+ layer->hw_config.max_width = max_width;
+ layer->hw_config.min_width = XVMIX_LAYER_WIDTH_MIN;
+ layer->hw_config.min_height = XVMIX_LAYER_HEIGHT_MIN;
+ layer->hw_config.vid_fmt = 0;
+ layer->id = 0;
+ mixer->planes[id].mixer_layer = layer;
+
+ ret = of_property_read_u32(layer_node, "xlnx,layer-id", &layer->id);
+ if (ret) {
+ dev_err(dev, "xlnx,layer-id property not found\n");
+ return ret;
+ }
+ if (layer->id < 1 || layer->id >= mixer->mixer_hw.max_layers) {
+ dev_err(dev, "Mixer layer id %u in dts is out of legal range\n",
+ layer->id);
+ return -EINVAL;
+ }
+ ret = of_property_read_string(layer_node, "xlnx,vformat", &vformat);
+ if (ret) {
+ dev_err(dev, "No mixer layer vformat in dts for layer id %d\n",
+ layer->id);
+ return ret;
+ }
+
+ strcpy((char *)&layer->hw_config.vid_fmt, vformat);
+ layer->hw_config.can_scale =
+ of_property_read_bool(layer_node, "xlnx,layer-scale");
+ if (layer->hw_config.can_scale) {
+ ret = of_property_read_u32(layer_node, "xlnx,layer-max-width",
+ &layer->hw_config.max_width);
+ if (ret) {
+ dev_err(dev, "Mixer layer %d dts missing width prop.\n",
+ layer->id);
+ return ret;
+ }
+
+ if (layer->hw_config.max_width > max_width) {
+ dev_err(dev, "Illlegal Mixer layer %d width %d\n",
+ layer->id, layer->hw_config.max_width);
+ return -EINVAL;
+ }
+ }
+ layer->hw_config.can_alpha =
+ of_property_read_bool(layer_node, "xlnx,layer-alpha");
+ layer->hw_config.is_streaming =
+ of_property_read_bool(layer_node, "xlnx,layer-streaming");
+ if (of_property_read_bool(layer_node, "xlnx,layer-primary")) {
+ if (mixer->drm_primary_layer) {
+ dev_err(dev,
+ "More than one primary layer in mixer dts\n");
+ return -EINVAL;
+ }
+ mixer->drm_primary_layer = &mixer->planes[id];
+ }
+ ret = xlnx_mix_init_plane(&mixer->planes[id], 1, layer_node);
+ if (ret)
+ dev_err(dev, "Unable to init drm mixer plane id = %u", id);
+
+ return ret;
+}
+
+static irqreturn_t xlnx_mix_intr_handler(int irq, void *data)
+{
+ struct xlnx_mix_hw *mixer = data;
+ u32 intr = xlnx_mix_get_intr_status(mixer);
+
+ if (!intr)
+ return IRQ_NONE;
+ if (mixer->intrpt_handler_fn)
+ mixer->intrpt_handler_fn(mixer->intrpt_data);
+ xlnx_mix_clear_intr_status(mixer, intr);
+
+ return IRQ_HANDLED;
+}
+
+static void xlnx_mix_create_plane_properties(struct xlnx_mix *mixer)
+{
+ mixer->scale_prop = drm_property_create_range(mixer->drm, 0, "scale",
+ XVMIX_SCALE_FACTOR_1X,
+ XVMIX_SCALE_FACTOR_4X);
+ mixer->alpha_prop = drm_property_create_range(mixer->drm, 0, "alpha",
+ XVMIX_ALPHA_MIN,
+ XVMIX_ALPHA_MAX);
+}
+
+static int xlnx_mix_plane_create(struct device *dev, struct xlnx_mix *mixer)
+{
+ struct xlnx_mix_hw *mixer_hw;
+ struct device_node *node, *layer_node;
+ char name[20];
+ struct xlnx_mix_layer_data *layer_data;
+ int ret, i;
+ int layer_idx;
+
+ node = dev->of_node;
+ mixer_hw = &mixer->mixer_hw;
+ xlnx_mix_create_plane_properties(mixer);
+
+ mixer->planes[XVMIX_MASTER_LAYER_IDX].mixer_layer =
+ &mixer_hw->layer_data[XVMIX_MASTER_LAYER_IDX];
+ mixer->planes[XVMIX_MASTER_LAYER_IDX].id = XVMIX_MASTER_LAYER_IDX;
+ mixer->hw_master_layer = &mixer->planes[XVMIX_MASTER_LAYER_IDX];
+
+ if (mixer_hw->logo_layer_en) {
+ mixer->planes[XVMIX_LOGO_LAYER_IDX].mixer_layer =
+ &mixer_hw->layer_data[XVMIX_LOGO_LAYER_IDX];
+ mixer->planes[XVMIX_LOGO_LAYER_IDX].id = XVMIX_LOGO_LAYER_IDX;
+ mixer->hw_logo_layer = &mixer->planes[XVMIX_LOGO_LAYER_IDX];
+ layer_node = of_get_child_by_name(node, "logo");
+ ret = xlnx_mix_init_plane(&mixer->planes[XVMIX_LOGO_LAYER_IDX],
+ 1, layer_node);
+ if (ret)
+ return ret;
+ }
+ layer_idx = mixer_hw->logo_layer_en ? 2 : 1;
+ for (i = 1; i < mixer_hw->num_layers; i++, layer_idx++) {
+ snprintf(name, sizeof(name), "layer_%d", i);
+ ret = xlnx_mix_of_init_layer(dev, node, name,
+ &mixer_hw->layer_data[layer_idx],
+ mixer_hw->max_layer_width,
+ mixer, layer_idx);
+ if (ret)
+ return ret;
+ }
+ /* If none of the overlay layers were designated as the drm
+ * primary layer, default to the mixer's video0 layer as drm primary
+ */
+ if (!mixer->drm_primary_layer)
+ mixer->drm_primary_layer = mixer->hw_master_layer;
+ layer_node = of_get_child_by_name(node, "layer_0");
+ ret = xlnx_mix_init_plane(&mixer->planes[XVMIX_MASTER_LAYER_IDX], 1,
+ layer_node);
+ /* request irq and obtain pixels-per-clock (ppc) property */
+ mixer_hw->irq = irq_of_parse_and_map(node, 0);
+ if (mixer_hw->irq > 0) {
+ ret = devm_request_irq(dev, mixer_hw->irq,
+ xlnx_mix_intr_handler,
+ IRQF_SHARED, "xlnx-mixer", mixer_hw);
+ if (ret) {
+ dev_err(dev, "Failed to request irq\n");
+ return ret;
+ }
+ }
+ ret = of_property_read_u32(node, "xlnx,ppc", &mixer_hw->ppc);
+ if (ret) {
+ dev_err(dev, "No xlnx,ppc property for mixer dts\n");
+ return ret;
+ }
+
+ mixer->max_width = XVMIX_DISP_MAX_WIDTH;
+ mixer->max_height = XVMIX_DISP_MAX_HEIGHT;
+ if (mixer->hw_logo_layer) {
+ layer_data = &mixer_hw->layer_data[XVMIX_LOGO_LAYER_IDX];
+ mixer->max_cursor_width = layer_data->hw_config.max_width;
+ mixer->max_cursor_height = layer_data->hw_config.max_height;
+ }
+ return 0;
+}
+
+/**
+ * xlnx_mix_plane_restore - Restore the plane states
+ * @mixer: mixer device core structure
+ *
+ * Restore the plane states to the default ones. Any state that needs to be
+ * restored should be here. This improves consistency as applications see
+ * the same default values, and removes mismatch between software and hardware
+ * values as software values are updated as hardware values are reset.
+ */
+static void xlnx_mix_plane_restore(struct xlnx_mix *mixer)
+{
+ struct xlnx_mix_plane *plane;
+ unsigned int i;
+
+ if (!mixer)
+ return;
+ /*
+ * Reinitialize property default values as they get reset by DPMS OFF
+ * operation. User will read the correct default values later, and
+ * planes will be initialized with default values.
+ */
+ for (i = 0; i < mixer->num_planes; i++) {
+ plane = &mixer->planes[i];
+ if (!plane)
+ continue;
+ xlnx_mix_hw_plane_dpms(plane, DRM_MODE_DPMS_OFF);
+ }
+}
+
/**
 * xlnx_mix_set_bkg_col - Set background color
 * @mixer: Mixer instance to program with new background color
 * @rgb_value: RGB encoded as 32-bit integer in little-endian format
 *
 * Set the color to be output as background color when the background
 * stream layer is not covering the screen (NOTE(review): exact hardware
 * semantics inferred from register names - confirm with the mixer IP
 * documentation).  The R/G/B components are unpacked from @rgb_value in
 * bg_layer_bpc-wide fields (R lowest) and masked to the configured bits
 * per component.
 */
static void xlnx_mix_set_bkg_col(struct xlnx_mix_hw *mixer, u64 rgb_value)
{
	u32 bg_bpc = mixer->bg_layer_bpc;
	u32 bpc_mask_shift = XVMIX_MAX_BPC - bg_bpc;
	u32 val_mask = (GENMASK(15, 0) >> bpc_mask_shift);
	u16 b_val = (rgb_value >> (bg_bpc * 2)) & val_mask;
	u16 g_val = (rgb_value >> bg_bpc) & val_mask;
	u16 r_val = (rgb_value >> 0) & val_mask;

	/* Set Background Color */
	reg_writel(mixer->base, XVMIX_BACKGROUND_Y_R_DATA, r_val);
	reg_writel(mixer->base, XVMIX_BACKGROUND_U_G_DATA, g_val);
	reg_writel(mixer->base, XVMIX_BACKGROUND_V_B_DATA, b_val);
	/* remember the value so it can be restored after a core reset */
	mixer->bg_color = rgb_value;
}
+
/**
 * xlnx_mix_reset - Reset the mixer core video generator
 * @mixer: Mixer core instance for which to start video output
 *
 * Toggle the reset gpio and restores the bg color, plane and interrupt mask.
 */
static void xlnx_mix_reset(struct xlnx_mix *mixer)
{
	struct xlnx_mix_hw *mixer_hw = &mixer->mixer_hw;

	/* pulse the raw reset line: assert (0), then release (1) */
	gpiod_set_raw_value(mixer_hw->reset_gpio, 0);
	gpiod_set_raw_value(mixer_hw->reset_gpio, 1);
	/* restore layer properties and bg color after reset */
	xlnx_mix_set_bkg_col(mixer_hw, mixer_hw->bg_color);
	xlnx_mix_plane_restore(mixer);
	xlnx_mix_intrpt_enable_done(&mixer->mixer_hw);
}
+
+static void xlnx_mix_dpms(struct xlnx_mix *mixer, int dpms)
+{
+ switch (dpms) {
+ case DRM_MODE_DPMS_ON:
+ xlnx_mix_start(&mixer->mixer_hw);
+ break;
+ default:
+ xlnx_mix_stop(&mixer->mixer_hw);
+ mdelay(50); /* let IP shut down */
+ xlnx_mix_reset(mixer);
+ }
+}
+
+/* set crtc dpms */
+static void xlnx_mix_crtc_dpms(struct drm_crtc *base_crtc, int dpms)
+{
+ struct xlnx_crtc *crtc = to_xlnx_crtc(base_crtc);
+ struct xlnx_mix *mixer = to_xlnx_mixer(crtc);
+ int ret;
+ struct videomode vm;
+ struct drm_display_mode *mode = &base_crtc->mode;
+
+ DRM_DEBUG_KMS("dpms: %d\n", dpms);
+ if (mixer->dpms == dpms)
+ return;
+ mixer->dpms = dpms;
+
+ switch (dpms) {
+ case DRM_MODE_DPMS_ON:
+ if (!mixer->pixel_clock_enabled) {
+ ret = clk_prepare_enable(mixer->pixel_clock);
+ if (ret) {
+ DRM_ERROR("failed to enable a pixel clock\n");
+ mixer->pixel_clock_enabled = false;
+ }
+ }
+ mixer->pixel_clock_enabled = true;
+
+ if (mixer->vtc_bridge) {
+ drm_display_mode_to_videomode(mode, &vm);
+ xlnx_bridge_set_timing(mixer->vtc_bridge, &vm);
+ xlnx_bridge_enable(mixer->vtc_bridge);
+ }
+
+ xlnx_mix_dpms(mixer, dpms);
+ xlnx_mix_plane_dpms(base_crtc->primary, dpms);
+ break;
+ default:
+ xlnx_mix_plane_dpms(base_crtc->primary, dpms);
+ xlnx_mix_dpms(mixer, dpms);
+ xlnx_bridge_disable(mixer->vtc_bridge);
+ if (mixer->pixel_clock_enabled) {
+ clk_disable_unprepare(mixer->pixel_clock);
+ mixer->pixel_clock_enabled = false;
+ }
+ break;
+ }
+}
+
+static void xlnx_mix_set_intr_handler(struct xlnx_mix *mixer,
+ void (*intr_handler_fn)(void *),
+ void *data)
+{
+ mixer->mixer_hw.intrpt_handler_fn = intr_handler_fn;
+ mixer->mixer_hw.intrpt_data = data;
+}
+
/*
 * Vblank callback run from the mixer interrupt: report the vblank to DRM
 * and deliver any pending page-flip event under the DRM event lock.
 */
static void xlnx_mix_crtc_vblank_handler(void *data)
{
	struct drm_crtc *base_crtc = data;
	struct xlnx_crtc *crtc = to_xlnx_crtc(base_crtc);
	struct xlnx_mix *mixer = to_xlnx_mixer(crtc);
	struct drm_device *drm = base_crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	drm_crtc_handle_vblank(base_crtc);
	/* Finish page flip */
	spin_lock_irqsave(&drm->event_lock, flags);
	/* claim the event under the lock so it is sent exactly once */
	event = mixer->event;
	mixer->event = NULL;
	if (event) {
		drm_crtc_send_vblank_event(base_crtc, event);
		/* drop the vblank reference taken when the flip was queued */
		drm_crtc_vblank_put(base_crtc);
	}
	spin_unlock_irqrestore(&drm->event_lock, flags);
}
+
+/*
+ * DRM .enable_vblank hook: route the mixer interrupt to the vblank
+ * handler with this crtc as callback context.  Always succeeds.
+ */
+static int xlnx_mix_crtc_enable_vblank(struct drm_crtc *base_crtc)
+{
+	struct xlnx_crtc *crtc = to_xlnx_crtc(base_crtc);
+	struct xlnx_mix *mixer = to_xlnx_mixer(crtc);
+
+	xlnx_mix_set_intr_handler(mixer, xlnx_mix_crtc_vblank_handler,
+				  base_crtc);
+	return 0;
+}
+
+/* DRM .disable_vblank hook: detach the mixer interrupt handler. */
+static void xlnx_mix_crtc_disable_vblank(struct drm_crtc *base_crtc)
+{
+	struct xlnx_crtc *crtc = to_xlnx_crtc(base_crtc);
+	struct xlnx_mix *mixer = to_xlnx_mixer(crtc);
+
+	mixer->mixer_hw.intrpt_handler_fn = NULL;
+	mixer->mixer_hw.intrpt_data = NULL;
+}
+
+/*
+ * Crtc .destroy hook: drop the cached property pointers, force the
+ * pipeline off, release the pixel clock, then let the DRM core clean
+ * up the crtc object.
+ */
+static void xlnx_mix_crtc_destroy(struct drm_crtc *base_crtc)
+{
+	struct xlnx_crtc *crtc = to_xlnx_crtc(base_crtc);
+	struct xlnx_mix *mixer = to_xlnx_mixer(crtc);
+
+	/* make sure crtc is off */
+	mixer->alpha_prop = NULL;
+	mixer->scale_prop = NULL;
+	mixer->bg_color = NULL;
+	xlnx_mix_crtc_dpms(base_crtc, DRM_MODE_DPMS_OFF);
+
+	/*
+	 * NOTE(review): the DPMS_OFF path above already disables the pixel
+	 * clock when it was on, so this only covers the case where the
+	 * clock is still enabled at this point.
+	 */
+	if (mixer->pixel_clock_enabled) {
+		clk_disable_unprepare(mixer->pixel_clock);
+		mixer->pixel_clock_enabled = false;
+	}
+	drm_crtc_cleanup(base_crtc);
+}
+
+/*
+ * Crtc .atomic_set_property hook: no crtc-level properties are handled.
+ * NOTE(review): returning 0 silently accepts any property; DRM drivers
+ * conventionally return -EINVAL for unknown properties — confirm intent.
+ */
+static int
+xlnx_mix_disp_crtc_atomic_set_property(struct drm_crtc *crtc,
+				       struct drm_crtc_state *state,
+				       struct drm_property *property,
+				       uint64_t val)
+{
+	return 0;
+}
+
+/*
+ * Crtc .atomic_get_property hook: nothing to report.
+ * NOTE(review): *val is left untouched on "success"; a -EINVAL for
+ * unknown properties would be the conventional behavior — confirm.
+ */
+static int
+xlnx_mix_disp_crtc_atomic_get_property(struct drm_crtc *crtc,
+				       const struct drm_crtc_state *state,
+				       struct drm_property *property,
+				       uint64_t *val)
+{
+	return 0;
+}
+
+/*
+ * Crtc vtable.  Made const: drm_crtc_init_with_planes() takes a
+ * const struct drm_crtc_funcs *, and a mutable function-pointer table
+ * is an unnecessary writable target.
+ */
+static const struct drm_crtc_funcs xlnx_mix_crtc_funcs = {
+	.destroy		= xlnx_mix_crtc_destroy,
+	.set_config		= drm_atomic_helper_set_config,
+	.page_flip		= drm_atomic_helper_page_flip,
+	.atomic_set_property	= xlnx_mix_disp_crtc_atomic_set_property,
+	.atomic_get_property	= xlnx_mix_disp_crtc_atomic_get_property,
+	.reset			= drm_atomic_helper_crtc_reset,
+	.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
+	.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
+	.enable_vblank		= xlnx_mix_crtc_enable_vblank,
+	.disable_vblank		= xlnx_mix_crtc_disable_vblank,
+};
+
+/*
+ * Crtc .atomic_enable hook: power the pipeline on, then sleep for
+ * roughly three vblank periods so the timing generator stabilizes.
+ */
+static void
+xlnx_mix_crtc_atomic_enable(struct drm_crtc *crtc,
+			    struct drm_crtc_state *old_crtc_state)
+{
+	struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
+	int vrefresh;
+
+	xlnx_mix_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+
+	/* Delay of 3 vblank interval for timing gen to be stable */
+	/*
+	 * NOTE(review): assumes clock, vtotal and htotal are non-zero here,
+	 * otherwise vrefresh is 0 and the msleep argument divides by zero —
+	 * confirm the mode is always validated before enable.
+	 */
+	vrefresh = ((adjusted_mode->clock * 1000) /
+		    (adjusted_mode->vtotal * adjusted_mode->htotal));
+	msleep(3 * 1000 / vrefresh);
+}
+
+/**
+ * xlnx_mix_clear_event - Clear any event if pending
+ * @crtc: DRM crtc object
+ *
+ * Complete a pending flip event without waiting for a vblank, for use
+ * on the disable path where no further vblanks will arrive.
+ */
+static void xlnx_mix_clear_event(struct drm_crtc *crtc)
+{
+	if (crtc->state->event) {
+		complete_all(crtc->state->event->base.completion);
+		crtc->state->event = NULL;
+	}
+}
+
+/*
+ * Crtc .atomic_disable hook: power the pipeline down and complete any
+ * pending flip event, since vblank interrupts stop with the pipeline.
+ */
+static void
+xlnx_mix_crtc_atomic_disable(struct drm_crtc *crtc,
+			     struct drm_crtc_state *old_crtc_state)
+{
+	xlnx_mix_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+	xlnx_mix_clear_event(crtc);
+}
+
+/* Nothing to do: mode programming happens on the dpms/enable path. */
+static void xlnx_mix_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+}
+
+/* Pull every plane on this crtc into the atomic state for validation. */
+static int xlnx_mix_crtc_atomic_check(struct drm_crtc *crtc,
+				      struct drm_crtc_state *state)
+{
+	return drm_atomic_add_affected_planes(state->state, crtc);
+}
+
+/*
+ * Crtc .atomic_begin hook: take over the flip event from the atomic
+ * helper and stash it in the mixer; it is delivered from the vblank
+ * handler together with a drm_crtc_vblank_put() for the reference
+ * taken here.
+ */
+static void
+xlnx_mix_crtc_atomic_begin(struct drm_crtc *crtc,
+			   struct drm_crtc_state *old_crtc_state)
+{
+	/* Don't rely on vblank when disabling crtc */
+	if (crtc->state->event) {
+		struct xlnx_crtc *xcrtc = to_xlnx_crtc(crtc);
+		struct xlnx_mix *mixer = to_xlnx_mixer(xcrtc);
+
+		/* Consume the flip_done event from atomic helper */
+		crtc->state->event->pipe = drm_crtc_index(crtc);
+		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+		mixer->event = crtc->state->event;
+		crtc->state->event = NULL;
+	}
+}
+
+/*
+ * Crtc helper vtable.  Made const: drm_crtc_helper_add() takes a
+ * const struct drm_crtc_helper_funcs *.
+ */
+static const struct drm_crtc_helper_funcs xlnx_mix_crtc_helper_funcs = {
+	.atomic_enable	= xlnx_mix_crtc_atomic_enable,
+	.atomic_disable	= xlnx_mix_crtc_atomic_disable,
+	.mode_set_nofb	= xlnx_mix_crtc_mode_set_nofb,
+	.atomic_check	= xlnx_mix_crtc_atomic_check,
+	.atomic_begin	= xlnx_mix_crtc_atomic_begin,
+};
+
+/**
+ * xlnx_mix_crtc_create - create crtc for mixer
+ * @mixer: xilinx video mixer object
+ *
+ * Attaches the per-plane properties, acquires and enables the pixel
+ * clock, initializes the DRM crtc with the primary and logo (cursor)
+ * planes, and registers the crtc with the xlnx DRM core.
+ *
+ * Return:
+ * Zero on success, error on failure
+ *
+ */
+static int xlnx_mix_crtc_create(struct xlnx_mix *mixer)
+{
+	struct xlnx_crtc *crtc;
+	struct drm_plane *primary_plane = NULL;
+	struct drm_plane *cursor_plane = NULL;
+	int ret, i;
+
+	crtc = &mixer->crtc;
+	primary_plane = &mixer->drm_primary_layer->base;
+	cursor_plane = &mixer->hw_logo_layer->base;
+
+	for (i = 0; i < mixer->num_planes; i++)
+		xlnx_mix_attach_plane_prop(&mixer->planes[i]);
+	mixer->pixel_clock = devm_clk_get(mixer->drm->dev, NULL);
+	if (IS_ERR(mixer->pixel_clock)) {
+		/*
+		 * Tolerated: with a NULL clock, clk_prepare_enable(NULL)
+		 * below is a no-op that returns 0.
+		 */
+		DRM_DEBUG_KMS("failed to get pixel clock\n");
+		mixer->pixel_clock = NULL;
+	}
+	ret = clk_prepare_enable(mixer->pixel_clock);
+	if (ret) {
+		DRM_ERROR("failed to enable a pixel clock\n");
+		mixer->pixel_clock_enabled = false;
+		goto err_plane;
+	}
+	mixer->pixel_clock_enabled = true;
+	/* initialize drm crtc */
+	ret = drm_crtc_init_with_planes(mixer->drm, &crtc->crtc,
+					&mixer->drm_primary_layer->base,
+					&mixer->hw_logo_layer->base,
+					&xlnx_mix_crtc_funcs, NULL);
+	if (ret) {
+		DRM_ERROR("failed to initialize mixer crtc\n");
+		goto err_pixel_clk;
+	}
+	drm_crtc_helper_add(&crtc->crtc, &xlnx_mix_crtc_helper_funcs);
+	crtc->get_max_width = &xlnx_mix_crtc_get_max_width;
+	crtc->get_max_height = &xlnx_mix_crtc_get_max_height;
+	crtc->get_align = &xlnx_mix_crtc_get_align;
+	crtc->get_format = &xlnx_mix_crtc_get_format;
+	crtc->get_cursor_height = &xlnx_mix_crtc_get_max_cursor_height;
+	crtc->get_cursor_width = &xlnx_mix_crtc_get_max_cursor_width;
+	xlnx_crtc_register(mixer->drm, crtc);
+
+	return 0;
+
+err_pixel_clk:
+	if (mixer->pixel_clock_enabled) {
+		clk_disable_unprepare(mixer->pixel_clock);
+		mixer->pixel_clock_enabled = false;
+	}
+err_plane:
+	return ret;
+}
+
+/**
+ * xlnx_mix_init - Establishes a default power-on state for the mixer IP
+ * core
+ * @mixer: instance of IP core to initialize to a default state
+ *
+ * Background layer initialized to maximum height and width settings based on
+ * device tree properties and all overlay layers set to minimum height and width
+ * sizes and positioned to 0,0 in the crtc. All layers are inactive (resulting
+ * in video output being generated by the background color generator).
+ * Interrupts are disabled and the IP is started (with auto-restart enabled).
+ */
+static void xlnx_mix_init(struct xlnx_mix_hw *mixer)
+{
+	u32 i;
+	u32 bg_bpc = mixer->bg_layer_bpc;
+	/*
+	 * NOTE(review): presumably packs a full-intensity single channel at
+	 * the background layer's bits-per-component into the packed
+	 * background-color word ("default to blue" below) — confirm against
+	 * the IP's background color register layout.
+	 */
+	u64 rgb_bg_clr = (0xFFFF >> (XVMIX_MAX_BPC - bg_bpc)) << (bg_bpc * 2);
+	enum xlnx_mix_layer_id layer_id;
+	struct xlnx_mix_layer_data *layer_data;
+
+	layer_data = xlnx_mix_get_layer_data(mixer, XVMIX_LAYER_MASTER);
+	/*
+	 * NOTE(review): passing max_layers as the id appears to be the
+	 * "disable all layers" convention — confirm in the hw helpers.
+	 */
+	xlnx_mix_layer_disable(mixer, mixer->max_layers);
+	xlnx_mix_set_active_area(mixer, layer_data->hw_config.max_width,
+				 layer_data->hw_config.max_height);
+	/* default to blue */
+	xlnx_mix_set_bkg_col(mixer, rgb_bg_clr);
+
+	for (i = 0; i < mixer->layer_cnt; i++) {
+		layer_id = mixer->layer_data[i].id;
+		layer_data = &mixer->layer_data[i];
+		if (layer_id == XVMIX_LAYER_MASTER)
+			continue;
+		xlnx_mix_set_layer_window(mixer, layer_id, 0, 0,
+					  XVMIX_LAYER_WIDTH_MIN,
+					  XVMIX_LAYER_HEIGHT_MIN, 0);
+		if (layer_data->hw_config.can_scale)
+			xlnx_mix_set_layer_scaling(mixer, layer_id, 0);
+		if (layer_data->hw_config.can_alpha)
+			xlnx_mix_set_layer_alpha(mixer, layer_id,
+						 XVMIX_ALPHA_MAX);
+	}
+	xlnx_mix_intrpt_enable_done(mixer);
+}
+
+/*
+ * Component .bind: create the planes and crtc against the master DRM
+ * device, then put the mixer hardware into its default state.
+ *
+ * Fix: ret was declared u32, so the negative errno values returned by
+ * the create helpers were stored in an unsigned variable; use int.
+ *
+ * Return: 0 on success, negative errno from the create helpers.
+ */
+static int xlnx_mix_bind(struct device *dev, struct device *master,
+			 void *data)
+{
+	struct xlnx_mix *mixer = dev_get_drvdata(dev);
+	struct drm_device *drm = data;
+	int ret;
+
+	mixer->drm = drm;
+	ret = xlnx_mix_plane_create(dev, mixer);
+	if (ret)
+		return ret;
+	ret = xlnx_mix_crtc_create(mixer);
+	if (ret)
+		return ret;
+	xlnx_mix_init(&mixer->mixer_hw);
+
+	return 0;
+}
+
+/*
+ * Component .unbind: clear drvdata, mask mixer interrupts and
+ * unregister the crtc from the xlnx DRM core.
+ */
+static void xlnx_mix_unbind(struct device *dev, struct device *master,
+			    void *data)
+{
+	struct xlnx_mix *mixer = dev_get_drvdata(dev);
+
+	dev_set_drvdata(dev, NULL);
+	xlnx_mix_intrpt_disable(&mixer->mixer_hw);
+	xlnx_crtc_unregister(mixer->drm, &mixer->crtc);
+}
+
+/* Component callbacks invoked by the xlnx DRM master device. */
+static const struct component_ops xlnx_mix_component_ops = {
+	.bind	= xlnx_mix_bind,
+	.unbind	= xlnx_mix_unbind,
+};
+
+/*
+ * Platform probe: parse the DT configuration, register as a component
+ * and bring up the DRM pipeline.
+ *
+ * Fix: on xlnx_drm_pipeline_init() failure, ret was still 0 from the
+ * preceding component_add(), so probe reported success while taking
+ * the error path.  Propagate PTR_ERR() instead.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int xlnx_mix_probe(struct platform_device *pdev)
+{
+	struct xlnx_mix *mixer;
+	int ret;
+
+	mixer = devm_kzalloc(&pdev->dev, sizeof(*mixer), GFP_KERNEL);
+	if (!mixer)
+		return -ENOMEM;
+
+	/* Sub-driver will access mixer from drvdata */
+	platform_set_drvdata(pdev, mixer);
+	ret = xlnx_mix_dt_parse(&pdev->dev, mixer);
+	if (ret) {
+		if (ret != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Failed to probe mixer\n");
+		return ret;
+	}
+
+	ret = component_add(&pdev->dev, &xlnx_mix_component_ops);
+	if (ret)
+		return ret;
+
+	mixer->master = xlnx_drm_pipeline_init(pdev);
+	if (IS_ERR(mixer->master)) {
+		ret = PTR_ERR(mixer->master);
+		dev_err(&pdev->dev, "Failed to initialize the drm pipeline\n");
+		goto err_component;
+	}
+
+	dev_info(&pdev->dev, "Xilinx Mixer driver probed success\n");
+	return 0;
+
+err_component:
+	component_del(&pdev->dev, &xlnx_mix_component_ops);
+	return ret;
+}
+
+/*
+ * Platform remove: release the optional VTC bridge reference, tear
+ * down the DRM pipeline and drop the component registration.
+ */
+static int xlnx_mix_remove(struct platform_device *pdev)
+{
+	struct xlnx_mix *mixer = platform_get_drvdata(pdev);
+
+	/* the bridge is optional, so only put it when one was acquired */
+	if (mixer->vtc_bridge)
+		of_xlnx_bridge_put(mixer->vtc_bridge);
+	xlnx_drm_pipeline_exit(mixer->master);
+	component_del(&pdev->dev, &xlnx_mix_component_ops);
+	return 0;
+}
+
+/*
+ * TODO:
+ * In Mixer IP core version 4.0, layer enable bits and logo layer offsets
+ * have been changed. To provide backward compatibility number of max layers
+ * field has been taken to differentiate IP versions.
+ * This logic will have to be changed properly using the IP core version.
+ */
+
+/* All three IP generations are served by this driver; see TODO above. */
+static const struct of_device_id xlnx_mix_of_match[] = {
+	{ .compatible = "xlnx,mixer-3.0", },
+	{ .compatible = "xlnx,mixer-4.0", },
+	{ .compatible = "xlnx,mixer-5.0", },
+	{ /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, xlnx_mix_of_match);
+
+/* Platform driver glue for the mixer device node. */
+static struct platform_driver xlnx_mix_driver = {
+	.probe = xlnx_mix_probe,
+	.remove = xlnx_mix_remove,
+	.driver = {
+		.name = "xlnx-mixer",
+		.of_match_table = xlnx_mix_of_match,
+	},
+};
+
+module_platform_driver(xlnx_mix_driver);
+
+MODULE_AUTHOR("Saurabh Sengar");
+MODULE_DESCRIPTION("Xilinx Mixer Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/xlnx_pl_disp.c b/drivers/gpu/drm/xlnx/xlnx_pl_disp.c
new file mode 100644
index 000000000000..edd5692bfd1e
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_pl_disp.c
@@ -0,0 +1,648 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx DRM CRTC DMA engine driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author : Saurabh Sengar <saurabhs@xilinx.com>
+ * : Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ */
+
+#include <drm/drm_vblank.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma/xilinx_frmbuf.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <video/videomode.h>
+#include "xlnx_bridge.h"
+#include "xlnx_crtc.h"
+#include "xlnx_drv.h"
+
+/*
+ * Overview
+ * --------
+ *
+ * This driver intends to support the display pipeline with DMA engine
+ * driver by initializing DRM crtc and plane objects. The driver makes
+ * an assumption that it is a single-plane pipeline, as a multi-plane
+ * pipeline would require programming beyond the DMA engine interface.
+ */
+
+/**
+ * struct xlnx_dma_chan - struct for DMA engine
+ * @dma_chan: DMA channel
+ * @xt: Interleaved desc config container
+ * @sgl: Data chunk for dma_interleaved_template
+ */
+struct xlnx_dma_chan {
+	struct dma_chan *dma_chan;		/* slave channel ("dma0") */
+	struct dma_interleaved_template xt;	/* per-frame transfer config */
+	struct data_chunk sgl[1];		/* single line-sized chunk */
+};
+
+/**
+ * struct xlnx_pl_disp - struct for display subsystem
+ * @dev: device structure
+ * @master: logical master device from xlnx drm
+ * @xlnx_crtc: Xilinx DRM driver crtc object
+ * @plane: base drm plane object
+ * @chan: struct for DMA engine
+ * @event: vblank pending event
+ * @callback: callback for registering DMA callback function
+ * @callback_param: parameter for passing to DMA callback function
+ * @drm: core drm object
+ * @fmt: drm color format
+ * @vtc_bridge: vtc_bridge structure
+ * @fid: field id
+ * @prev_fid: previous field id
+ */
+struct xlnx_pl_disp {
+	struct device *dev;
+	struct platform_device *master;
+	struct xlnx_crtc xlnx_crtc;
+	struct drm_plane plane;
+	struct xlnx_dma_chan *chan;
+	struct drm_pending_vblank_event *event;
+	dma_async_tx_callback callback;
+	void *callback_param;
+	struct drm_device *drm;
+	/* DRM fourcc, filled from the "xlnx,vformat" DT string in probe */
+	u32 fmt;
+	struct xlnx_bridge *vtc_bridge;
+	u32 fid;
+	u32 prev_fid;
+};
+
+/*
+ * Xlnx crtc functions
+ */
+/* Map an xlnx crtc back to its containing xlnx_pl_disp instance. */
+static inline struct xlnx_pl_disp *crtc_to_dma(struct xlnx_crtc *xlnx_crtc)
+{
+	return container_of(xlnx_crtc, struct xlnx_pl_disp, xlnx_crtc);
+}
+
+/**
+ * xlnx_pl_disp_complete - vblank handler
+ * @param: parameter to vblank handler
+ *
+ * This function handles the vblank interrupt, and sends an event to
+ * CRTC object.
+ */
+/**
+ * xlnx_pl_disp_complete - vblank handler
+ * @param: parameter to vblank handler
+ *
+ * This function handles the vblank interrupt, and sends an event to
+ * CRTC object.  Pipe index 0 is used, matching the single-plane /
+ * single-crtc assumption stated in the overview above.
+ */
+static void xlnx_pl_disp_complete(void *param)
+{
+	struct xlnx_pl_disp *xlnx_pl_disp = param;
+	struct drm_device *drm = xlnx_pl_disp->drm;
+
+	drm_handle_vblank(drm, 0);
+}
+
+/**
+ * xlnx_pl_disp_get_format - Get the current display pipeline format
+ * @xlnx_crtc: xlnx crtc object
+ *
+ * Get the current format of pipeline
+ *
+ * Return: the corresponding DRM_FORMAT_XXX
+ */
+/**
+ * xlnx_pl_disp_get_format - Get the current display pipeline format
+ * @xlnx_crtc: xlnx crtc object
+ *
+ * Get the current format of pipeline (the fourcc cached at probe time).
+ *
+ * Return: the corresponding DRM_FORMAT_XXX
+ */
+static uint32_t xlnx_pl_disp_get_format(struct xlnx_crtc *xlnx_crtc)
+{
+	struct xlnx_pl_disp *xlnx_pl_disp = crtc_to_dma(xlnx_crtc);
+
+	return xlnx_pl_disp->fmt;
+}
+
+/**
+ * xlnx_pl_disp_get_align - Get the alignment value for pitch
+ * @xlnx_crtc: xlnx crtc object
+ *
+ * Get the alignment value for pitch from the plane
+ *
+ * Return: The alignment value if successful, or the error code.
+ */
+/**
+ * xlnx_pl_disp_get_align - Get the alignment value for pitch
+ * @xlnx_crtc: xlnx crtc object
+ *
+ * Get the alignment value for pitch from the plane.  The DMA device's
+ * copy_align is a log2 exponent, hence the shift.
+ *
+ * Return: The alignment value if successful, or the error code.
+ */
+static unsigned int xlnx_pl_disp_get_align(struct xlnx_crtc *xlnx_crtc)
+{
+	struct xlnx_pl_disp *xlnx_pl_disp = crtc_to_dma(xlnx_crtc);
+
+	return 1 << xlnx_pl_disp->chan->dma_chan->device->copy_align;
+}
+
+/*
+ * DRM plane functions
+ */
+/* Map a DRM plane back to its containing xlnx_pl_disp instance. */
+static inline struct xlnx_pl_disp *plane_to_dma(struct drm_plane *plane)
+{
+	return container_of(plane, struct xlnx_pl_disp, plane);
+}
+
+/**
+ * xlnx_pl_disp_plane_disable - Disables DRM plane
+ * @plane: DRM plane object
+ *
+ * Disable the DRM plane, by stopping the corrosponding DMA
+ */
+/**
+ * xlnx_pl_disp_plane_disable - Disables DRM plane
+ * @plane: DRM plane object
+ *
+ * Disable the DRM plane by synchronously terminating the corresponding
+ * DMA channel.
+ */
+static void xlnx_pl_disp_plane_disable(struct drm_plane *plane)
+{
+	struct xlnx_pl_disp *xlnx_pl_disp = plane_to_dma(plane);
+	struct xlnx_dma_chan *xlnx_dma_chan = xlnx_pl_disp->chan;
+
+	dmaengine_terminate_sync(xlnx_dma_chan->dma_chan);
+}
+
+/**
+ * xlnx_pl_disp_plane_enable - Enables DRM plane
+ * @plane: DRM plane object
+ *
+ * Enable the DRM plane, by enabling the corresponding DMA
+ */
+/**
+ * xlnx_pl_disp_plane_enable - Enables DRM plane
+ * @plane: DRM plane object
+ *
+ * Enable the DRM plane by preparing an interleaved DMA descriptor for
+ * the framebuffer configured in xlnx_pl_disp_plane_mode_set(),
+ * attaching the vblank callback, and issuing it on the channel.  For
+ * interlaced modes the field id is tracked so fields go out in
+ * alternating top/bottom order.
+ */
+static void xlnx_pl_disp_plane_enable(struct drm_plane *plane)
+{
+	struct xlnx_pl_disp *xlnx_pl_disp = plane_to_dma(plane);
+	struct dma_async_tx_descriptor *desc;
+	enum dma_ctrl_flags flags;
+	struct xlnx_dma_chan *xlnx_dma_chan = xlnx_pl_disp->chan;
+	struct dma_chan *dma_chan = xlnx_dma_chan->dma_chan;
+	struct dma_interleaved_template *xt = &xlnx_dma_chan->xt;
+
+	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+	desc = dmaengine_prep_interleaved_dma(dma_chan, xt, flags);
+	if (!desc) {
+		dev_err(xlnx_pl_disp->dev,
+			"failed to prepare DMA descriptor\n");
+		return;
+	}
+	desc->callback = xlnx_pl_disp->callback;
+	desc->callback_param = xlnx_pl_disp->callback_param;
+	/* fire the callback early so the next frame can be queued in time */
+	xilinx_xdma_set_earlycb(xlnx_dma_chan->dma_chan, desc, EARLY_CALLBACK);
+
+	if (plane->state->crtc->state->adjusted_mode.flags &
+	    DRM_MODE_FLAG_INTERLACE) {
+		/*
+		 * Framebuffer DMA Reader sends the first field twice, which
+		 * causes the following fields out of order. The fid is
+		 * reverted to restore the order
+		 */
+		if (plane->state->fb->flags == DRM_MODE_FB_ALTERNATE_TOP) {
+			xlnx_pl_disp->fid = 0;
+		} else if (plane->state->fb->flags ==
+			   DRM_MODE_FB_ALTERNATE_BOTTOM) {
+			xlnx_pl_disp->fid = 1;
+		} else {
+			/*
+			 * FIXME: for interlace mode, application may send
+			 * dummy packets before the video field, need to set
+			 * the fid correctly to avoid display distortion
+			 */
+			xlnx_pl_disp->fid = !xlnx_pl_disp->prev_fid;
+		}
+
+		/* same field twice: skip the submit, just signal vblank */
+		if (xlnx_pl_disp->fid == xlnx_pl_disp->prev_fid) {
+			xlnx_pl_disp_complete(xlnx_pl_disp);
+			return;
+		}
+
+		xilinx_xdma_set_fid(xlnx_dma_chan->dma_chan, desc,
+				    xlnx_pl_disp->fid);
+		xlnx_pl_disp->prev_fid = xlnx_pl_disp->fid;
+	}
+
+	dmaengine_submit(desc);
+	dma_async_issue_pending(xlnx_dma_chan->dma_chan);
+}
+
+/* Plane .atomic_disable hook: stop the backing DMA channel. */
+static void xlnx_pl_disp_plane_atomic_disable(struct drm_plane *plane,
+					      struct drm_plane_state *old_state)
+{
+	xlnx_pl_disp_plane_disable(plane);
+}
+
+/*
+ * Program the interleaved DMA template for the given framebuffer and
+ * source rectangle.  Only the src_* and fb parameters are used; the
+ * crtc_* parameters are currently ignored (no scaling/positioning).
+ *
+ * Return: 0 on success, -EINVAL for unsupported formats or missing
+ * GEM addresses.
+ */
+static int xlnx_pl_disp_plane_mode_set(struct drm_plane *plane,
+				       struct drm_framebuffer *fb,
+				       int crtc_x, int crtc_y,
+				       unsigned int crtc_w, unsigned int crtc_h,
+				       u32 src_x, uint32_t src_y,
+				       u32 src_w, uint32_t src_h)
+{
+	struct xlnx_pl_disp *xlnx_pl_disp = plane_to_dma(plane);
+	const struct drm_format_info *info = fb->format;
+	dma_addr_t luma_paddr, chroma_paddr;
+	size_t stride;
+	struct xlnx_dma_chan *xlnx_dma_chan = xlnx_pl_disp->chan;
+
+	/* at most two planes (luma + chroma) are supported */
+	if (info->num_planes > 2) {
+		dev_err(xlnx_pl_disp->dev, "Color format not supported\n");
+		return -EINVAL;
+	}
+	luma_paddr = drm_fb_cma_get_gem_addr(fb, plane->state, 0);
+	if (!luma_paddr) {
+		dev_err(xlnx_pl_disp->dev, "failed to get luma paddr\n");
+		return -EINVAL;
+	}
+
+	dev_dbg(xlnx_pl_disp->dev, "num planes = %d\n", info->num_planes);
+	xlnx_dma_chan->xt.numf = src_h;
+	xlnx_dma_chan->sgl[0].size = drm_format_plane_width_bytes(info,
+								  0, src_w);
+	/* inter-chunk gap covers the pitch padding beyond the visible line */
+	xlnx_dma_chan->sgl[0].icg = fb->pitches[0] - xlnx_dma_chan->sgl[0].size;
+	xlnx_dma_chan->xt.src_start = luma_paddr;
+	xlnx_dma_chan->xt.frame_size = info->num_planes;
+	xlnx_dma_chan->xt.dir = DMA_MEM_TO_DEV;
+	xlnx_dma_chan->xt.src_sgl = true;
+	xlnx_dma_chan->xt.dst_sgl = false;
+
+	/* Do we have a video format aware dma channel?
+	 * so, modify descriptor accordingly. Heuristic test:
+	 * we have a multi-plane format but only one dma channel
+	 */
+	if (info->num_planes > 1) {
+		chroma_paddr = drm_fb_cma_get_gem_addr(fb, plane->state, 1);
+		if (!chroma_paddr) {
+			dev_err(xlnx_pl_disp->dev,
+				"failed to get chroma paddr\n");
+			return -EINVAL;
+		}
+		stride = xlnx_dma_chan->sgl[0].size +
+			 xlnx_dma_chan->sgl[0].icg;
+		/* gap from end of luma plane to start of chroma plane */
+		xlnx_dma_chan->sgl[0].src_icg = chroma_paddr -
+						xlnx_dma_chan->xt.src_start -
+						(xlnx_dma_chan->xt.numf * stride);
+	}
+
+	return 0;
+}
+
+/*
+ * Plane .atomic_update hook: reprogram the DMA template from the new
+ * plane state (16.16 fixed-point src coords are truncated to pixels),
+ * push the fourcc to a format-aware frame buffer DMA, then submit.
+ */
+static void xlnx_pl_disp_plane_atomic_update(struct drm_plane *plane,
+					     struct drm_plane_state *old_state)
+{
+	int ret;
+	struct xlnx_pl_disp *xlnx_pl_disp = plane_to_dma(plane);
+
+	ret = xlnx_pl_disp_plane_mode_set(plane,
+					  plane->state->fb,
+					  plane->state->crtc_x,
+					  plane->state->crtc_y,
+					  plane->state->crtc_w,
+					  plane->state->crtc_h,
+					  plane->state->src_x >> 16,
+					  plane->state->src_y >> 16,
+					  plane->state->src_w >> 16,
+					  plane->state->src_h >> 16);
+	if (ret) {
+		dev_err(xlnx_pl_disp->dev, "failed to mode set a plane\n");
+		return;
+	}
+	/* in case frame buffer is used set the color format */
+	xilinx_xdma_drm_config(xlnx_pl_disp->chan->dma_chan,
+			       xlnx_pl_disp->plane.state->fb->format->format);
+	/* apply the new fb addr and enable */
+	xlnx_pl_disp_plane_enable(plane);
+}
+
+/*
+ * Plane .atomic_check hook: require a plane when the crtc is active,
+ * and force a full modeset when the framebuffer format changes so the
+ * DMA format configuration is redone.
+ */
+static int
+xlnx_pl_disp_plane_atomic_check(struct drm_plane *plane,
+				struct drm_plane_state *new_plane_state)
+{
+	struct drm_atomic_state *state = new_plane_state->state;
+	const struct drm_plane_state *old_plane_state =
+		drm_atomic_get_old_plane_state(state, plane);
+	struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc;
+	const struct drm_crtc_state *old_crtc_state;
+	struct drm_crtc_state *new_crtc_state;
+
+	if (!crtc)
+		return 0;
+
+	old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
+	new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+	/* plane must be enabled when state is active */
+	if (new_crtc_state->active && !new_plane_state->crtc)
+		return -EINVAL;
+
+	/*
+	 * This check is required to call modeset if there is a change in color
+	 * format
+	 */
+	if (new_plane_state->fb && old_plane_state->fb &&
+	    new_plane_state->fb->format->format !=
+	    old_plane_state->fb->format->format)
+		new_crtc_state->mode_changed = true;
+
+	return 0;
+}
+
+/* Plane helper vtable (atomic path only). */
+static const struct drm_plane_helper_funcs xlnx_pl_disp_plane_helper_funcs = {
+	.atomic_update = xlnx_pl_disp_plane_atomic_update,
+	.atomic_disable = xlnx_pl_disp_plane_atomic_disable,
+	.atomic_check = xlnx_pl_disp_plane_atomic_check,
+};
+
+/*
+ * Plane vtable.  Made const to match the helper table above:
+ * drm_universal_plane_init() takes a const struct drm_plane_funcs *.
+ */
+static const struct drm_plane_funcs xlnx_pl_disp_plane_funcs = {
+	.update_plane		= drm_atomic_helper_update_plane,
+	.disable_plane		= drm_atomic_helper_disable_plane,
+	.destroy		= drm_plane_cleanup,
+	.reset			= drm_atomic_helper_plane_reset,
+	.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
+};
+
+/* Map a DRM crtc to its containing xlnx_pl_disp instance. */
+static inline struct xlnx_pl_disp *drm_crtc_to_dma(struct drm_crtc *crtc)
+{
+	return crtc_to_dma(to_xlnx_crtc(crtc));
+}
+
+/*
+ * Crtc .atomic_begin hook: turn vblank delivery on and, under the
+ * event lock, arm the flip event so the core sends it on the next
+ * vblank reported by the DMA completion callback.
+ */
+static void xlnx_pl_disp_crtc_atomic_begin(struct drm_crtc *crtc,
+					   struct drm_crtc_state *old_state)
+{
+	drm_crtc_vblank_on(crtc);
+	spin_lock_irq(&crtc->dev->event_lock);
+	if (crtc->state->event) {
+		/* Consume the flip_done event from atomic helper */
+		crtc->state->event->pipe = drm_crtc_index(crtc);
+		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+		drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+		crtc->state->event = NULL;
+	}
+	spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+/*
+ * Complete a pending flip event without a vblank, for the disable path
+ * where no further vblank callbacks will arrive.
+ */
+static void xlnx_pl_disp_clear_event(struct drm_crtc *crtc)
+{
+	if (crtc->state->event) {
+		complete_all(crtc->state->event->base.completion);
+		crtc->state->event = NULL;
+	}
+}
+
+/*
+ * Crtc .atomic_enable hook: program the optional VTC bridge with the
+ * adjusted mode timings, start the primary plane's DMA, then wait one
+ * vblank period for the timing generator.
+ */
+static void xlnx_pl_disp_crtc_atomic_enable(struct drm_crtc *crtc,
+					    struct drm_crtc_state *old_state)
+{
+	struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
+	int vrefresh;
+	struct xlnx_crtc *xlnx_crtc = to_xlnx_crtc(crtc);
+	struct xlnx_pl_disp *xlnx_pl_disp = crtc_to_dma(xlnx_crtc);
+	struct videomode vm;
+
+	if (xlnx_pl_disp->vtc_bridge) {
+		/* set video timing */
+		drm_display_mode_to_videomode(adjusted_mode, &vm);
+		xlnx_bridge_set_timing(xlnx_pl_disp->vtc_bridge, &vm);
+		xlnx_bridge_enable(xlnx_pl_disp->vtc_bridge);
+	}
+
+	xlnx_pl_disp_plane_enable(crtc->primary);
+
+	/* Delay of 1 vblank interval for timing gen to be stable */
+	/* NOTE(review): assumes a validated non-zero mode; a zero vrefresh
+	 * here would divide by zero — confirm.
+	 */
+	vrefresh = (adjusted_mode->clock * 1000) /
+		   (adjusted_mode->vtotal * adjusted_mode->htotal);
+	msleep(1 * 1000 / vrefresh);
+}
+
+/*
+ * Crtc .atomic_disable hook: stop the plane DMA, flush any pending
+ * flip event, turn vblanks off and disable the VTC bridge.
+ */
+static void xlnx_pl_disp_crtc_atomic_disable(struct drm_crtc *crtc,
+					     struct drm_crtc_state *old_state)
+{
+	struct xlnx_crtc *xlnx_crtc = to_xlnx_crtc(crtc);
+	struct xlnx_pl_disp *xlnx_pl_disp = crtc_to_dma(xlnx_crtc);
+
+	xlnx_pl_disp_plane_disable(crtc->primary);
+	xlnx_pl_disp_clear_event(crtc);
+	drm_crtc_vblank_off(crtc);
+	xlnx_bridge_disable(xlnx_pl_disp->vtc_bridge);
+
+	/* first field is expected to be bottom so init previous field to top */
+	xlnx_pl_disp->prev_fid = 1;
+}
+
+/* Pull every plane on this crtc into the atomic state for validation. */
+static int xlnx_pl_disp_crtc_atomic_check(struct drm_crtc *crtc,
+					  struct drm_crtc_state *state)
+{
+	return drm_atomic_add_affected_planes(state->state, crtc);
+}
+
+/*
+ * Crtc helper vtable.  Made const: drm_crtc_helper_add() takes a
+ * const struct drm_crtc_helper_funcs *.
+ */
+static const struct drm_crtc_helper_funcs xlnx_pl_disp_crtc_helper_funcs = {
+	.atomic_enable	= xlnx_pl_disp_crtc_atomic_enable,
+	.atomic_disable	= xlnx_pl_disp_crtc_atomic_disable,
+	.atomic_check	= xlnx_pl_disp_crtc_atomic_check,
+	.atomic_begin	= xlnx_pl_disp_crtc_atomic_begin,
+};
+
+/* Crtc .destroy hook: stop the DMA before the core frees the crtc. */
+static void xlnx_pl_disp_crtc_destroy(struct drm_crtc *crtc)
+{
+	xlnx_pl_disp_plane_disable(crtc->primary);
+	drm_crtc_cleanup(crtc);
+}
+
+/*
+ * Crtc .enable_vblank hook: install the DMA completion callback used
+ * as the vblank source.  Always succeeds.
+ */
+static int xlnx_pl_disp_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+	struct xlnx_crtc *xlnx_crtc = to_xlnx_crtc(crtc);
+	struct xlnx_pl_disp *xlnx_pl_disp = crtc_to_dma(xlnx_crtc);
+
+	/*
+	 * Use the complete callback for vblank event assuming the dma engine
+	 * starts on the next descriptor upon this event. This may not be safe
+	 * assumption for some dma engines.
+	 */
+	xlnx_pl_disp->callback = xlnx_pl_disp_complete;
+	xlnx_pl_disp->callback_param = xlnx_pl_disp;
+
+	return 0;
+}
+
+/* Crtc .disable_vblank hook: detach the DMA completion callback. */
+static void xlnx_pl_disp_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+	struct xlnx_crtc *xlnx_crtc = to_xlnx_crtc(crtc);
+	struct xlnx_pl_disp *xlnx_pl_disp = crtc_to_dma(xlnx_crtc);
+
+	xlnx_pl_disp->callback = NULL;
+	xlnx_pl_disp->callback_param = NULL;
+}
+
+/*
+ * Crtc vtable.  Made const: drm_crtc_init_with_planes() takes a
+ * const struct drm_crtc_funcs *.
+ */
+static const struct drm_crtc_funcs xlnx_pl_disp_crtc_funcs = {
+	.destroy		= xlnx_pl_disp_crtc_destroy,
+	.set_config		= drm_atomic_helper_set_config,
+	.page_flip		= drm_atomic_helper_page_flip,
+	.reset			= drm_atomic_helper_crtc_reset,
+	.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
+	.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
+	.enable_vblank		= xlnx_pl_disp_crtc_enable_vblank,
+	.disable_vblank		= xlnx_pl_disp_crtc_disable_vblank,
+};
+
+/*
+ * Component .bind: query the format list from a format-aware DMA (fall
+ * back to the single DT-provided fourcc), create the primary plane and
+ * the crtc on top of it, and register the crtc with the xlnx DRM core.
+ *
+ * Return: 0 on success, negative errno on plane/crtc init failure.
+ */
+static int xlnx_pl_disp_bind(struct device *dev, struct device *master,
+			     void *data)
+{
+	struct drm_device *drm = data;
+	struct xlnx_pl_disp *xlnx_pl_disp = dev_get_drvdata(dev);
+	int ret;
+	u32 *fmts = NULL;
+	unsigned int num_fmts = 0;
+
+	/* in case of fb IP query the supported formats and their count */
+	xilinx_xdma_get_drm_vid_fmts(xlnx_pl_disp->chan->dma_chan,
+				     &num_fmts, &fmts);
+	ret = drm_universal_plane_init(drm, &xlnx_pl_disp->plane, 0,
+				       &xlnx_pl_disp_plane_funcs,
+				       fmts ? fmts : &xlnx_pl_disp->fmt,
+				       num_fmts ? num_fmts : 1,
+				       NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+	if (ret)
+		return ret;
+
+	drm_plane_helper_add(&xlnx_pl_disp->plane,
+			     &xlnx_pl_disp_plane_helper_funcs);
+
+	ret = drm_crtc_init_with_planes(drm, &xlnx_pl_disp->xlnx_crtc.crtc,
+					&xlnx_pl_disp->plane, NULL,
+					&xlnx_pl_disp_crtc_funcs, NULL);
+	if (ret) {
+		drm_plane_cleanup(&xlnx_pl_disp->plane);
+		return ret;
+	}
+
+	drm_crtc_helper_add(&xlnx_pl_disp->xlnx_crtc.crtc,
+			    &xlnx_pl_disp_crtc_helper_funcs);
+	xlnx_pl_disp->xlnx_crtc.get_format = &xlnx_pl_disp_get_format;
+	xlnx_pl_disp->xlnx_crtc.get_align = &xlnx_pl_disp_get_align;
+	xlnx_pl_disp->drm = drm;
+	xlnx_crtc_register(xlnx_pl_disp->drm, &xlnx_pl_disp->xlnx_crtc);
+
+	return 0;
+}
+
+/*
+ * Component .unbind: undo everything bind did.
+ *
+ * Fix: bind registers the crtc via xlnx_crtc_register() but it was
+ * never unregistered here (the sibling mixer driver's unbind does),
+ * leaving a stale crtc entry in the xlnx DRM core.
+ */
+static void xlnx_pl_disp_unbind(struct device *dev, struct device *master,
+				void *data)
+{
+	struct xlnx_pl_disp *xlnx_pl_disp = dev_get_drvdata(dev);
+
+	xlnx_crtc_unregister(xlnx_pl_disp->drm, &xlnx_pl_disp->xlnx_crtc);
+	drm_plane_cleanup(&xlnx_pl_disp->plane);
+	drm_crtc_cleanup(&xlnx_pl_disp->xlnx_crtc.crtc);
+}
+
+/* Component callbacks invoked by the xlnx DRM master device. */
+static const struct component_ops xlnx_pl_disp_component_ops = {
+	.bind	= xlnx_pl_disp_bind,
+	.unbind	= xlnx_pl_disp_unbind,
+};
+
+/*
+ * Platform probe: acquire the "dma0" channel, read the pixel format
+ * and optional VTC bridge from DT, then register as a component and
+ * bring up the DRM pipeline.
+ *
+ * Fixes over the original:
+ * - every failure path after the channel request now releases it (the
+ *   -ENOMEM and -EPROBE_DEFER paths previously leaked the channel);
+ * - of_dma_request_slave_channel() may return NULL, and PTR_ERR(NULL)
+ *   is 0, which made probe "succeed" with no channel; map NULL to
+ *   -ENODEV;
+ * - the DT format string was strcpy'd into the 4-byte fourcc field,
+ *   so the NUL terminator (and any extra characters) were written past
+ *   it; use a bounded memcpy instead;
+ * - the bridge DT node reference is dropped with of_node_put().
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int xlnx_pl_disp_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *vtc_node;
+	struct xlnx_pl_disp *xlnx_pl_disp;
+	int ret;
+	const char *vformat;
+	size_t len;
+	struct dma_chan *dma_chan;
+	struct xlnx_dma_chan *xlnx_dma_chan;
+
+	xlnx_pl_disp = devm_kzalloc(dev, sizeof(*xlnx_pl_disp), GFP_KERNEL);
+	if (!xlnx_pl_disp)
+		return -ENOMEM;
+
+	dma_chan = of_dma_request_slave_channel(dev->of_node, "dma0");
+	if (IS_ERR_OR_NULL(dma_chan)) {
+		dev_err(dev, "failed to request dma channel\n");
+		return dma_chan ? PTR_ERR(dma_chan) : -ENODEV;
+	}
+
+	xlnx_dma_chan = devm_kzalloc(dev, sizeof(*xlnx_dma_chan), GFP_KERNEL);
+	if (!xlnx_dma_chan) {
+		ret = -ENOMEM;
+		goto err_dma;
+	}
+
+	xlnx_dma_chan->dma_chan = dma_chan;
+	xlnx_pl_disp->chan = xlnx_dma_chan;
+	ret = of_property_read_string(dev->of_node, "xlnx,vformat", &vformat);
+	if (ret) {
+		dev_err(dev, "No xlnx,vformat value in dts\n");
+		goto err_dma;
+	}
+
+	/* fmt is a 4cc code: copy at most 4 bytes, no NUL terminator */
+	len = strlen(vformat);
+	if (len > sizeof(xlnx_pl_disp->fmt))
+		len = sizeof(xlnx_pl_disp->fmt);
+	memcpy(&xlnx_pl_disp->fmt, vformat, len);
+
+	/* VTC Bridge support */
+	vtc_node = of_parse_phandle(dev->of_node, "xlnx,bridge", 0);
+	if (vtc_node) {
+		xlnx_pl_disp->vtc_bridge = of_xlnx_bridge_get(vtc_node);
+		of_node_put(vtc_node);
+		if (!xlnx_pl_disp->vtc_bridge) {
+			dev_info(dev, "Didn't get vtc bridge instance\n");
+			ret = -EPROBE_DEFER;
+			goto err_dma;
+		}
+	} else {
+		dev_info(dev, "vtc bridge property not present\n");
+	}
+
+	xlnx_pl_disp->dev = dev;
+	platform_set_drvdata(pdev, xlnx_pl_disp);
+
+	ret = component_add(dev, &xlnx_pl_disp_component_ops);
+	if (ret)
+		goto err_dma;
+
+	xlnx_pl_disp->master = xlnx_drm_pipeline_init(pdev);
+	if (IS_ERR(xlnx_pl_disp->master)) {
+		ret = PTR_ERR(xlnx_pl_disp->master);
+		dev_err(dev, "failed to initialize the drm pipeline\n");
+		goto err_component;
+	}
+
+	/* first field is expected to be bottom so init previous field to top */
+	xlnx_pl_disp->prev_fid = 1;
+
+	dev_info(&pdev->dev, "Xlnx PL display driver probed\n");
+
+	return 0;
+
+err_component:
+	component_del(dev, &xlnx_pl_disp_component_ops);
+err_dma:
+	dma_release_channel(dma_chan);
+
+	return ret;
+}
+
+/*
+ * Platform remove: release the optional bridge, tear down the pipeline
+ * and component, then stop and release the DMA channel.
+ *
+ * Fix: the VTC bridge is optional (probe continues without one), so
+ * guard of_xlnx_bridge_put() against a NULL bridge, matching the
+ * sibling mixer driver's remove path.
+ */
+static int xlnx_pl_disp_remove(struct platform_device *pdev)
+{
+	struct xlnx_pl_disp *xlnx_pl_disp = platform_get_drvdata(pdev);
+	struct xlnx_dma_chan *xlnx_dma_chan = xlnx_pl_disp->chan;
+
+	if (xlnx_pl_disp->vtc_bridge)
+		of_xlnx_bridge_put(xlnx_pl_disp->vtc_bridge);
+	xlnx_drm_pipeline_exit(xlnx_pl_disp->master);
+	component_del(&pdev->dev, &xlnx_pl_disp_component_ops);
+
+	/* Make sure the channel is terminated before release */
+	dmaengine_terminate_sync(xlnx_dma_chan->dma_chan);
+	dma_release_channel(xlnx_dma_chan->dma_chan);
+
+	return 0;
+}
+
+/* DT match table for the PL display pipeline node. */
+static const struct of_device_id xlnx_pl_disp_of_match[] = {
+	{ .compatible = "xlnx,pl-disp"},
+	{ }
+};
+MODULE_DEVICE_TABLE(of, xlnx_pl_disp_of_match);
+
+/* Platform driver glue for the PL display device node. */
+static struct platform_driver xlnx_pl_disp_driver = {
+	.probe = xlnx_pl_disp_probe,
+	.remove = xlnx_pl_disp_remove,
+	.driver = {
+		.name = "xlnx-pl-disp",
+		.of_match_table = xlnx_pl_disp_of_match,
+	},
+};
+
+module_platform_driver(xlnx_pl_disp_driver);
+
+MODULE_AUTHOR("Saurabh Sengar");
+MODULE_DESCRIPTION("Xilinx DRM Display Driver for PL IPs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/xlnx_scaler.c b/drivers/gpu/drm/xlnx/xlnx_scaler.c
new file mode 100644
index 000000000000..4ffff35e0486
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_scaler.c
@@ -0,0 +1,1980 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VPSS SCALER DRM bridge driver
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ *
+ * Author: Venkateshwar rao G <vgannava@xilinx.com>
+ * Rohit Athavale <rathavale@xilinx.com>
+ */
+
+/*
+ * Overview:
+ * This experimental driver works as a bridge driver and
+ * reuses code from V4L2.
+ * TODO:
+ * Need to implement in a modular approach to share driver code between
+ * V4L2 and DRM frameworks.
+ * Should be integrated with plane.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <uapi/linux/media-bus-format.h>
+
+#include "xlnx_bridge.h"
+
+#define XSCALER_MAX_WIDTH (3840)
+#define XSCALER_MAX_HEIGHT (2160)
+#define XSCALER_MAX_PHASES (64)
+#define XSCALER_MIN_WIDTH (64)
+#define XSCALER_MIN_HEIGHT (64)
+
+/* Video subsystem block offsets */
+#define S_AXIS_RESET_OFF (0x00010000)
+#define V_HSCALER_OFF (0x00000000)
+#define V_VSCALER_OFF (0x00020000)
+
+/* HW Reset Network GPIO Channel */
+#define XGPIO_CH_RESET_SEL (1)
+#define XGPIO_RESET_MASK_VIDEO_IN BIT(0)
+#define XGPIO_RESET_MASK_IP_AXIS BIT(1)
+#define XGPIO_RESET_MASK_ALL_BLOCKS (XGPIO_RESET_MASK_VIDEO_IN | \
+ XGPIO_RESET_MASK_IP_AXIS)
+#define XGPIO_DATA_OFFSET (0x0)
+#define XGPIO_DATA2_OFFSET (0x8)
+#define XGPIO_TRI2_OFFSET (0xc)
+
+#define XGPIO_ISR_OFFSET (0x120)
+#define XGPIO_IER_OFFSET (0x128)
+#define XGPIO_CHAN_OFFSET (8)
+#define STEP_PRECISION (65536)
+
+/* SCALER POWER MACROS */
+#define XSCALER_RESET_ASSERT (0x1)
+#define XSCALER_RESET_DEASSERT (0x0)
+
+/* Video IP PPC */
+#define XSCALER_PPC_1 (1)
+#define XSCALER_PPC_2 (2)
+
+#define XV_HSCALER_MAX_H_TAPS (12)
+#define XV_HSCALER_MAX_H_PHASES (64)
+#define XV_HSCALER_MAX_LINE_WIDTH (3840)
+#define XV_VSCALER_MAX_V_TAPS (12)
+#define XV_VSCALER_MAX_V_PHASES (64)
+
+#define XV_HSCALER_TAPS_2 (2)
+#define XV_HSCALER_TAPS_4 (4)
+#define XV_HSCALER_TAPS_6 (6)
+#define XV_HSCALER_TAPS_8 (8)
+#define XV_HSCALER_TAPS_10 (10)
+#define XV_HSCALER_TAPS_12 (12)
+#define XV_VSCALER_TAPS_2 (2)
+#define XV_VSCALER_TAPS_4 (4)
+#define XV_VSCALER_TAPS_6 (6)
+#define XV_VSCALER_TAPS_8 (8)
+#define XV_VSCALER_TAPS_10 (10)
+#define XV_VSCALER_TAPS_12 (12)
+
+/* Mask definitions for Low and high 16 bits in a 32 bit number */
+#define XHSC_MASK_LOW_16BITS GENMASK(15, 0)
+#define XHSC_MASK_HIGH_16BITS GENMASK(31, 16)
+#define XHSC_MASK_LOW_32BITS GENMASK(31, 0)
+#define XHSC_STEP_PRECISION_SHIFT (16)
+#define XHSC_HPHASE_SHIFT_BY_6 (6)
+#define XHSC_HPHASE_MULTIPLIER (9)
+#define XSCALER_BITSHIFT_16 (16)
+
+/* Mask definitions for Low and high 16 bits in a 32 bit number */
+#define XVSC_MASK_LOW_16BITS GENMASK(15, 0)
+#define XVSC_MASK_HIGH_16BITS GENMASK(31, 16)
+
+/* Scaler AP Control Registers */
+#define XSCALER_START BIT(0)
+#define XSCALER_AUTO_RESTART BIT(7)
+#define XSCALER_STREAM_ON (XSCALER_START | XSCALER_AUTO_RESTART)
+
+/* H-scaler registers */
+#define XV_HSCALER_CTRL_ADDR_AP_CTRL (0x0000)
+#define XV_HSCALER_CTRL_ADDR_GIE (0x0004)
+#define XV_HSCALER_CTRL_ADDR_IER (0x0008)
+#define XV_HSCALER_CTRL_ADDR_ISR (0x000c)
+#define XV_HSCALER_CTRL_ADDR_HWREG_HEIGHT_DATA (0x0010)
+#define XV_HSCALER_CTRL_ADDR_HWREG_WIDTHIN_DATA (0x0018)
+#define XV_HSCALER_CTRL_ADDR_HWREG_WIDTHOUT_DATA (0x0020)
+#define XV_HSCALER_CTRL_ADDR_HWREG_COLORMODE_DATA (0x0028)
+#define XV_HSCALER_CTRL_ADDR_HWREG_PIXELRATE_DATA (0x0030)
+#define XV_HSCALER_CTRL_ADDR_HWREG_COLORMODEOUT_DATA (0X0038)
+#define XV_HSCALER_CTRL_ADDR_HWREG_HFLTCOEFF_BASE (0x0800)
+#define XV_HSCALER_CTRL_ADDR_HWREG_HFLTCOEFF_HIGH (0x0bff)
+
+#define XV_HSCALER_CTRL_WIDTH_HWREG_HFLTCOEFF (16)
+#define XV_HSCALER_CTRL_DEPTH_HWREG_HFLTCOEFF (384)
+#define XV_HSCALER_CTRL_ADDR_HWREG_PHASESH_V_BASE (0x2000)
+#define XV_HSCALER_CTRL_ADDR_HWREG_PHASESH_V_HIGH (0x3fff)
+#define XV_HSCALER_CTRL_WIDTH_HWREG_PHASESH_V (18)
+#define XV_HSCALER_CTRL_DEPTH_HWREG_PHASESH_V (1920)
+#define XV_HSCALER_CTRL_ADDR_HWREG_PHASEH_FIX (0x4000)
+
+/* H-scaler masks */
+#define XV_HSCALER_PHASESH_V_OUTPUT_WR_EN BIT(8)
+
+/* V-scaler registers */
+#define XV_VSCALER_CTRL_ADDR_AP_CTRL (0x000)
+#define XV_VSCALER_CTRL_ADDR_GIE (0x004)
+#define XV_VSCALER_CTRL_ADDR_IER (0x008)
+#define XV_VSCALER_CTRL_ADDR_ISR (0x00c)
+#define XV_VSCALER_CTRL_ADDR_HWREG_HEIGHTIN_DATA (0x010)
+#define XV_VSCALER_CTRL_ADDR_HWREG_WIDTH_DATA (0x018)
+#define XV_VSCALER_CTRL_ADDR_HWREG_HEIGHTOUT_DATA (0x020)
+#define XV_VSCALER_CTRL_ADDR_HWREG_LINERATE_DATA (0x028)
+#define XV_VSCALER_CTRL_ADDR_HWREG_COLORMODE_DATA (0x030)
+#define XV_VSCALER_CTRL_ADDR_HWREG_VFLTCOEFF_BASE (0x800)
+#define XV_VSCALER_CTRL_ADDR_HWREG_VFLTCOEFF_HIGH (0xbff)
+
+/* Polyphase scaler coefficient tables for the 6, 8, 10 and 12 tap filters */
+
+static const s16
+XV_lanczos2_taps6[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_6] = {
+ { 0, 0, 4096, 0, 0, 0, },
+ { 0, -40, 4099, 42, 0, -5, },
+ { -1, -77, 4097, 87, -1, -9, },
+ { -2, -111, 4092, 134, -2, -15, },
+ { -4, -143, 4082, 184, -4, -19, },
+ { -6, -173, 4068, 237, -7, -23, },
+ { -8, -201, 4051, 292, -10, -28, },
+ { -11, -226, 4029, 350, -13, -33, },
+ { -14, -248, 4003, 411, -18, -38, },
+ { -17, -269, 3974, 474, -23, -43, },
+ { -21, -287, 3940, 539, -28, -47, },
+ { -24, -303, 3903, 608, -34, -54, },
+ { -28, -317, 3862, 678, -41, -58, },
+ { -32, -329, 3817, 751, -49, -62, },
+ { -37, -339, 3768, 826, -57, -65, },
+ { -41, -347, 3716, 903, -65, -70, },
+ { -45, -353, 3661, 982, -75, -74, },
+ { -50, -358, 3602, 1063, -84, -77, },
+ { -54, -361, 3539, 1146, -95, -79, },
+ { -58, -362, 3474, 1230, -106, -82, },
+ { -62, -361, 3406, 1317, -117, -87, },
+ { -66, -359, 3335, 1404, -128, -90, },
+ { -70, -356, 3261, 1493, -140, -92, },
+ { -74, -351, 3185, 1583, -153, -94, },
+ { -77, -346, 3106, 1673, -165, -95, },
+ { -81, -339, 3025, 1765, -178, -96, },
+ { -84, -331, 2942, 1857, -191, -97, },
+ { -87, -322, 2858, 1950, -204, -99, },
+ { -89, -313, 2771, 2043, -217, -99, },
+ { -92, -302, 2683, 2136, -230, -99, },
+ { -94, -292, 2594, 2228, -243, -97, },
+ { -95, -280, 2504, 2321, -256, -98, },
+ { -97, -268, 2413, 2413, -268, -97, },
+ { -97, -256, 2321, 2504, -280, -96, },
+ { -98, -243, 2228, 2594, -292, -93, },
+ { -98, -230, 2136, 2683, -302, -93, },
+ { -98, -217, 2043, 2771, -313, -90, },
+ { -98, -204, 1950, 2858, -322, -88, },
+ { -97, -191, 1857, 2942, -331, -84, },
+ { -96, -178, 1765, 3025, -339, -81, },
+ { -95, -165, 1673, 3106, -346, -77, },
+ { -93, -153, 1583, 3185, -351, -75, },
+ { -91, -140, 1493, 3261, -356, -71, },
+ { -89, -128, 1404, 3335, -359, -67, },
+ { -86, -117, 1317, 3406, -361, -63, },
+ { -83, -106, 1230, 3474, -362, -57, },
+ { -80, -95, 1146, 3539, -361, -53, },
+ { -77, -84, 1063, 3602, -358, -50, },
+ { -73, -75, 982, 3661, -353, -46, },
+ { -69, -65, 903, 3716, -347, -42, },
+ { -65, -57, 826, 3768, -339, -37, },
+ { -61, -49, 751, 3817, -329, -33, },
+ { -57, -41, 678, 3862, -317, -29, },
+ { -52, -34, 608, 3903, -303, -26, },
+ { -47, -28, 539, 3940, -287, -21, },
+ { -43, -23, 474, 3974, -269, -17, },
+ { -38, -18, 411, 4003, -248, -14, },
+ { -33, -13, 350, 4029, -226, -11, },
+ { -28, -10, 292, 4051, -201, -8, },
+ { -24, -7, 237, 4068, -173, -5, },
+ { -19, -4, 184, 4082, -143, -4, },
+ { -14, -2, 134, 4092, -111, -3, },
+ { -9, -1, 87, 4097, -77, -1, },
+ { -5, 0, 42, 4099, -40, 0, }
+};
+
+/* ScalingRatio = 1.25 */
+static const s16
+XV_fixedcoeff_taps6_SR1p2[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_6] = {
+ { -102, 512, 3208, 512, -102, 68, },
+ { -97, 471, 3209, 555, -107, 65, },
+ { -92, 431, 3208, 599, -113, 63, },
+ { -87, 392, 3205, 645, -118, 59, },
+ { -82, 354, 3199, 691, -124, 58, },
+ { -77, 318, 3191, 739, -130, 55, },
+ { -72, 282, 3181, 788, -136, 53, },
+ { -68, 248, 3169, 838, -141, 50, },
+ { -64, 216, 3155, 889, -147, 47, },
+ { -59, 184, 3139, 941, -153, 44, },
+ { -55, 154, 3120, 993, -158, 42, },
+ { -52, 125, 3100, 1047, -164, 40, },
+ { -48, 98, 3077, 1101, -169, 37, },
+ { -44, 71, 3052, 1157, -174, 34, },
+ { -41, 46, 3025, 1212, -180, 34, },
+ { -38, 23, 2996, 1269, -184, 30, },
+ { -35, 0, 2965, 1326, -189, 29, },
+ { -32, -21, 2933, 1383, -193, 26, },
+ { -29, -41, 2898, 1441, -198, 25, },
+ { -26, -60, 2862, 1500, -201, 21, },
+ { -24, -78, 2823, 1558, -205, 22, },
+ { -21, -94, 2784, 1617, -208, 18, },
+ { -19, -109, 2742, 1676, -210, 16, },
+ { -17, -123, 2699, 1734, -212, 15, },
+ { -14, -136, 2654, 1793, -214, 13, },
+ { -12, -148, 2608, 1852, -214, 10, },
+ { -10, -159, 2560, 1910, -215, 10, },
+ { -9, -168, 2512, 1968, -215, 8, },
+ { -7, -177, 2461, 2026, -214, 7, },
+ { -5, -185, 2410, 2083, -212, 5, },
+ { -3, -192, 2358, 2139, -209, 3, },
+ { -2, -197, 2304, 2195, -206, 2, },
+ { 0, -202, 2250, 2250, -202, 0, },
+ { 2, -206, 2195, 2304, -197, -2, },
+ { 3, -209, 2139, 2358, -192, -3, },
+ { 5, -212, 2083, 2410, -185, -5, },
+ { 6, -214, 2026, 2461, -177, -6, },
+ { 8, -215, 1968, 2512, -168, -9, },
+ { 10, -215, 1910, 2560, -159, -10, },
+ { 11, -214, 1852, 2608, -148, -13, },
+ { 13, -214, 1793, 2654, -136, -14, },
+ { 15, -212, 1734, 2699, -123, -17, },
+ { 17, -210, 1676, 2742, -109, -20, },
+ { 18, -208, 1617, 2784, -94, -21, },
+ { 20, -205, 1558, 2823, -78, -22, },
+ { 22, -201, 1500, 2862, -60, -27, },
+ { 24, -198, 1441, 2898, -41, -28, },
+ { 26, -193, 1383, 2933, -21, -32, },
+ { 28, -189, 1326, 2965, 0, -34, },
+ { 30, -184, 1269, 2996, 23, -38, },
+ { 33, -180, 1212, 3025, 46, -40, },
+ { 35, -174, 1157, 3052, 71, -45, },
+ { 37, -169, 1101, 3077, 98, -48, },
+ { 40, -164, 1047, 3100, 125, -52, },
+ { 42, -158, 993, 3120, 154, -55, },
+ { 44, -153, 941, 3139, 184, -59, },
+ { 47, -147, 889, 3155, 216, -64, },
+ { 50, -141, 838, 3169, 248, -68, },
+ { 52, -136, 788, 3181, 282, -71, },
+ { 55, -130, 739, 3191, 318, -77, },
+ { 57, -124, 691, 3199, 354, -81, },
+ { 60, -118, 645, 3205, 392, -88, },
+ { 63, -113, 599, 3208, 431, -92, },
+ { 65, -107, 555, 3209, 471, -97, }
+};
+
+/* ScalingRatio = 2.0 */
+static const s16
+XV_fixedcoeff_taps6_SR2[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_6] = {
+ { 0, 970, 2235, 970, 0, -79, },
+ { -3, 943, 2233, 997, 3, -77, },
+ { -5, 915, 2231, 1025, 6, -76, },
+ { -8, 888, 2227, 1052, 10, -73, },
+ { -10, 861, 2223, 1079, 14, -71, },
+ { -12, 834, 2218, 1107, 18, -69, },
+ { -14, 808, 2213, 1134, 22, -67, },
+ { -15, 782, 2206, 1162, 27, -66, },
+ { -17, 756, 2199, 1189, 32, -63, },
+ { -18, 731, 2191, 1217, 37, -62, },
+ { -20, 706, 2182, 1245, 42, -59, },
+ { -21, 681, 2172, 1272, 48, -56, },
+ { -22, 657, 2162, 1300, 55, -56, },
+ { -22, 633, 2151, 1327, 61, -54, },
+ { -23, 609, 2139, 1355, 68, -52, },
+ { -24, 586, 2126, 1382, 76, -50, },
+ { -25, 564, 2113, 1410, 83, -49, },
+ { -25, 541, 2099, 1437, 91, -47, },
+ { -26, 520, 2084, 1464, 100, -46, },
+ { -26, 498, 2069, 1491, 109, -45, },
+ { -27, 477, 2053, 1517, 118, -42, },
+ { -27, 457, 2036, 1544, 128, -42, },
+ { -27, 437, 2019, 1570, 138, -41, },
+ { -28, 418, 2001, 1596, 148, -39, },
+ { -28, 399, 1983, 1622, 160, -40, },
+ { -29, 380, 1964, 1647, 171, -37, },
+ { -29, 362, 1944, 1672, 183, -36, },
+ { -29, 345, 1924, 1697, 195, -36, },
+ { -30, 328, 1903, 1722, 208, -35, },
+ { -30, 311, 1882, 1746, 221, -34, },
+ { -31, 295, 1860, 1770, 235, -33, },
+ { -31, 279, 1838, 1793, 249, -32, },
+ { -32, 264, 1816, 1816, 264, -32, },
+ { -32, 249, 1793, 1838, 279, -31, },
+ { -33, 235, 1770, 1860, 295, -31, },
+ { -34, 221, 1746, 1882, 311, -30, },
+ { -35, 208, 1722, 1903, 328, -30, },
+ { -35, 195, 1697, 1924, 345, -30, },
+ { -36, 183, 1672, 1944, 362, -29, },
+ { -37, 171, 1647, 1964, 380, -29, },
+ { -38, 160, 1622, 1983, 399, -30, },
+ { -39, 148, 1596, 2001, 418, -28, },
+ { -40, 138, 1570, 2019, 437, -28, },
+ { -42, 128, 1544, 2036, 457, -27, },
+ { -43, 118, 1517, 2053, 477, -26, },
+ { -44, 109, 1491, 2069, 498, -27, },
+ { -46, 100, 1464, 2084, 520, -26, },
+ { -47, 91, 1437, 2099, 541, -25, },
+ { -49, 83, 1410, 2113, 564, -25, },
+ { -50, 76, 1382, 2126, 586, -24, },
+ { -52, 68, 1355, 2139, 609, -23, },
+ { -54, 61, 1327, 2151, 633, -22, },
+ { -55, 55, 1300, 2162, 657, -23, },
+ { -57, 48, 1272, 2172, 681, -20, },
+ { -59, 42, 1245, 2182, 706, -20, },
+ { -61, 37, 1217, 2191, 731, -19, },
+ { -63, 32, 1189, 2199, 756, -17, },
+ { -65, 27, 1162, 2206, 782, -16, },
+ { -67, 22, 1134, 2213, 808, -14, },
+ { -69, 18, 1107, 2218, 834, -12, },
+ { -71, 14, 1079, 2223, 861, -10, },
+ { -73, 10, 1052, 2227, 888, -8, },
+ { -75, 6, 1025, 2231, 915, -6, },
+ { -77, 3, 997, 2233, 943, -3, }
+};
+
+/* ScalingRatio = 3.0 */
+static const s16
+XV_fixedcoeff_taps6_SR3[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_6] = {
+ { 126, 1019, 1806, 1019, 126, 0, },
+ { 120, 1000, 1805, 1038, 132, 1, },
+ { 114, 980, 1804, 1057, 138, 3, },
+ { 108, 961, 1802, 1075, 145, 5, },
+ { 103, 942, 1800, 1094, 152, 5, },
+ { 98, 922, 1797, 1113, 159, 7, },
+ { 93, 903, 1794, 1131, 167, 8, },
+ { 88, 884, 1790, 1150, 174, 10, },
+ { 84, 865, 1786, 1168, 182, 11, },
+ { 80, 846, 1782, 1187, 191, 10, },
+ { 76, 827, 1777, 1205, 199, 12, },
+ { 72, 809, 1771, 1223, 208, 13, },
+ { 68, 790, 1766, 1241, 217, 14, },
+ { 65, 772, 1759, 1259, 226, 15, },
+ { 61, 753, 1753, 1277, 236, 16, },
+ { 58, 735, 1746, 1295, 246, 16, },
+ { 56, 717, 1738, 1313, 256, 16, },
+ { 53, 699, 1730, 1330, 266, 18, },
+ { 50, 682, 1722, 1347, 277, 18, },
+ { 48, 664, 1713, 1364, 288, 19, },
+ { 46, 647, 1704, 1381, 299, 19, },
+ { 43, 630, 1694, 1398, 311, 20, },
+ { 41, 613, 1684, 1414, 323, 21, },
+ { 40, 596, 1674, 1430, 335, 21, },
+ { 38, 580, 1663, 1446, 347, 22, },
+ { 36, 563, 1652, 1462, 360, 23, },
+ { 35, 547, 1641, 1478, 373, 22, },
+ { 33, 531, 1629, 1493, 386, 24, },
+ { 32, 516, 1617, 1508, 399, 24, },
+ { 31, 500, 1604, 1523, 413, 25, },
+ { 30, 485, 1592, 1537, 427, 25, },
+ { 29, 470, 1578, 1551, 441, 27, },
+ { 28, 455, 1565, 1565, 455, 28, },
+ { 27, 441, 1551, 1578, 470, 29, },
+ { 26, 427, 1537, 1592, 485, 29, },
+ { 25, 413, 1523, 1604, 500, 31, },
+ { 24, 399, 1508, 1617, 516, 32, },
+ { 24, 386, 1493, 1629, 531, 33, },
+ { 23, 373, 1478, 1641, 547, 34, },
+ { 22, 360, 1462, 1652, 563, 37, },
+ { 22, 347, 1446, 1663, 580, 38, },
+ { 21, 335, 1430, 1674, 596, 40, },
+ { 20, 323, 1414, 1684, 613, 42, },
+ { 20, 311, 1398, 1694, 630, 43, },
+ { 19, 299, 1381, 1704, 647, 46, },
+ { 19, 288, 1364, 1713, 664, 48, },
+ { 18, 277, 1347, 1722, 682, 50, },
+ { 17, 266, 1330, 1730, 699, 54, },
+ { 17, 256, 1313, 1738, 717, 55, },
+ { 16, 246, 1295, 1746, 735, 58, },
+ { 15, 236, 1277, 1753, 753, 62, },
+ { 15, 226, 1259, 1759, 772, 65, },
+ { 14, 217, 1241, 1766, 790, 68, },
+ { 13, 208, 1223, 1771, 809, 72, },
+ { 12, 199, 1205, 1777, 827, 76, },
+ { 11, 191, 1187, 1782, 846, 79, },
+ { 10, 182, 1168, 1786, 865, 85, },
+ { 9, 174, 1150, 1790, 884, 89, },
+ { 8, 167, 1131, 1794, 903, 93, },
+ { 7, 159, 1113, 1797, 922, 98, },
+ { 6, 152, 1094, 1800, 942, 102, },
+ { 5, 145, 1075, 1802, 961, 108, },
+ { 3, 138, 1057, 1804, 980, 114, },
+ { 2, 132, 1038, 1805, 1000, 119, }
+};
+
+/* ScalingRatio = 4.0 */
+static const s16
+XV_fixedcoeff_taps6_SR4[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_6] = {
+ { 176, 1009, 1643, 1009, 176, 83, },
+ { 169, 993, 1644, 1026, 183, 81, },
+ { 162, 978, 1644, 1042, 190, 80, },
+ { 156, 962, 1643, 1058, 198, 79, },
+ { 150, 946, 1642, 1074, 205, 79, },
+ { 144, 930, 1641, 1091, 213, 77, },
+ { 138, 914, 1640, 1107, 222, 75, },
+ { 133, 898, 1638, 1123, 230, 74, },
+ { 128, 882, 1635, 1139, 239, 73, },
+ { 123, 866, 1633, 1154, 248, 72, },
+ { 118, 850, 1629, 1170, 257, 72, },
+ { 114, 834, 1626, 1186, 267, 69, },
+ { 109, 818, 1622, 1201, 276, 70, },
+ { 105, 802, 1618, 1217, 286, 68, },
+ { 101, 786, 1613, 1232, 297, 67, },
+ { 97, 771, 1608, 1247, 307, 66, },
+ { 94, 755, 1603, 1262, 318, 64, },
+ { 91, 739, 1597, 1276, 328, 65, },
+ { 87, 724, 1591, 1291, 339, 64, },
+ { 85, 708, 1585, 1305, 351, 62, },
+ { 82, 693, 1578, 1319, 362, 62, },
+ { 79, 677, 1571, 1333, 374, 62, },
+ { 77, 662, 1563, 1347, 386, 61, },
+ { 75, 647, 1556, 1360, 398, 60, },
+ { 73, 632, 1547, 1373, 410, 61, },
+ { 71, 617, 1539, 1386, 423, 60, },
+ { 69, 602, 1530, 1399, 436, 60, },
+ { 68, 587, 1521, 1412, 449, 59, },
+ { 66, 573, 1511, 1424, 462, 60, },
+ { 65, 558, 1501, 1436, 475, 61, },
+ { 64, 544, 1491, 1447, 488, 62, },
+ { 63, 530, 1481, 1459, 502, 61, },
+ { 62, 516, 1470, 1470, 516, 62, },
+ { 62, 502, 1459, 1481, 530, 62, },
+ { 61, 488, 1447, 1491, 544, 65, },
+ { 61, 475, 1436, 1501, 558, 65, },
+ { 60, 462, 1424, 1511, 573, 66, },
+ { 60, 449, 1412, 1521, 587, 67, },
+ { 60, 436, 1399, 1530, 602, 69, },
+ { 60, 423, 1386, 1539, 617, 71, },
+ { 61, 410, 1373, 1547, 632, 73, },
+ { 61, 398, 1360, 1556, 647, 74, },
+ { 61, 386, 1347, 1563, 662, 77, },
+ { 62, 374, 1333, 1571, 677, 79, },
+ { 62, 362, 1319, 1578, 693, 82, },
+ { 63, 351, 1305, 1585, 708, 84, },
+ { 64, 339, 1291, 1591, 724, 87, },
+ { 64, 328, 1276, 1597, 739, 92, },
+ { 65, 318, 1262, 1603, 755, 93, },
+ { 66, 307, 1247, 1608, 771, 97, },
+ { 67, 297, 1232, 1613, 786, 101, },
+ { 68, 286, 1217, 1618, 802, 105, },
+ { 69, 276, 1201, 1622, 818, 110, },
+ { 70, 267, 1186, 1626, 834, 113, },
+ { 71, 257, 1170, 1629, 850, 119, },
+ { 72, 248, 1154, 1633, 866, 123, },
+ { 73, 239, 1139, 1635, 882, 128, },
+ { 75, 230, 1123, 1638, 898, 132, },
+ { 76, 222, 1107, 1640, 914, 137, },
+ { 77, 213, 1091, 1641, 930, 144, },
+ { 78, 205, 1074, 1642, 946, 151, },
+ { 79, 198, 1058, 1643, 962, 156, },
+ { 80, 190, 1042, 1644, 978, 162, },
+ { 82, 183, 1026, 1644, 993, 168, }
+};
+
+/* ScalingRatio = 2.0 */
+static const s16
+XV_fixedcoeff_taps8_SR2[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_8] = {
+ { -55, 0, 1078, 2049, 1078, 0, -55, 1, },
+ { -53, -7, 1055, 2049, 1102, 7, -56, -1, },
+ { -52, -13, 1032, 2048, 1126, 15, -58, -2, },
+ { -50, -20, 1009, 2047, 1149, 22, -59, -2, },
+ { -49, -26, 986, 2046, 1173, 31, -61, -4, },
+ { -47, -31, 963, 2043, 1197, 39, -62, -6, },
+ { -46, -37, 940, 2040, 1220, 48, -64, -5, },
+ { -45, -42, 917, 2037, 1244, 57, -65, -7, },
+ { -43, -47, 894, 2033, 1267, 66, -67, -7, },
+ { -42, -51, 871, 2028, 1290, 76, -69, -7, },
+ { -41, -55, 848, 2023, 1313, 86, -70, -8, },
+ { -40, -59, 826, 2017, 1336, 97, -72, -9, },
+ { -38, -63, 803, 2010, 1359, 108, -73, -10, },
+ { -37, -67, 781, 2003, 1382, 119, -75, -10, },
+ { -36, -70, 759, 1996, 1405, 130, -76, -12, },
+ { -35, -73, 737, 1987, 1427, 142, -78, -11, },
+ { -34, -76, 715, 1979, 1449, 154, -79, -12, },
+ { -33, -78, 693, 1969, 1471, 167, -81, -12, },
+ { -32, -81, 672, 1959, 1493, 180, -82, -13, },
+ { -31, -83, 650, 1949, 1514, 193, -83, -13, },
+ { -30, -85, 629, 1938, 1536, 207, -85, -14, },
+ { -29, -86, 609, 1926, 1557, 221, -86, -16, },
+ { -28, -88, 588, 1914, 1577, 235, -87, -15, },
+ { -28, -89, 568, 1902, 1598, 250, -88, -17, },
+ { -27, -90, 548, 1889, 1618, 265, -89, -18, },
+ { -26, -91, 528, 1875, 1638, 280, -90, -18, },
+ { -25, -92, 508, 1861, 1657, 296, -91, -18, },
+ { -24, -93, 489, 1846, 1676, 312, -92, -18, },
+ { -24, -93, 470, 1831, 1695, 328, -92, -19, },
+ { -23, -94, 451, 1816, 1714, 345, -93, -20, },
+ { -22, -94, 432, 1800, 1732, 361, -93, -20, },
+ { -22, -94, 414, 1783, 1749, 379, -94, -19, },
+ { -21, -94, 396, 1767, 1767, 396, -94, -21, },
+ { -21, -94, 379, 1749, 1783, 414, -94, -20, },
+ { -20, -93, 361, 1732, 1800, 432, -94, -22, },
+ { -19, -93, 345, 1714, 1816, 451, -94, -24, },
+ { -19, -92, 328, 1695, 1831, 470, -93, -24, },
+ { -18, -92, 312, 1676, 1846, 489, -93, -24, },
+ { -18, -91, 296, 1657, 1861, 508, -92, -25, },
+ { -17, -90, 280, 1638, 1875, 528, -91, -27, },
+ { -17, -89, 265, 1618, 1889, 548, -90, -28, },
+ { -16, -88, 250, 1598, 1902, 568, -89, -29, },
+ { -16, -87, 235, 1577, 1914, 588, -88, -27, },
+ { -15, -86, 221, 1557, 1926, 609, -86, -30, },
+ { -14, -85, 207, 1536, 1938, 629, -85, -30, },
+ { -14, -83, 193, 1514, 1949, 650, -83, -30, },
+ { -13, -82, 180, 1493, 1959, 672, -81, -32, },
+ { -13, -81, 167, 1471, 1969, 693, -78, -32, },
+ { -12, -79, 154, 1449, 1979, 715, -76, -34, },
+ { -12, -78, 142, 1427, 1987, 737, -73, -34, },
+ { -11, -76, 130, 1405, 1996, 759, -70, -37, },
+ { -10, -75, 119, 1382, 2003, 781, -67, -37, },
+ { -10, -73, 108, 1359, 2010, 803, -63, -38, },
+ { -9, -72, 97, 1336, 2017, 826, -59, -40, },
+ { -8, -70, 86, 1313, 2023, 848, -55, -41, },
+ { -8, -69, 76, 1290, 2028, 871, -51, -41, },
+ { -7, -67, 66, 1267, 2033, 894, -47, -43, },
+ { -6, -65, 57, 1244, 2037, 917, -42, -46, },
+ { -5, -64, 48, 1220, 2040, 940, -37, -46, },
+ { -5, -62, 39, 1197, 2043, 963, -31, -48, },
+ { -4, -61, 31, 1173, 2046, 986, -26, -49, },
+ { -3, -59, 22, 1149, 2047, 1009, -20, -49, },
+ { -2, -58, 15, 1126, 2048, 1032, -13, -52, },
+ { -1, -56, 7, 1102, 2049, 1055, -7, -53, }
+};
+
+/* ScalingRatio = 3.0 */
+static const s16
+XV_fixedcoeff_taps8_SR3[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_8] = {
+ { 0, 275, 1036, 1514, 1036, 275, 0, -40, },
+ { -1, 266, 1023, 1514, 1048, 283, 1, -38, },
+ { -2, 257, 1010, 1513, 1060, 292, 2, -36, },
+ { -3, 249, 997, 1512, 1073, 301, 3, -36, },
+ { -3, 241, 983, 1510, 1085, 310, 5, -35, },
+ { -4, 233, 970, 1509, 1097, 319, 6, -34, },
+ { -5, 225, 957, 1507, 1109, 329, 7, -33, },
+ { -6, 217, 944, 1505, 1121, 338, 9, -32, },
+ { -6, 210, 931, 1503, 1133, 348, 10, -33, },
+ { -7, 202, 917, 1500, 1144, 358, 12, -30, },
+ { -7, 195, 904, 1497, 1156, 368, 13, -30, },
+ { -8, 188, 891, 1494, 1167, 378, 15, -29, },
+ { -9, 181, 877, 1491, 1179, 388, 17, -28, },
+ { -9, 174, 864, 1487, 1190, 398, 19, -27, },
+ { -9, 168, 851, 1483, 1201, 409, 21, -28, },
+ { -10, 161, 837, 1479, 1212, 419, 23, -25, },
+ { -10, 155, 824, 1475, 1223, 430, 25, -26, },
+ { -11, 149, 811, 1470, 1233, 441, 27, -24, },
+ { -11, 142, 798, 1465, 1244, 452, 29, -23, },
+ { -12, 137, 784, 1460, 1254, 463, 32, -22, },
+ { -12, 131, 771, 1455, 1264, 474, 34, -21, },
+ { -12, 125, 758, 1449, 1275, 486, 37, -22, },
+ { -13, 120, 745, 1444, 1284, 497, 40, -21, },
+ { -13, 115, 732, 1438, 1294, 509, 42, -21, },
+ { -13, 109, 719, 1432, 1304, 520, 45, -20, },
+ { -14, 104, 706, 1425, 1313, 532, 48, -18, },
+ { -14, 100, 693, 1418, 1322, 544, 52, -19, },
+ { -14, 95, 680, 1412, 1332, 556, 55, -20, },
+ { -15, 90, 667, 1404, 1340, 568, 58, -16, },
+ { -15, 86, 655, 1397, 1349, 580, 62, -18, },
+ { -16, 82, 642, 1390, 1358, 592, 66, -18, },
+ { -16, 77, 630, 1382, 1366, 605, 69, -17, },
+ { -16, 73, 617, 1374, 1374, 617, 73, -16, },
+ { -17, 69, 605, 1366, 1382, 630, 77, -16, },
+ { -17, 66, 592, 1358, 1390, 642, 82, -17, },
+ { -18, 62, 580, 1349, 1397, 655, 86, -15, },
+ { -18, 58, 568, 1340, 1404, 667, 90, -13, },
+ { -18, 55, 556, 1332, 1412, 680, 95, -16, },
+ { -19, 52, 544, 1322, 1418, 693, 100, -14, },
+ { -19, 48, 532, 1313, 1425, 706, 104, -13, },
+ { -20, 45, 520, 1304, 1432, 719, 109, -13, },
+ { -20, 42, 509, 1294, 1438, 732, 115, -14, },
+ { -21, 40, 497, 1284, 1444, 745, 120, -13, },
+ { -22, 37, 486, 1275, 1449, 758, 125, -12, },
+ { -22, 34, 474, 1264, 1455, 771, 131, -11, },
+ { -23, 32, 463, 1254, 1460, 784, 137, -11, },
+ { -23, 29, 452, 1244, 1465, 798, 142, -11, },
+ { -24, 27, 441, 1233, 1470, 811, 149, -11, },
+ { -25, 25, 430, 1223, 1475, 824, 155, -11, },
+ { -26, 23, 419, 1212, 1479, 837, 161, -9, },
+ { -26, 21, 409, 1201, 1483, 851, 168, -11, },
+ { -27, 19, 398, 1190, 1487, 864, 174, -9, },
+ { -28, 17, 388, 1179, 1491, 877, 181, -9, },
+ { -29, 15, 378, 1167, 1494, 891, 188, -8, },
+ { -29, 13, 368, 1156, 1497, 904, 195, -8, },
+ { -30, 12, 358, 1144, 1500, 917, 202, -7, },
+ { -31, 10, 348, 1133, 1503, 931, 210, -8, },
+ { -32, 9, 338, 1121, 1505, 944, 217, -6, },
+ { -33, 7, 329, 1109, 1507, 957, 225, -5, },
+ { -34, 6, 319, 1097, 1509, 970, 233, -4, },
+ { -35, 5, 310, 1085, 1510, 983, 241, -3, },
+ { -36, 3, 301, 1073, 1512, 997, 249, -3, },
+ { -37, 2, 292, 1060, 1513, 1010, 257, -1, },
+ { -38, 1, 283, 1048, 1514, 1023, 266, -1, }
+};
+
+/* ScalingRatio = 4.0 */
+static const s16
+XV_fixedcoeff_taps8_SR4[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_8] = {
+ { 49, 366, 977, 1312, 977, 366, 49, 0, },
+ { 48, 357, 967, 1312, 986, 374, 51, 1, },
+ { 46, 349, 958, 1311, 995, 382, 54, 1, },
+ { 44, 342, 948, 1311, 1004, 390, 56, 1, },
+ { 42, 334, 939, 1310, 1013, 399, 58, 1, },
+ { 40, 326, 929, 1309, 1021, 407, 60, 4, },
+ { 39, 318, 919, 1308, 1030, 415, 63, 4, },
+ { 37, 311, 910, 1307, 1039, 424, 65, 3, },
+ { 36, 303, 900, 1305, 1047, 433, 68, 4, },
+ { 34, 296, 890, 1303, 1055, 442, 70, 6, },
+ { 33, 289, 880, 1301, 1064, 450, 73, 6, },
+ { 32, 282, 870, 1299, 1072, 459, 76, 6, },
+ { 31, 275, 861, 1297, 1080, 468, 79, 5, },
+ { 29, 268, 851, 1295, 1088, 477, 82, 6, },
+ { 28, 261, 841, 1292, 1096, 486, 85, 7, },
+ { 27, 254, 831, 1289, 1104, 496, 88, 7, },
+ { 26, 248, 821, 1287, 1112, 505, 91, 6, },
+ { 25, 241, 811, 1284, 1119, 514, 94, 8, },
+ { 24, 235, 800, 1280, 1127, 523, 98, 9, },
+ { 23, 228, 790, 1277, 1134, 533, 101, 10, },
+ { 22, 222, 780, 1273, 1141, 542, 105, 11, },
+ { 22, 216, 770, 1270, 1148, 552, 109, 9, },
+ { 21, 210, 760, 1266, 1155, 561, 112, 11, },
+ { 20, 204, 750, 1262, 1162, 571, 116, 11, },
+ { 19, 198, 740, 1257, 1169, 581, 120, 12, },
+ { 19, 193, 730, 1253, 1175, 590, 124, 12, },
+ { 18, 187, 720, 1248, 1182, 600, 129, 12, },
+ { 17, 182, 710, 1244, 1188, 610, 133, 12, },
+ { 17, 176, 700, 1239, 1194, 620, 137, 13, },
+ { 16, 171, 690, 1234, 1201, 630, 142, 12, },
+ { 16, 166, 680, 1229, 1206, 640, 146, 13, },
+ { 15, 161, 670, 1223, 1212, 650, 151, 14, },
+ { 15, 156, 660, 1218, 1218, 660, 156, 13, },
+ { 14, 151, 650, 1212, 1223, 670, 161, 15, },
+ { 14, 146, 640, 1206, 1229, 680, 166, 15, },
+ { 13, 142, 630, 1201, 1234, 690, 171, 15, },
+ { 13, 137, 620, 1194, 1239, 700, 176, 17, },
+ { 12, 133, 610, 1188, 1244, 710, 182, 17, },
+ { 12, 129, 600, 1182, 1248, 720, 187, 18, },
+ { 11, 124, 590, 1175, 1253, 730, 193, 20, },
+ { 11, 120, 581, 1169, 1257, 740, 198, 20, },
+ { 11, 116, 571, 1162, 1262, 750, 204, 20, },
+ { 10, 112, 561, 1155, 1266, 760, 210, 22, },
+ { 10, 109, 552, 1148, 1270, 770, 216, 21, },
+ { 10, 105, 542, 1141, 1273, 780, 222, 23, },
+ { 9, 101, 533, 1134, 1277, 790, 228, 24, },
+ { 9, 98, 523, 1127, 1280, 800, 235, 24, },
+ { 8, 94, 514, 1119, 1284, 811, 241, 25, },
+ { 8, 91, 505, 1112, 1287, 821, 248, 24, },
+ { 8, 88, 496, 1104, 1289, 831, 254, 26, },
+ { 7, 85, 486, 1096, 1292, 841, 261, 28, },
+ { 7, 82, 477, 1088, 1295, 851, 268, 28, },
+ { 6, 79, 468, 1080, 1297, 861, 275, 30, },
+ { 6, 76, 459, 1072, 1299, 870, 282, 32, },
+ { 5, 73, 450, 1064, 1301, 880, 289, 34, },
+ { 5, 70, 442, 1055, 1303, 890, 296, 35, },
+ { 4, 68, 433, 1047, 1305, 900, 303, 36, },
+ { 4, 65, 424, 1039, 1307, 910, 311, 36, },
+ { 3, 63, 415, 1030, 1308, 919, 318, 40, },
+ { 3, 60, 407, 1021, 1309, 929, 326, 41, },
+ { 2, 58, 399, 1013, 1310, 939, 334, 41, },
+ { 2, 56, 390, 1004, 1311, 948, 342, 43, },
+ { 1, 54, 382, 995, 1311, 958, 349, 46, },
+ { 1, 51, 374, 986, 1312, 967, 357, 48, }
+};
+
+/* ScalingRatio = 3.0 */
+static const s16
+XV_fixedcoeff_taps10_SR3[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_10] = {
+ { -31, 0, 359, 1033, 1399, 1033, 359, 0, -31, -25, },
+ { -31, -2, 350, 1022, 1398, 1043, 368, 3, -31, -24, },
+ { -30, -4, 341, 1012, 1398, 1053, 378, 5, -32, -25, },
+ { -30, -6, 333, 1002, 1398, 1062, 387, 8, -32, -26, },
+ { -30, -8, 324, 992, 1397, 1072, 396, 10, -32, -25, },
+ { -30, -10, 315, 981, 1396, 1082, 406, 13, -33, -24, },
+ { -29, -12, 307, 971, 1395, 1091, 415, 16, -33, -25, },
+ { -29, -13, 298, 960, 1393, 1101, 425, 18, -33, -24, },
+ { -29, -15, 290, 949, 1392, 1110, 434, 21, -34, -22, },
+ { -28, -17, 282, 939, 1390, 1120, 444, 25, -34, -25, },
+ { -28, -18, 274, 928, 1388, 1129, 454, 28, -34, -25, },
+ { -28, -20, 266, 917, 1386, 1138, 464, 31, -34, -24, },
+ { -27, -21, 258, 906, 1384, 1147, 474, 34, -35, -24, },
+ { -27, -22, 250, 895, 1381, 1156, 484, 38, -35, -24, },
+ { -27, -23, 242, 885, 1379, 1164, 494, 41, -35, -24, },
+ { -27, -25, 235, 874, 1376, 1173, 504, 45, -35, -24, },
+ { -26, -26, 227, 863, 1373, 1181, 515, 49, -36, -24, },
+ { -26, -27, 220, 852, 1369, 1190, 525, 53, -36, -24, },
+ { -26, -28, 213, 841, 1366, 1198, 535, 56, -36, -23, },
+ { -26, -29, 206, 830, 1362, 1206, 546, 61, -36, -24, },
+ { -25, -29, 199, 819, 1358, 1214, 556, 65, -36, -25, },
+ { -25, -30, 192, 808, 1354, 1222, 567, 69, -36, -25, },
+ { -25, -31, 185, 797, 1350, 1229, 577, 73, -36, -23, },
+ { -25, -32, 178, 785, 1346, 1237, 588, 78, -36, -23, },
+ { -25, -32, 172, 774, 1341, 1244, 599, 83, -36, -24, },
+ { -25, -33, 165, 763, 1336, 1252, 610, 87, -36, -23, },
+ { -24, -33, 159, 752, 1331, 1259, 620, 92, -36, -24, },
+ { -24, -34, 153, 741, 1326, 1266, 631, 97, -36, -24, },
+ { -24, -34, 147, 730, 1321, 1272, 642, 102, -36, -24, },
+ { -24, -35, 141, 719, 1315, 1279, 653, 107, -36, -23, },
+ { -24, -35, 135, 708, 1310, 1285, 664, 113, -36, -24, },
+ { -24, -35, 129, 697, 1304, 1292, 675, 118, -36, -24, },
+ { -24, -36, 124, 686, 1298, 1298, 686, 124, -36, -24, },
+ { -24, -36, 118, 675, 1292, 1304, 697, 129, -35, -24, },
+ { -24, -36, 113, 664, 1285, 1310, 708, 135, -35, -24, },
+ { -24, -36, 107, 653, 1279, 1315, 719, 141, -35, -23, },
+ { -24, -36, 102, 642, 1272, 1321, 730, 147, -34, -24, },
+ { -23, -36, 97, 631, 1266, 1326, 741, 153, -34, -25, },
+ { -23, -36, 92, 620, 1259, 1331, 752, 159, -33, -25, },
+ { -23, -36, 87, 610, 1252, 1336, 763, 165, -33, -25, },
+ { -23, -36, 83, 599, 1244, 1341, 774, 172, -32, -26, },
+ { -23, -36, 78, 588, 1237, 1346, 785, 178, -32, -25, },
+ { -23, -36, 73, 577, 1229, 1350, 797, 185, -31, -25, },
+ { -23, -36, 69, 567, 1222, 1354, 808, 192, -30, -27, },
+ { -23, -36, 65, 556, 1214, 1358, 819, 199, -29, -27, },
+ { -24, -36, 61, 546, 1206, 1362, 830, 206, -29, -26, },
+ { -24, -36, 56, 535, 1198, 1366, 841, 213, -28, -25, },
+ { -24, -36, 53, 525, 1190, 1369, 852, 220, -27, -26, },
+ { -24, -36, 49, 515, 1181, 1373, 863, 227, -26, -26, },
+ { -24, -35, 45, 504, 1173, 1376, 874, 235, -25, -27, },
+ { -24, -35, 41, 494, 1164, 1379, 885, 242, -23, -27, },
+ { -24, -35, 38, 484, 1156, 1381, 895, 250, -22, -27, },
+ { -24, -35, 34, 474, 1147, 1384, 906, 258, -21, -27, },
+ { -24, -34, 31, 464, 1138, 1386, 917, 266, -20, -28, },
+ { -24, -34, 28, 454, 1129, 1388, 928, 274, -18, -29, },
+ { -24, -34, 25, 444, 1120, 1390, 939, 282, -17, -29, },
+ { -24, -34, 21, 434, 1110, 1392, 949, 290, -15, -27, },
+ { -24, -33, 18, 425, 1101, 1393, 960, 298, -13, -29, },
+ { -24, -33, 16, 415, 1091, 1395, 971, 307, -12, -30, },
+ { -25, -33, 13, 406, 1082, 1396, 981, 315, -10, -29, },
+ { -25, -32, 10, 396, 1072, 1397, 992, 324, -8, -30, },
+ { -25, -32, 8, 387, 1062, 1398, 1002, 333, -6, -31, },
+ { -25, -32, 5, 378, 1053, 1398, 1012, 341, -4, -30, },
+ { -25, -31, 3, 368, 1043, 1398, 1022, 350, -2, -30, }
+};
+
+/* ScalingRatio = 4.0 */
+static const s16
+XV_fixedcoeff_taps10_SR4[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_10] = {
+ { 0, 107, 454, 924, 1150, 924, 454, 107, 0, -24, },
+ { 0, 104, 446, 917, 1149, 930, 461, 110, 0, -21, },
+ { -1, 100, 439, 910, 1149, 936, 468, 114, 1, -20, },
+ { -1, 97, 432, 904, 1149, 942, 475, 117, 2, -21, },
+ { -2, 94, 425, 897, 1148, 948, 482, 121, 2, -19, },
+ { -2, 91, 418, 890, 1147, 954, 490, 125, 3, -20, },
+ { -3, 88, 411, 883, 1147, 960, 497, 128, 3, -18, },
+ { -3, 85, 404, 876, 1146, 966, 504, 132, 4, -18, },
+ { -3, 82, 397, 869, 1145, 972, 512, 136, 5, -19, },
+ { -4, 79, 390, 862, 1144, 978, 519, 140, 5, -17, },
+ { -4, 76, 384, 855, 1142, 983, 526, 144, 6, -16, },
+ { -4, 74, 377, 848, 1141, 989, 534, 148, 7, -18, },
+ { -5, 71, 370, 841, 1139, 995, 541, 152, 7, -15, },
+ { -5, 68, 364, 834, 1138, 1000, 549, 156, 8, -16, },
+ { -5, 66, 357, 827, 1136, 1005, 556, 160, 9, -15, },
+ { -6, 63, 350, 820, 1134, 1011, 564, 165, 10, -15, },
+ { -6, 61, 344, 812, 1132, 1016, 571, 169, 11, -14, },
+ { -6, 59, 338, 805, 1130, 1021, 579, 174, 12, -16, },
+ { -6, 56, 331, 798, 1128, 1026, 586, 178, 13, -14, },
+ { -7, 54, 325, 790, 1126, 1031, 594, 183, 14, -14, },
+ { -7, 52, 319, 783, 1124, 1036, 601, 187, 15, -14, },
+ { -7, 50, 312, 776, 1121, 1041, 609, 192, 16, -14, },
+ { -7, 48, 306, 768, 1119, 1045, 617, 197, 17, -14, },
+ { -8, 46, 300, 761, 1116, 1050, 624, 202, 18, -13, },
+ { -8, 44, 294, 753, 1113, 1054, 632, 207, 19, -12, },
+ { -8, 42, 288, 746, 1110, 1059, 639, 212, 20, -12, },
+ { -8, 40, 282, 738, 1107, 1063, 647, 217, 22, -12, },
+ { -9, 38, 277, 731, 1104, 1067, 655, 222, 23, -12, },
+ { -9, 36, 271, 723, 1101, 1071, 662, 227, 24, -10, },
+ { -9, 35, 265, 715, 1097, 1075, 670, 232, 26, -10, },
+ { -9, 33, 259, 708, 1094, 1079, 677, 238, 27, -10, },
+ { -10, 32, 254, 700, 1091, 1083, 685, 243, 28, -10, },
+ { -10, 30, 248, 693, 1087, 1087, 693, 248, 30, -10, },
+ { -10, 28, 243, 685, 1083, 1091, 700, 254, 32, -10, },
+ { -10, 27, 238, 677, 1079, 1094, 708, 259, 33, -9, },
+ { -11, 26, 232, 670, 1075, 1097, 715, 265, 35, -8, },
+ { -11, 24, 227, 662, 1071, 1101, 723, 271, 36, -8, },
+ { -11, 23, 222, 655, 1067, 1104, 731, 277, 38, -10, },
+ { -12, 22, 217, 647, 1063, 1107, 738, 282, 40, -8, },
+ { -12, 20, 212, 639, 1059, 1110, 746, 288, 42, -8, },
+ { -12, 19, 207, 632, 1054, 1113, 753, 294, 44, -8, },
+ { -12, 18, 202, 624, 1050, 1116, 761, 300, 46, -9, },
+ { -13, 17, 197, 617, 1045, 1119, 768, 306, 48, -8, },
+ { -13, 16, 192, 609, 1041, 1121, 776, 312, 50, -8, },
+ { -13, 15, 187, 601, 1036, 1124, 783, 319, 52, -8, },
+ { -14, 14, 183, 594, 1031, 1126, 790, 325, 54, -7, },
+ { -14, 13, 178, 586, 1026, 1128, 798, 331, 56, -6, },
+ { -14, 12, 174, 579, 1021, 1130, 805, 338, 59, -8, },
+ { -15, 11, 169, 571, 1016, 1132, 812, 344, 61, -5, },
+ { -15, 10, 165, 564, 1011, 1134, 820, 350, 63, -6, },
+ { -16, 9, 160, 556, 1005, 1136, 827, 357, 66, -4, },
+ { -16, 8, 156, 549, 1000, 1138, 834, 364, 68, -5, },
+ { -16, 7, 152, 541, 995, 1139, 841, 370, 71, -4, },
+ { -17, 7, 148, 534, 989, 1141, 848, 377, 74, -5, },
+ { -17, 6, 144, 526, 983, 1142, 855, 384, 76, -3, },
+ { -18, 5, 140, 519, 978, 1144, 862, 390, 79, -3, },
+ { -18, 5, 136, 512, 972, 1145, 869, 397, 82, -4, },
+ { -19, 4, 132, 504, 966, 1146, 876, 404, 85, -2, },
+ { -19, 3, 128, 497, 960, 1147, 883, 411, 88, -2, },
+ { -20, 3, 125, 490, 954, 1147, 890, 418, 91, -2, },
+ { -20, 2, 121, 482, 948, 1148, 897, 425, 94, -1, },
+ { -21, 2, 117, 475, 942, 1149, 904, 432, 97, -1, },
+ { -21, 1, 114, 468, 936, 1149, 910, 439, 100, 0, },
+ { -22, 0, 110, 461, 930, 1149, 917, 446, 104, 1, }
+};
+
+/*
+ * ScalingRatio = 4: 64-phase, 12-tap polyphase coefficient table used by
+ * xv_select_coeff() when the down-scale ratio exceeds 3.5x (scale_ratio > 35).
+ *
+ * NOTE(review): rows are nearly symmetric except the final column (row 0
+ * ends in -22 while subsequent rows end around +17..+22). Presumably these
+ * are the vendor-tuned values as shipped — confirm against the Xilinx VPSS
+ * coefficient sources before changing anything here.
+ */
+static const s16
+XV_fixedcoeff_taps12_SR4[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_12] = {
+ { -19, 0, 152, 498, 893, 1070, 893, 498, 152, 0, -19, -22, },
+ { -19, -1, 147, 487, 879, 1059, 889, 499, 155, 1, -19, 19, },
+ { -19, -2, 143, 480, 874, 1059, 894, 506, 159, 2, -19, 19, },
+ { -19, -3, 139, 474, 869, 1059, 899, 512, 163, 3, -19, 19, },
+ { -19, -4, 136, 468, 863, 1059, 904, 519, 167, 4, -19, 18, },
+ { -19, -5, 132, 461, 858, 1058, 909, 525, 171, 5, -19, 20, },
+ { -19, -5, 128, 455, 853, 1058, 913, 531, 175, 7, -19, 19, },
+ { -18, -6, 125, 449, 847, 1057, 918, 538, 180, 8, -19, 17, },
+ { -18, -7, 121, 443, 842, 1056, 923, 544, 184, 9, -19, 18, },
+ { -18, -8, 118, 436, 836, 1056, 927, 551, 188, 10, -19, 19, },
+ { -18, -8, 114, 430, 831, 1055, 932, 557, 193, 12, -19, 17, },
+ { -18, -9, 111, 424, 825, 1054, 936, 564, 197, 13, -19, 18, },
+ { -18, -10, 107, 418, 819, 1053, 941, 570, 202, 14, -19, 19, },
+ { -18, -10, 104, 412, 814, 1052, 945, 577, 206, 16, -19, 17, },
+ { -18, -11, 101, 406, 808, 1050, 949, 583, 211, 17, -19, 19, },
+ { -18, -11, 98, 400, 802, 1049, 954, 590, 216, 19, -19, 16, },
+ { -18, -12, 95, 394, 796, 1048, 958, 596, 220, 20, -19, 18, },
+ { -18, -12, 92, 388, 791, 1046, 962, 603, 225, 22, -19, 16, },
+ { -18, -13, 89, 382, 785, 1045, 966, 609, 230, 24, -19, 16, },
+ { -18, -13, 86, 376, 779, 1043, 970, 616, 235, 25, -19, 16, },
+ { -18, -14, 83, 370, 773, 1041, 973, 622, 240, 27, -19, 18, },
+ { -18, -14, 80, 364, 767, 1039, 977, 629, 244, 29, -19, 18, },
+ { -18, -15, 77, 358, 761, 1037, 981, 635, 249, 31, -19, 19, },
+ { -18, -15, 74, 352, 755, 1035, 984, 642, 255, 33, -19, 18, },
+ { -18, -15, 71, 347, 749, 1033, 988, 648, 260, 35, -19, 17, },
+ { -18, -16, 69, 341, 743, 1031, 991, 654, 265, 36, -19, 19, },
+ { -18, -16, 66, 335, 736, 1029, 995, 661, 270, 38, -19, 19, },
+ { -18, -16, 64, 330, 730, 1026, 998, 667, 275, 41, -18, 17, },
+ { -18, -17, 61, 324, 724, 1024, 1001, 674, 280, 43, -18, 18, },
+ { -18, -17, 59, 318, 718, 1021, 1004, 680, 286, 45, -18, 18, },
+ { -18, -17, 56, 313, 712, 1019, 1007, 686, 291, 47, -18, 18, },
+ { -18, -17, 54, 307, 705, 1016, 1010, 693, 296, 49, -18, 19, },
+ { -18, -18, 51, 302, 699, 1013, 1013, 699, 302, 51, -18, 20, },
+ { -18, -18, 49, 296, 693, 1010, 1016, 705, 307, 54, -17, 19, },
+ { -18, -18, 47, 291, 686, 1007, 1019, 712, 313, 56, -17, 18, },
+ { -18, -18, 45, 286, 680, 1004, 1021, 718, 318, 59, -17, 18, },
+ { -18, -18, 43, 280, 674, 1001, 1024, 724, 324, 61, -17, 18, },
+ { -18, -18, 41, 275, 667, 998, 1026, 730, 330, 64, -16, 17, },
+ { -18, -19, 38, 270, 661, 995, 1029, 736, 335, 66, -16, 19, },
+ { -19, -19, 36, 265, 654, 991, 1031, 743, 341, 69, -16, 20, },
+ { -19, -19, 35, 260, 648, 988, 1033, 749, 347, 71, -15, 18, },
+ { -19, -19, 33, 255, 642, 984, 1035, 755, 352, 74, -15, 19, },
+ { -19, -19, 31, 249, 635, 981, 1037, 761, 358, 77, -15, 20, },
+ { -19, -19, 29, 244, 629, 977, 1039, 767, 364, 80, -14, 19, },
+ { -19, -19, 27, 240, 622, 973, 1041, 773, 370, 83, -14, 19, },
+ { -19, -19, 25, 235, 616, 970, 1043, 779, 376, 86, -13, 17, },
+ { -19, -19, 24, 230, 609, 966, 1045, 785, 382, 89, -13, 17, },
+ { -19, -19, 22, 225, 603, 962, 1046, 791, 388, 92, -12, 17, },
+ { -19, -19, 20, 220, 596, 958, 1048, 796, 394, 95, -12, 19, },
+ { -20, -19, 19, 216, 590, 954, 1049, 802, 400, 98, -11, 18, },
+ { -20, -19, 17, 211, 583, 949, 1050, 808, 406, 101, -11, 21, },
+ { -20, -19, 16, 206, 577, 945, 1052, 814, 412, 104, -10, 19, },
+ { -20, -19, 14, 202, 570, 941, 1053, 819, 418, 107, -10, 21, },
+ { -20, -19, 13, 197, 564, 936, 1054, 825, 424, 111, -9, 20, },
+ { -20, -19, 12, 193, 557, 932, 1055, 831, 430, 114, -8, 19, },
+ { -21, -19, 10, 188, 551, 927, 1056, 836, 436, 118, -8, 22, },
+ { -21, -19, 9, 184, 544, 923, 1056, 842, 443, 121, -7, 21, },
+ { -21, -19, 8, 180, 538, 918, 1057, 847, 449, 125, -6, 20, },
+ { -21, -19, 7, 175, 531, 913, 1058, 853, 455, 128, -5, 21, },
+ { -21, -19, 5, 171, 525, 909, 1058, 858, 461, 132, -5, 22, },
+ { -21, -19, 4, 167, 519, 904, 1059, 863, 468, 136, -4, 20, },
+ { -22, -19, 3, 163, 512, 899, 1059, 869, 474, 139, -3, 22, },
+ { -22, -19, 2, 159, 506, 894, 1059, 874, 480, 143, -2, 22, },
+ { -22, -19, 1, 155, 499, 889, 1059, 879, 487, 147, -1, 22, }
+};
+
+/*
+ * Color-space codes written to the HWREG_COLORMODE / HWREG_COLORMODEOUT
+ * registers (see xv_vscaler_setup_video_fmt()/xv_hscaler_setup_video_fmt()).
+ */
+enum xilinx_scaler_vid_reg_fmts {
+ XVIDC_CSF_RGB = 0,
+ XVIDC_CSF_YCRCB_444,
+ XVIDC_CSF_YCRCB_422,
+ XVIDC_CSF_YCRCB_420,
+};
+
+/*
+ * Media bus formats advertised to the DRM side via the bridge
+ * get_input_fmts/get_output_fmts callbacks. Each entry maps to one of the
+ * enum xilinx_scaler_vid_reg_fmts codes in the setup_video_fmt helpers.
+ */
+static const u32 xilinx_scaler_video_fmts[] = {
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_VUY8_1X24,
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_VYYUYY8_1X24,
+};
+
+/*
+ * Feature flag: IP revision uses the fixed PHASEH register layout
+ * (selects the register base in xv_hscaler_set_phases()).
+ */
+#define XSCALER_HPHASE_FIX BIT(0)
+
+/**
+ * struct xscaler_feature - dt or IP property structure
+ * @flags: Bitmask of properties enabled in IP or dt
+ */
+struct xscaler_feature {
+ u32 flags;
+};
+
+/**
+ * struct xilinx_scaler - Core configuration of scaler device structure
+ * @base: pointer to register base address
+ * @dev: device structure
+ * @bridge: xilinx bridge
+ * @width_in: input width
+ * @height_in: input height
+ * @width_out: output width
+ * @height_out: output height
+ * @fmt_in: input format
+ * @fmt_out: output format
+ * @num_hori_taps: number of horizontal taps
+ * @num_vert_taps: number of vertical taps
+ * @max_num_phases: maximum number of phases
+ * @pix_per_clk: Pixels per Clock cycle the IP operates upon
+ * @max_pixels: The maximum number of pixels that the H-scaler examines
+ * @max_lines: The maximum number of lines that the V-scaler examines
+ * @H_phases: The phases needed to program the H-scaler for different taps.
+ *	Packed control words built by xv_hscaler_calculate_phases(); must be
+ *	zeroed before recalculation since entries are OR-ed in.
+ * @hscaler_coeff: The complete array of H-scaler coefficients
+ * @vscaler_coeff: The complete array of V-scaler coefficients
+ * @is_polyphase: Track if scaling algorithm is polyphase or not
+ * @rst_gpio: GPIO reset line to bring VPSS Scaler out of reset
+ * @ctrl_clk: AXI Lite clock
+ * @axis_clk: Video Clock
+ * @cfg: Pointer to scaler config structure
+ */
+struct xilinx_scaler {
+ void __iomem *base;
+ struct device *dev;
+ struct xlnx_bridge bridge;
+ u32 width_in;
+ u32 height_in;
+ u32 width_out;
+ u32 height_out;
+ u32 fmt_in;
+ u32 fmt_out;
+ u32 num_hori_taps;
+ u32 num_vert_taps;
+ u32 max_num_phases;
+ u32 pix_per_clk;
+ u32 max_pixels;
+ u32 max_lines;
+ u32 H_phases[XV_HSCALER_MAX_LINE_WIDTH];
+ short hscaler_coeff[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_MAX_H_TAPS];
+ short vscaler_coeff[XV_VSCALER_MAX_V_PHASES][XV_VSCALER_MAX_V_TAPS];
+ bool is_polyphase;
+ struct gpio_desc *rst_gpio;
+ struct clk *ctrl_clk;
+ struct clk *axis_clk;
+ const struct xscaler_feature *cfg;
+};
+
+/* Write @val to the scaler register at @base + @offset. */
+static inline void xilinx_scaler_write(void __iomem *base, u32 offset, u32 val)
+{
+	void __iomem *addr = base + offset;
+
+	writel(val, addr);
+}
+
+/* Read back the scaler register at @base + @offset. */
+static inline u32 xilinx_scaler_read(void __iomem *base, u32 offset)
+{
+	void __iomem *addr = base + offset;
+
+	return readl(addr);
+}
+
+/* Read-modify-write: clear the @clr bits in the register at @offset. */
+static inline void xilinx_scaler_clr(void __iomem *base, u32 offset, u32 clr)
+{
+	u32 reg = xilinx_scaler_read(base, offset);
+
+	xilinx_scaler_write(base, offset, reg & ~clr);
+}
+
+/* Read-modify-write: set the @set bits in the register at @offset. */
+static inline void xilinx_scaler_set(void __iomem *base, u32 offset, u32 set)
+{
+	u32 reg = xilinx_scaler_read(base, offset);
+
+	xilinx_scaler_write(base, offset, reg | set);
+}
+
+/*
+ * Assert reset for the IP blocks in @ip_block by clearing their bits in the
+ * reset GPIO data register of the given @channel (channels are 1-based).
+ */
+static inline void
+xilinx_scaler_disable_block(struct xilinx_scaler *scaler, u32 channel,
+			    u32 ip_block)
+{
+	u32 reg = ((channel - 1) * XGPIO_CHAN_OFFSET) +
+		  XGPIO_DATA_OFFSET + S_AXIS_RESET_OFF;
+
+	xilinx_scaler_clr(scaler->base, reg, ip_block);
+}
+
+/*
+ * De-assert reset for the IP blocks in @ip_block by setting their bits in
+ * the reset GPIO data register of the given @channel (channels are 1-based).
+ */
+static inline void
+xilinx_scaler_enable_block(struct xilinx_scaler *scaler, u32 channel,
+			   u32 ip_block)
+{
+	u32 reg = ((channel - 1) * XGPIO_CHAN_OFFSET) +
+		  XGPIO_DATA_OFFSET + S_AXIS_RESET_OFF;
+
+	xilinx_scaler_set(scaler->base, reg, ip_block);
+}
+
+/**
+ * bridge_to_layer - Gets the parent structure
+ * @bridge: pointer to the member.
+ *
+ * Return: pointer to the struct xilinx_scaler that embeds @bridge
+ */
+static inline struct xilinx_scaler *bridge_to_layer(struct xlnx_bridge *bridge)
+{
+ return container_of(bridge, struct xilinx_scaler, bridge);
+}
+
+/**
+ * xilinx_scaler_reset - Resets scaler block
+ * @scaler: Pointer to scaler device structure
+ *
+ * This function resets the scaler block: first hold every sub-block in
+ * reset, then release only the AXI-Stream path. Order matters here.
+ */
+static void xilinx_scaler_reset(struct xilinx_scaler *scaler)
+{
+ xilinx_scaler_disable_block(scaler, XGPIO_CH_RESET_SEL,
+ XGPIO_RESET_MASK_ALL_BLOCKS);
+ xilinx_scaler_enable_block(scaler, XGPIO_CH_RESET_SEL,
+ XGPIO_RESET_MASK_IP_AXIS);
+}
+
+/**
+ * xv_hscaler_calculate_phases - Calculates h-scaler phases
+ * @scaler: Pointer to scaler device structure
+ * @width_in: input width
+ * @width_out: output width
+ * @pixel_rate: horizontal scaling step in STEP_PRECISION fixed point
+ *
+ * Builds the per-clock phase/read-index/write-enable control words in
+ * scaler->H_phases. Entries are OR-ed into the array, so the caller must
+ * have zeroed H_phases beforehand (done in xilinx_scaler_bridge_set_input()).
+ */
+static void
+xv_hscaler_calculate_phases(struct xilinx_scaler *scaler,
+			    u32 width_in, u32 width_out, u32 pixel_rate)
+{
+	unsigned int loop_width;
+	unsigned int x, s;
+	int offset = 0;
+	int xwrite_pos = 0;
+	bool output_write_en;
+	bool get_new_pix;
+	u64 phaseH;
+	u32 array_idx = 0;
+	int nr_rds = 0;	/* must start at 0: accumulated below (was UB) */
+	int nr_rds_clck;
+	unsigned int nphases = scaler->max_num_phases;
+	unsigned int nppc = scaler->pix_per_clk;
+	unsigned int shift = XHSC_STEP_PRECISION_SHIFT - ilog2(nphases);
+
+	loop_width = max_t(u32, width_in, width_out);
+	loop_width = ALIGN(loop_width + nppc - 1, nppc);
+
+	for (x = 0; x < loop_width; x++) {
+		nr_rds_clck = 0;
+		for (s = 0; s < nppc; s++) {
+			/* Current filter phase for this output sample. */
+			phaseH = (offset >> shift) & (nphases - 1);
+			get_new_pix = false;
+			output_write_en = false;
+			if ((offset >> XHSC_STEP_PRECISION_SHIFT) != 0) {
+				/* Step crossed 1.0: consume an input pixel. */
+				get_new_pix = true;
+				offset -= (1 << XHSC_STEP_PRECISION_SHIFT);
+				array_idx++;
+			}
+
+			if (((offset >> XHSC_STEP_PRECISION_SHIFT) == 0) &&
+			    xwrite_pos < width_out) {
+				/* Produce one output pixel this slot. */
+				offset += pixel_rate;
+				output_write_en = true;
+				xwrite_pos++;
+			}
+
+			scaler->H_phases[x] |= (phaseH <<
+						(s * XHSC_HPHASE_MULTIPLIER));
+			scaler->H_phases[x] |= (array_idx <<
+						(XHSC_HPHASE_SHIFT_BY_6 +
+						 (s * XHSC_HPHASE_MULTIPLIER)));
+			if (output_write_en) {
+				scaler->H_phases[x] |=
+					(XV_HSCALER_PHASESH_V_OUTPUT_WR_EN <<
+					 (s * XHSC_HPHASE_MULTIPLIER));
+			}
+
+			if (get_new_pix)
+				nr_rds_clck++;
+		}
+		/* Read index wraps within one clock's worth of pixels. */
+		if (array_idx >= nppc)
+			array_idx &= (nppc - 1);
+
+		nr_rds += nr_rds_clck;
+		if (nr_rds >= nppc)
+			nr_rds -= nppc;
+	}
+}
+
+/**
+ * xv_hscaler_load_ext_coeff - Loads external coefficients of h-scaler
+ * @scaler: Pointer to scaler registers base
+ * @coeff: Pointer to coeff array (nphases * @ntaps entries)
+ * @ntaps: number of taps
+ *
+ * Copies @coeff into the centered region of hscaler_coeff; any unused
+ * leading/trailing taps are left at zero.
+ */
+static void
+xv_hscaler_load_ext_coeff(struct xilinx_scaler *scaler,
+			  const short *coeff, u32 ntaps)
+{
+	unsigned int phase, tap, offset;
+	u32 nphases = scaler->max_num_phases;
+
+	/* Center the effective taps inside the maximum-width table. */
+	offset = (XV_HSCALER_MAX_H_TAPS - ntaps) >> 1;
+
+	for (phase = 0; phase < nphases; phase++) {
+		/* Zero the full row first; this covers both pad regions. */
+		for (tap = 0; tap < XV_HSCALER_MAX_H_TAPS; tap++)
+			scaler->hscaler_coeff[phase][tap] = 0;
+		/* Then drop the supplied coefficients into the middle. */
+		for (tap = 0; tap < ntaps; tap++)
+			scaler->hscaler_coeff[phase][tap + offset] =
+				coeff[phase * ntaps + tap];
+	}
+}
+
+/**
+ * xv_select_coeff - Select a fixed coefficient table for the scaling ratio
+ * @scaler: Pointer to scaler device structure
+ * @in: input dimension (width or height)
+ * @out: output dimension (width or height)
+ * @ntaps: in: configured number of taps; out: effective taps of the table
+ *
+ * Return: pointer to the selected [phases][taps] table, or NULL when the
+ * configured number of taps is unsupported.
+ */
+static const short *xv_select_coeff(struct xilinx_scaler *scaler,
+ u32 in, u32 out, u32 *ntaps)
+{
+ const short *coeff = NULL;
+
+ /*
+ * Scale Down Mode will use dynamic filter selection logic
+ * Scale Up Mode (including 1:1) will always use 6 tap filter
+ */
+ if (out < in) {
+ /* scale_ratio is 10x the downscale factor: 35 means 3.5:1 */
+ u16 scale_ratio = (in * 10) / out;
+
+ /* Since XV_HSCALER_TAPS_* is same as XV_VSCALER_TAPS_* */
+ switch (*ntaps) {
+ case XV_HSCALER_TAPS_6:
+ *ntaps = XV_HSCALER_TAPS_6;
+ if (scale_ratio > 35)
+ coeff = &XV_fixedcoeff_taps6_SR4[0][0];
+ else if (scale_ratio > 25)
+ coeff = &XV_fixedcoeff_taps6_SR3[0][0];
+ else if (scale_ratio > 15)
+ coeff = &XV_fixedcoeff_taps6_SR2[0][0];
+ else
+ coeff = &XV_fixedcoeff_taps6_SR1p2[0][0];
+ break;
+ case XV_HSCALER_TAPS_8:
+ if (scale_ratio > 35) {
+ coeff = &XV_fixedcoeff_taps8_SR4[0][0];
+ *ntaps = XV_HSCALER_TAPS_8;
+ } else if (scale_ratio > 25) {
+ coeff = &XV_fixedcoeff_taps8_SR3[0][0];
+ *ntaps = XV_HSCALER_TAPS_8;
+ } else if (scale_ratio > 15) {
+ coeff = &XV_fixedcoeff_taps8_SR2[0][0];
+ *ntaps = XV_HSCALER_TAPS_8;
+ } else {
+ /* Mild downscale: 6-tap table is sufficient */
+ coeff = &XV_fixedcoeff_taps6_SR1p2[0][0];
+ *ntaps = XV_HSCALER_TAPS_6;
+ }
+ break;
+ case XV_HSCALER_TAPS_10:
+ if (scale_ratio > 35) {
+ coeff = &XV_fixedcoeff_taps10_SR4[0][0];
+ *ntaps = XV_HSCALER_TAPS_10;
+ } else if (scale_ratio > 25) {
+ coeff = &XV_fixedcoeff_taps10_SR3[0][0];
+ *ntaps = XV_HSCALER_TAPS_10;
+ } else if (scale_ratio > 15) {
+ coeff = &XV_fixedcoeff_taps8_SR2[0][0];
+ *ntaps = XV_HSCALER_TAPS_8;
+ } else {
+ coeff = &XV_fixedcoeff_taps6_SR1p2[0][0];
+ *ntaps = XV_HSCALER_TAPS_6;
+ }
+ break;
+ case XV_HSCALER_TAPS_12:
+ if (scale_ratio > 35) {
+ coeff = &XV_fixedcoeff_taps12_SR4[0][0];
+ *ntaps = XV_HSCALER_TAPS_12;
+ } else if (scale_ratio > 25) {
+ coeff = &XV_fixedcoeff_taps10_SR3[0][0];
+ *ntaps = XV_HSCALER_TAPS_10;
+ } else if (scale_ratio > 15) {
+ coeff = &XV_fixedcoeff_taps8_SR2[0][0];
+ *ntaps = XV_HSCALER_TAPS_8;
+ } else {
+ coeff = &XV_fixedcoeff_taps6_SR1p2[0][0];
+ *ntaps = XV_HSCALER_TAPS_6;
+ }
+ break;
+ default:
+ /* coeff stays NULL; caller turns this into -EINVAL */
+ dev_err(scaler->dev,
+ "Unsupported number of taps = %d",
+ *ntaps);
+ }
+ } else {
+ dev_dbg(scaler->dev, "scaler : scale up 6 tap");
+ coeff = &XV_lanczos2_taps6[0][0];
+ *ntaps = XV_HSCALER_TAPS_6;
+ }
+
+ return coeff;
+}
+
+/**
+ * xv_hscaler_select_coeff - Selection of H-Scaler coefficients of operation
+ * @scaler: Pointer to Scaler device structure
+ * @width_in: Width of input video
+ * @width_out: Width of desired output video
+ *
+ * There are instances when a N-tap filter might operate in an M-tap
+ * configuration where N > M.
+ *
+ * For example :
+ * Depending on the ratio of scaling (while downscaling), a 12-tap
+ * filter may operate with 10 tap coefficients and zero-pads the remaining
+ * coefficients.
+ *
+ * While upscaling the driver will program 6-tap filter coefficients
+ * in any N-tap configurations (for N >= 6).
+ *
+ * This selection is adopted by the driver as it gives optimal
+ * video output determined by repeated testing of the IP
+ *
+ * Return: Will return 0 if successful. Returns -EINVAL on an unsupported
+ * H-scaler number of taps.
+ */
+static int
+xv_hscaler_select_coeff(struct xilinx_scaler *scaler,
+ u32 width_in, u32 width_out)
+{
+ const short *coeff;
+ u32 ntaps = scaler->num_hori_taps;
+
+ coeff = xv_select_coeff(scaler, width_in, width_out, &ntaps);
+ if (!coeff)
+ return -EINVAL;
+
+ xv_hscaler_load_ext_coeff(scaler, coeff, ntaps);
+ return 0;
+}
+
+/**
+ * xv_hscaler_set_coeff - Sets h-scaler coefficients
+ * @scaler: Pointer to scaler device structure
+ *
+ * Packs two s16 coefficients per 32-bit register and programs the
+ * h-scaler filter coefficient block for every phase.
+ */
+static void xv_hscaler_set_coeff(struct xilinx_scaler *scaler)
+{
+	u32 ntaps = scaler->num_hori_taps;
+	u32 nphases = scaler->max_num_phases;
+	u32 base_addr;
+	int phase, pair, idx;
+	int offset = (XV_HSCALER_MAX_H_TAPS - ntaps) / 2;
+
+	base_addr = V_HSCALER_OFF + XV_HSCALER_CTRL_ADDR_HWREG_HFLTCOEFF_BASE;
+	for (phase = 0; phase < nphases; phase++) {
+		for (pair = 0; pair < ntaps / 2; pair++) {
+			int lo, hi;
+
+			idx = pair * 2 + offset;
+			lo = scaler->hscaler_coeff[phase][idx] &
+			     XHSC_MASK_LOW_16BITS;
+			hi = scaler->hscaler_coeff[phase][idx + 1];
+			xilinx_scaler_write(scaler->base, base_addr +
+					    ((phase * ntaps / 2 + pair) * 4),
+					    (hi << XSCALER_BITSHIFT_16) | lo);
+		}
+	}
+}
+
+/**
+ * xv_vscaler_load_ext_coeff - Loads external coefficients of v-scaler
+ * @scaler: Pointer to scaler device structure
+ * @coeff: Pointer to coeff array (nphases * @ntaps entries)
+ * @ntaps: number of taps
+ *
+ * Copies @coeff into the centered region of vscaler_coeff and zero-pads
+ * both edges when the effective taps are fewer than the maximum. Mirrors
+ * xv_hscaler_load_ext_coeff() (unsigned indices, no redundant ternary).
+ */
+static void
+xv_vscaler_load_ext_coeff(struct xilinx_scaler *scaler,
+			  const short *coeff, u32 ntaps)
+{
+	unsigned int i, j, pad, offset;
+	u32 nphases = scaler->max_num_phases;
+
+	/* Determine if coefficient needs padding (effective vs. max taps) */
+	pad = XV_VSCALER_MAX_V_TAPS - ntaps;
+	/* (pad >> 1) is already 0 when pad == 0; ternary was redundant. */
+	offset = pad >> 1;
+	/* Load User defined coefficients into scaler coefficient table */
+	for (i = 0; i < nphases; i++) {
+		for (j = 0; j < ntaps; ++j)
+			scaler->vscaler_coeff[i][j + offset] =
+				coeff[i * ntaps + j];
+	}
+	if (pad) {
+		/* effective taps < max_taps: zero both pad regions */
+		for (i = 0; i < nphases; i++) {
+			/* pad left */
+			for (j = 0; j < offset; j++)
+				scaler->vscaler_coeff[i][j] = 0;
+			/* pad right */
+			for (j = ntaps + offset;
+			     j < XV_VSCALER_MAX_V_TAPS; j++)
+				scaler->vscaler_coeff[i][j] = 0;
+		}
+	}
+}
+
+/**
+ * xv_vscaler_set_coeff - Sets v-scaler coefficients
+ * @scaler: Pointer to scaler device structure
+ *
+ * Packs two s16 coefficients per 32-bit register and programs the
+ * v-scaler filter coefficient block for every phase.
+ */
+static void xv_vscaler_set_coeff(struct xilinx_scaler *scaler)
+{
+	u32 nphases = scaler->max_num_phases;
+	u32 ntaps = scaler->num_vert_taps;
+	u32 base_addr;
+	int phase, pair, idx;
+	int offset = (XV_VSCALER_MAX_V_TAPS - ntaps) / 2;
+
+	base_addr = V_VSCALER_OFF + XV_VSCALER_CTRL_ADDR_HWREG_VFLTCOEFF_BASE;
+
+	for (phase = 0; phase < nphases; phase++) {
+		for (pair = 0; pair < ntaps / 2; pair++) {
+			int lo, hi;
+
+			idx = pair * 2 + offset;
+			lo = scaler->vscaler_coeff[phase][idx] &
+			     XVSC_MASK_LOW_16BITS;
+			hi = scaler->vscaler_coeff[phase][idx + 1];
+			xilinx_scaler_write(scaler->base, base_addr +
+					    ((phase * ntaps / 2 + pair) * 4),
+					    (hi << XSCALER_BITSHIFT_16) | lo);
+		}
+	}
+}
+
+/**
+ * xv_vscaler_coeff_select - Selection of V-Scaler coefficients of operation
+ * @scaler: Pointer to Scaler device structure
+ * @height_in: Height of input video
+ * @height_out: Height of desired output video
+ *
+ * There are instances when a N-tap filter might operate in an M-tap
+ * configuration where N > M.
+ *
+ * For example :
+ * Depending on the ratio of scaling (while downscaling), a 10-tap
+ * filter may operate with 6 tap coefficients and zero-pads the remaining
+ * coefficients.
+ *
+ * While upscaling the driver will program 6-tap filter coefficients
+ * in any N-tap configurations (for N >= 6).
+ *
+ * This selection is adopted by the driver as it gives optimal
+ * video output determined by repeated testing of the IP
+ *
+ * Return: Will return 0 if successful. Returns -EINVAL on an unsupported
+ * V-scaler number of taps.
+ */
+static int
+xv_vscaler_select_coeff(struct xilinx_scaler *scaler,
+ u32 height_in, u32 height_out)
+{
+ const short *coeff;
+ u32 ntaps = scaler->num_vert_taps;
+
+ coeff = xv_select_coeff(scaler, height_in, height_out, &ntaps);
+ if (!coeff)
+ return -EINVAL;
+
+ xv_vscaler_load_ext_coeff(scaler, coeff, ntaps);
+ return 0;
+}
+
+/**
+ * xv_hscaler_set_phases - Sets phases of h-scaler
+ * @scaler: Pointer to scaler device structure
+ *
+ * Programs the precomputed H_phases control words into the h-scaler.
+ * The register base depends on whether the IP revision uses the fixed
+ * PHASEH layout (XSCALER_HPHASE_FIX).
+ */
+static void
+xv_hscaler_set_phases(struct xilinx_scaler *scaler)
+{
+	u32 loop_width = scaler->max_pixels / scaler->pix_per_clk;
+	u32 offset, i;
+
+	if (scaler->cfg->flags & XSCALER_HPHASE_FIX)
+		offset = V_HSCALER_OFF +
+			 XV_HSCALER_CTRL_ADDR_HWREG_PHASEH_FIX;
+	else
+		offset = V_HSCALER_OFF +
+			 XV_HSCALER_CTRL_ADDR_HWREG_PHASESH_V_BASE;
+
+	if (scaler->pix_per_clk == XSCALER_PPC_1) {
+		u32 index = 0;
+
+		/* 1 ppc: two 16-bit phase words per 32-bit register. */
+		for (i = 0; i < loop_width; i += 2) {
+			u32 lsb = scaler->H_phases[i] & XHSC_MASK_LOW_16BITS;
+			u32 msb = scaler->H_phases[i + 1] &
+				  XHSC_MASK_LOW_16BITS;
+
+			xilinx_scaler_write(scaler->base,
+					    offset + (index * 4),
+					    (msb << 16 | lsb));
+			++index;
+		}
+	} else if (scaler->pix_per_clk == XSCALER_PPC_2) {
+		/* 2 ppc: one 32-bit control word per register. */
+		for (i = 0; i < loop_width; i++)
+			xilinx_scaler_write(scaler->base, offset + (i * 4),
+					    scaler->H_phases[i] &
+					    XHSC_MASK_LOW_32BITS);
+	}
+}
+
+/**
+ * xv_vscaler_setup_video_fmt - Sets video format of v-scaler
+ * @scaler: Pointer to scaler device structure
+ * @code_in: media bus format to be set
+ *
+ * Translates @code_in to the register color-space code and programs the
+ * v-scaler input color mode.
+ *
+ * Return: format value on success. -EINVAL for invalid format.
+ */
+static int
+xv_vscaler_setup_video_fmt(struct xilinx_scaler *scaler, u32 code_in)
+{
+	u32 video_in;
+
+	if (code_in == MEDIA_BUS_FMT_UYVY8_1X16) {
+		video_in = XVIDC_CSF_YCRCB_422;
+	} else if (code_in == MEDIA_BUS_FMT_VUY8_1X24) {
+		video_in = XVIDC_CSF_YCRCB_444;
+	} else if (code_in == MEDIA_BUS_FMT_RGB888_1X24) {
+		video_in = XVIDC_CSF_RGB;
+	} else if (code_in == MEDIA_BUS_FMT_VYYUYY8_1X24) {
+		video_in = XVIDC_CSF_YCRCB_420;
+	} else {
+		dev_info(scaler->dev, "Vscaler Unsupported media fmt\n");
+		return -EINVAL;
+	}
+
+	xilinx_scaler_write(scaler->base, V_VSCALER_OFF +
+			    XV_VSCALER_CTRL_ADDR_HWREG_COLORMODE_DATA,
+			    video_in);
+	return video_in;
+}
+
+/**
+ * xv_hscaler_setup_video_fmt - Sets video format of h-scaler
+ * @scaler: Pointer to scaler device structure
+ * @code_out: media bus format to be set on the output
+ * @vsc_out: color-space code produced by the v-scaler stage
+ *
+ * Validates @vsc_out, programs it as the h-scaler input color mode and
+ * programs the output color mode derived from @code_out.
+ *
+ * Return: 0 on success. -EINVAL for invalid format.
+ */
+static int xv_hscaler_setup_video_fmt(struct xilinx_scaler *scaler,
+				      u32 code_out, u32 vsc_out)
+{
+	u32 video_out;
+
+	/* The h-scaler input is whatever the v-scaler produced. */
+	switch (vsc_out) {
+	case XVIDC_CSF_YCRCB_422:
+	case XVIDC_CSF_YCRCB_444:
+	case XVIDC_CSF_RGB:
+	case XVIDC_CSF_YCRCB_420:
+		break;
+	default:
+		dev_info(scaler->dev, "unsupported format from Vscaler");
+		return -EINVAL;
+	}
+
+	xilinx_scaler_write(scaler->base, V_HSCALER_OFF +
+			    XV_HSCALER_CTRL_ADDR_HWREG_COLORMODE_DATA,
+			    vsc_out);
+
+	switch (code_out) {
+	case MEDIA_BUS_FMT_UYVY8_1X16:
+		video_out = XVIDC_CSF_YCRCB_422;
+		break;
+	case MEDIA_BUS_FMT_VUY8_1X24:
+		video_out = XVIDC_CSF_YCRCB_444;
+		break;
+	case MEDIA_BUS_FMT_RGB888_1X24:
+		video_out = XVIDC_CSF_RGB;
+		break;
+	case MEDIA_BUS_FMT_VYYUYY8_1X24:
+		video_out = XVIDC_CSF_YCRCB_420;
+		break;
+	default:
+		dev_info(scaler->dev, "Hscaler Unsupported Out media fmt\n");
+		return -EINVAL;
+	}
+	xilinx_scaler_write(scaler->base, V_HSCALER_OFF +
+			    XV_HSCALER_CTRL_ADDR_HWREG_COLORMODEOUT_DATA,
+			    video_out);
+	return 0;
+}
+
+/**
+ * xilinx_scaler_parse_of - Parse device tree information
+ * @scaler: Pointer to scaler device structure
+ *
+ * This function reads the device tree contents: clocks, tap counts,
+ * samples-per-clock, reset GPIO and maximum frame dimensions.
+ *
+ * Return: 0 on success. -EINVAL for invalid value.
+ *
+ */
+static int xilinx_scaler_parse_of(struct xilinx_scaler *scaler)
+{
+ int ret;
+ u32 dt_ppc;
+ struct device_node *node = scaler->dev->of_node;
+
+ scaler->ctrl_clk = devm_clk_get(scaler->dev, "aclk_ctrl");
+ if (IS_ERR(scaler->ctrl_clk)) {
+ ret = PTR_ERR(scaler->ctrl_clk);
+ dev_err(scaler->dev, "failed to get axi lite clk %d\n", ret);
+ return ret;
+ }
+
+ scaler->axis_clk = devm_clk_get(scaler->dev, "aclk_axis");
+ if (IS_ERR(scaler->axis_clk)) {
+ ret = PTR_ERR(scaler->axis_clk);
+ dev_err(scaler->dev, "failed to get video clk %d\n", ret);
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,h-scaler-taps",
+ &scaler->num_hori_taps);
+ if (ret < 0) {
+ dev_info(scaler->dev, "h-scaler-taps not present in DT\n");
+ return ret;
+ }
+ /* 2/4 taps run the simple filter; 6+ enable polyphase filtering */
+ switch (scaler->num_hori_taps) {
+ case XV_HSCALER_TAPS_2:
+ case XV_HSCALER_TAPS_4:
+ scaler->is_polyphase = false;
+ break;
+ case XV_HSCALER_TAPS_6:
+ case XV_HSCALER_TAPS_8:
+ case XV_HSCALER_TAPS_10:
+ case XV_HSCALER_TAPS_12:
+ scaler->is_polyphase = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,v-scaler-taps",
+ &scaler->num_vert_taps);
+ if (ret < 0) {
+ dev_info(scaler->dev, "v-scaler-taps not present in DT\n");
+ return ret;
+ }
+
+ /*
+ * NOTE(review): XV_HSCALER_TAPS_2 is reused below for the v-scaler
+ * 2-tap case — presumably the h/v tap macros share values (the
+ * comment in xv_select_coeff() says so); confirm against the header.
+ */
+ switch (scaler->num_vert_taps) {
+ case XV_HSCALER_TAPS_2:
+ case XV_VSCALER_TAPS_4:
+ /* non-polyphase configs require matching h/v tap counts */
+ if (scaler->num_vert_taps != scaler->num_hori_taps)
+ return -EINVAL;
+ break;
+ case XV_VSCALER_TAPS_6:
+ case XV_VSCALER_TAPS_8:
+ case XV_VSCALER_TAPS_10:
+ case XV_VSCALER_TAPS_12:
+ scaler->is_polyphase = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,samples-per-clk", &dt_ppc);
+ if (ret < 0) {
+ dev_info(scaler->dev, "PPC is missing in DT\n");
+ return ret;
+ }
+ /* Only 1 and 2 pixels-per-clock IP configurations are supported */
+ if (dt_ppc != XSCALER_PPC_1 && dt_ppc != XSCALER_PPC_2) {
+ dev_info(scaler->dev, "Unsupported ppc: %d", dt_ppc);
+ return -EINVAL;
+ }
+ scaler->pix_per_clk = dt_ppc;
+
+ /* Reset GPIO */
+ scaler->rst_gpio = devm_gpiod_get(scaler->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(scaler->rst_gpio)) {
+ if (PTR_ERR(scaler->rst_gpio) != -EPROBE_DEFER)
+ dev_err(scaler->dev, "Reset GPIO not setup in DT");
+ return PTR_ERR(scaler->rst_gpio);
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-height",
+ &scaler->max_lines);
+ if (ret < 0) {
+ dev_err(scaler->dev, "xlnx,max-height is missing!");
+ return -EINVAL;
+ } else if (scaler->max_lines > XSCALER_MAX_HEIGHT ||
+ scaler->max_lines < XSCALER_MIN_HEIGHT) {
+ dev_err(scaler->dev, "Invalid height in dt");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-width",
+ &scaler->max_pixels);
+ if (ret < 0) {
+ dev_err(scaler->dev, "xlnx,max-width is missing!");
+ return -EINVAL;
+ } else if (scaler->max_pixels > XSCALER_MAX_WIDTH ||
+ scaler->max_pixels < XSCALER_MIN_WIDTH) {
+ dev_err(scaler->dev, "Invalid width in dt");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_scaler_stream - Set up v-scaler and h-scaler for streaming
+ * @scaler: Pointer to scaler device structure
+ *
+ * Programs the line rate, pixel rate, color formats and (for polyphase
+ * configurations) the filter coefficients and phases of both sub-cores.
+ *
+ * Return: 0 on success. Returns a negative error code on failure.
+ */
+static int xilinx_scaler_stream(struct xilinx_scaler *scaler)
+{
+	u32 pixel_rate;
+	u32 line_rate;
+	int vsc_fmt;
+	int ret;
+
+	/* Vertical scaling step in STEP_PRECISION fixed point. */
+	line_rate = (scaler->height_in * STEP_PRECISION) / scaler->height_out;
+
+	if (scaler->is_polyphase) {
+		ret = xv_vscaler_select_coeff(scaler, scaler->height_in,
+					      scaler->height_out);
+		if (ret < 0) {
+			dev_info(scaler->dev, "Failed: vscaler select coeff\n");
+			return ret;
+		}
+		xv_vscaler_set_coeff(scaler);
+	}
+	xilinx_scaler_write(scaler->base, V_VSCALER_OFF +
+			    XV_VSCALER_CTRL_ADDR_HWREG_LINERATE_DATA,
+			    line_rate);
+	/* On success this returns the v-scaler color-space code (>= 0). */
+	vsc_fmt = xv_vscaler_setup_video_fmt(scaler, scaler->fmt_in);
+	if (vsc_fmt < 0) {
+		dev_info(scaler->dev, "Failed: vscaler setup video format\n");
+		return vsc_fmt;
+	}
+	/* Horizontal scaling step in STEP_PRECISION fixed point. */
+	pixel_rate = (scaler->width_in * STEP_PRECISION) / scaler->width_out;
+	xilinx_scaler_write(scaler->base, V_HSCALER_OFF +
+			    XV_HSCALER_CTRL_ADDR_HWREG_PIXELRATE_DATA,
+			    pixel_rate);
+	/* The v-scaler output format feeds the h-scaler input stage. */
+	ret = xv_hscaler_setup_video_fmt(scaler, scaler->fmt_out, vsc_fmt);
+	if (ret < 0) {
+		/* was a copy-pasted "vscaler" message on the hscaler path */
+		dev_info(scaler->dev, "Failed: hscaler setup video format\n");
+		return ret;
+	}
+	if (scaler->is_polyphase) {
+		ret = xv_hscaler_select_coeff(scaler, scaler->width_in,
+					      scaler->width_out);
+		if (ret < 0) {
+			dev_info(scaler->dev, "Failed: hscaler select coeff\n");
+			return ret;
+		}
+		xv_hscaler_set_coeff(scaler);
+	}
+	xv_hscaler_calculate_phases(scaler, scaler->width_in,
+				    scaler->width_out, pixel_rate);
+	xv_hscaler_set_phases(scaler);
+	return 0;
+}
+
+/**
+ * xilinx_scaler_bridge_enable - enables scaler sub-cores
+ * @bridge: bridge instance
+ *
+ * This function configures the stream parameters, starts both sub-cores
+ * and releases the AXI-Stream reset.
+ *
+ * Return: 0 on success. Return -EINVAL on failure conditions.
+ *
+ */
+static int xilinx_scaler_bridge_enable(struct xlnx_bridge *bridge)
+{
+ int ret;
+ struct xilinx_scaler *scaler = bridge_to_layer(bridge);
+
+ ret = xilinx_scaler_stream(scaler);
+ if (ret)
+ return ret;
+
+ xilinx_scaler_write(scaler->base, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_AP_CTRL, XSCALER_STREAM_ON);
+ xilinx_scaler_write(scaler->base, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_AP_CTRL, XSCALER_STREAM_ON);
+ xilinx_scaler_enable_block(scaler, XGPIO_CH_RESET_SEL,
+ XGPIO_RESET_MASK_IP_AXIS);
+ return ret;
+}
+
+/**
+ * xilinx_scaler_bridge_disable - disables scaler sub-cores
+ * @bridge: bridge instance
+ *
+ * This function disables the scaler sub-cores by holding every IP block
+ * in reset via the reset GPIO channel.
+ */
+static void xilinx_scaler_bridge_disable(struct xlnx_bridge *bridge)
+{
+ struct xilinx_scaler *scaler = bridge_to_layer(bridge);
+
+ xilinx_scaler_disable_block(scaler, XGPIO_CH_RESET_SEL,
+ XGPIO_RESET_MASK_ALL_BLOCKS);
+}
+
+/**
+ * xilinx_scaler_bridge_set_input - Sets the input parameters of scaler
+ * @bridge: bridge instance
+ * @width: width of video
+ * @height: height of video
+ * @bus_fmt: video bus format
+ *
+ * This function sets the input parameters of scaler, pulses the IP reset
+ * and programs the input dimension registers of both sub-cores.
+ * Return: 0 on success. -EINVAL for invalid parameters.
+ */
+static int xilinx_scaler_bridge_set_input(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt)
+{
+ struct xilinx_scaler *scaler = bridge_to_layer(bridge);
+
+ if (width > scaler->max_pixels || height > scaler->max_lines)
+ return -EINVAL;
+
+ scaler->height_in = height;
+ scaler->width_in = width;
+ scaler->fmt_in = bus_fmt;
+
+ /* IP Reset through GPIO */
+ /*
+ * NOTE(review): assert and de-assert back-to-back with no delay;
+ * presumably the gpiod call latency satisfies the IP's minimum
+ * reset pulse width — confirm against the VPSS reset requirements.
+ */
+ gpiod_set_value_cansleep(scaler->rst_gpio, XSCALER_RESET_ASSERT);
+ gpiod_set_value_cansleep(scaler->rst_gpio, XSCALER_RESET_DEASSERT);
+ xilinx_scaler_reset(scaler);
+ /* H_phases is OR-ed into by xv_hscaler_calculate_phases(); clear it */
+ memset(scaler->H_phases, 0, sizeof(scaler->H_phases));
+
+ xilinx_scaler_write(scaler->base, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_HWREG_HEIGHTIN_DATA, height);
+ xilinx_scaler_write(scaler->base, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_HWREG_WIDTH_DATA, width);
+ xilinx_scaler_write(scaler->base, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_WIDTHIN_DATA, width);
+
+ return 0;
+}
+
+/**
+ * xilinx_scaler_bridge_get_input_fmts - input formats supported by scaler
+ * @bridge: bridge instance (unused; same list for every instance)
+ * @fmts: Pointer to be updated with formats information
+ * @count: count of video bus formats
+ *
+ * This function provides the input video formats information scaler
+ * Return: 0 on success.
+ */
+static int xilinx_scaler_bridge_get_input_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count)
+{
+ *fmts = xilinx_scaler_video_fmts;
+ *count = ARRAY_SIZE(xilinx_scaler_video_fmts);
+ return 0;
+}
+
+/**
+ * xilinx_scaler_bridge_set_output - Sets the output parameters of scaler
+ * @bridge: bridge instance
+ * @width: width of video
+ * @height: height of video
+ * @bus_fmt: video bus format
+ *
+ * This function sets the output parameters of scaler and programs the
+ * output dimension registers of both sub-cores.
+ * Return: 0 on success. -EINVAL for invalid parameters.
+ */
+static int xilinx_scaler_bridge_set_output(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt)
+{
+ struct xilinx_scaler *scaler = bridge_to_layer(bridge);
+
+ if (width > scaler->max_pixels || height > scaler->max_lines)
+ return -EINVAL;
+
+ scaler->height_out = height;
+ scaler->width_out = width;
+ scaler->fmt_out = bus_fmt;
+
+ xilinx_scaler_write(scaler->base, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_HWREG_HEIGHTOUT_DATA, height);
+ xilinx_scaler_write(scaler->base, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_HEIGHT_DATA, height);
+ xilinx_scaler_write(scaler->base, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_WIDTHOUT_DATA, width);
+ return 0;
+}
+
+/**
+ * xilinx_scaler_bridge_get_output_fmts - output formats supported by scaler
+ * @bridge: bridge instance
+ * @fmts: Pointer to be updated with formats information
+ * @count: count of video bus formats
+ *
+ * This function provides the output video formats information scaler
+ * Return: 0 on success.
+ */
+static int xilinx_scaler_bridge_get_output_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count)
+{
+ *fmts = xilinx_scaler_video_fmts;
+ *count = ARRAY_SIZE(xilinx_scaler_video_fmts);
+ return 0;
+}
+
+/* v2.2 IP needs the horizontal-phase fix-up (see XSCALER_HPHASE_FIX users) */
+static const struct xscaler_feature xlnx_scaler_v2_2 = {
+ .flags = XSCALER_HPHASE_FIX,
+};
+
+/* baseline IP: no feature quirks */
+static const struct xscaler_feature xlnx_scaler = {
+ .flags = 0,
+};
+
+/* match table: per-compatible feature flags are carried in .data */
+static const struct of_device_id xilinx_scaler_of_match[] = {
+ { .compatible = "xlnx,vpss-scaler",
+ .data = &xlnx_scaler},
+ { .compatible = "xlnx,vpss-scaler-2.2",
+ .data = &xlnx_scaler_v2_2},
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, xilinx_scaler_of_match);
+
+/**
+ * xilinx_scaler_probe - probe the VPSS scaler device
+ * @pdev: scaler platform device
+ *
+ * Maps registers, parses DT properties, enables the AXI-lite and video
+ * clocks, releases the IP reset and registers the xlnx bridge.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int xilinx_scaler_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct xilinx_scaler *scaler;
+ const struct of_device_id *match;
+ struct device_node *node = pdev->dev.of_node;
+ int ret;
+
+ scaler = devm_kzalloc(dev, sizeof(*scaler), GFP_KERNEL);
+ if (!scaler)
+ return -ENOMEM;
+ scaler->dev = dev;
+
+ match = of_match_node(xilinx_scaler_of_match, node);
+ if (!match)
+ return -ENODEV;
+
+ scaler->cfg = match->data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ scaler->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(scaler->base)) {
+ dev_err(dev, "failed to remap io region\n");
+ /* propagate the real error (-EBUSY, -EINVAL, ...), not -ENOMEM */
+ return PTR_ERR(scaler->base);
+ }
+ platform_set_drvdata(pdev, scaler);
+
+ ret = xilinx_scaler_parse_of(scaler);
+ if (ret < 0) {
+ dev_err(scaler->dev, "parse_of failed\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(scaler->ctrl_clk);
+ if (ret) {
+ dev_err(scaler->dev, "unable to enable axi lite clk %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(scaler->axis_clk);
+ if (ret) {
+ dev_err(scaler->dev, "unable to enable video clk %d\n", ret);
+ goto err_ctrl_clk;
+ }
+
+ scaler->max_num_phases = XSCALER_MAX_PHASES;
+
+ /* Reset the Global IP Reset through a GPIO */
+ gpiod_set_value_cansleep(scaler->rst_gpio, XSCALER_RESET_DEASSERT);
+ xilinx_scaler_reset(scaler);
+
+ scaler->bridge.enable = &xilinx_scaler_bridge_enable;
+ scaler->bridge.disable = &xilinx_scaler_bridge_disable;
+ scaler->bridge.set_input = &xilinx_scaler_bridge_set_input;
+ scaler->bridge.get_input_fmts = &xilinx_scaler_bridge_get_input_fmts;
+ scaler->bridge.set_output = &xilinx_scaler_bridge_set_output;
+ scaler->bridge.get_output_fmts = &xilinx_scaler_bridge_get_output_fmts;
+ scaler->bridge.of_node = dev->of_node;
+
+ ret = xlnx_bridge_register(&scaler->bridge);
+ if (ret) {
+ dev_err(scaler->dev, "Bridge registration failed\n");
+ goto err_axis_clk;
+ }
+ dev_info(scaler->dev, "xlnx drm scaler experimental driver probed\n");
+
+ return 0;
+
+err_axis_clk:
+ clk_disable_unprepare(scaler->axis_clk);
+err_ctrl_clk:
+ clk_disable_unprepare(scaler->ctrl_clk);
+ return ret;
+}
+
+/**
+ * xilinx_scaler_remove - undo probe: unregister bridge, disable clocks
+ * @pdev: scaler platform device
+ *
+ * Return: 0 always.
+ */
+static int xilinx_scaler_remove(struct platform_device *pdev)
+{
+ struct xilinx_scaler *scaler = platform_get_drvdata(pdev);
+
+ xlnx_bridge_unregister(&scaler->bridge);
+ clk_disable_unprepare(scaler->axis_clk);
+ clk_disable_unprepare(scaler->ctrl_clk);
+ return 0;
+}
+
+static struct platform_driver scaler_bridge_driver = {
+ .probe = xilinx_scaler_probe,
+ .remove = xilinx_scaler_remove,
+ .driver = {
+ .name = "xlnx,scaler-bridge",
+ .of_match_table = xilinx_scaler_of_match,
+ },
+};
+
+module_platform_driver(scaler_bridge_driver);
+
+MODULE_AUTHOR("Venkateshwar Rao <vgannava@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx FPGA SCALER Bridge Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/xlnx_sdi.c b/drivers/gpu/drm/xlnx/xlnx_sdi.c
new file mode 100644
index 000000000000..25eca89244e9
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_sdi.c
@@ -0,0 +1,1234 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA SDI Tx Subsystem driver.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Contacts: Saurabh Sengar <saurabhs@xilinx.com>
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/phy/phy.h>
+#include <video/videomode.h>
+#include "xlnx_sdi_modes.h"
+#include "xlnx_sdi_timing.h"
+
+#include "xlnx_bridge.h"
+
+/* SDI register offsets */
+#define XSDI_TX_RST_CTRL 0x00
+#define XSDI_TX_MDL_CTRL 0x04
+#define XSDI_TX_GLBL_IER 0x0C
+#define XSDI_TX_ISR_STAT 0x10
+#define XSDI_TX_IER_STAT 0x14
+#define XSDI_TX_ST352_LINE 0x18
+#define XSDI_TX_ST352_DATA_CH0 0x1C
+#define XSDI_TX_VER 0x3C
+#define XSDI_TX_SYS_CFG 0x40
+#define XSDI_TX_STS_SB_TDATA 0x60
+#define XSDI_TX_AXI4S_STS1 0x68
+#define XSDI_TX_AXI4S_STS2 0x6C
+#define XSDI_TX_ST352_DATA_DS2 0x70
+
+/* MODULE_CTRL register masks */
+#define XSDI_TX_CTRL_M BIT(7)
+#define XSDI_TX_CTRL_INS_CRC BIT(12)
+#define XSDI_TX_CTRL_INS_ST352 BIT(13)
+#define XSDI_TX_CTRL_OVR_ST352 BIT(14)
+#define XSDI_TX_CTRL_INS_SYNC_BIT BIT(16)
+#define XSDI_TX_CTRL_USE_ANC_IN BIT(18)
+#define XSDI_TX_CTRL_INS_LN BIT(19)
+#define XSDI_TX_CTRL_INS_EDH BIT(20)
+#define XSDI_TX_CTRL_MODE 0x7
+#define XSDI_TX_CTRL_MUX 0x7
+#define XSDI_TX_CTRL_MODE_SHIFT 4
+#define XSDI_TX_CTRL_M_SHIFT 7
+#define XSDI_TX_CTRL_MUX_SHIFT 8
+#define XSDI_TX_CTRL_ST352_F2_EN_SHIFT 15
+#define XSDI_TX_CTRL_420_BIT BIT(21)
+#define XSDI_TX_CTRL_INS_ST352_CHROMA BIT(23)
+#define XSDI_TX_CTRL_USE_DS2_3GA BIT(24)
+
+/* TX_ST352_LINE register masks */
+#define XSDI_TX_ST352_LINE_MASK GENMASK(10, 0)
+#define XSDI_TX_ST352_LINE_F2_SHIFT 16
+
+/* ISR STAT register masks */
+#define XSDI_GTTX_RSTDONE_INTR BIT(0)
+#define XSDI_TX_CE_ALIGN_ERR_INTR BIT(1)
+#define XSDI_AXI4S_VID_LOCK_INTR BIT(8)
+#define XSDI_OVERFLOW_INTR BIT(9)
+#define XSDI_UNDERFLOW_INTR BIT(10)
+#define XSDI_IER_EN_MASK (XSDI_GTTX_RSTDONE_INTR | \
+ XSDI_TX_CE_ALIGN_ERR_INTR | \
+ XSDI_OVERFLOW_INTR | \
+ XSDI_UNDERFLOW_INTR)
+
+/* RST_CTRL_OFFSET masks */
+#define XSDI_TX_CTRL_EN BIT(0)
+#define XSDI_TX_BRIDGE_CTRL_EN BIT(8)
+#define XSDI_TX_AXI4S_CTRL_EN BIT(9)
+/* STS_SB_TX_TDATA masks */
+#define XSDI_TX_TDATA_GT_RESETDONE BIT(2)
+
+#define XSDI_TX_MUX_SD_HD_3GA 0
+#define XSDI_TX_MUX_3GB 1
+#define XSDI_TX_MUX_8STREAM_6G_12G 2
+#define XSDI_TX_MUX_4STREAM_6G 3
+#define XSDI_TX_MUX_16STREAM_12G 4
+
+#define SDI_MAX_DATASTREAM 8
+#define PIXELS_PER_CLK 2
+#define XSDI_CH_SHIFT 29
+#define XST352_PROG_PIC BIT(6)
+#define XST352_PROG_TRANS BIT(7)
+#define XST352_2048_SHIFT BIT(6)
+#define XST352_YUV420_MASK 0x03
+#define ST352_BYTE3 0x00
+#define ST352_BYTE4 0x01
+#define GT_TIMEOUT 50
+/* SDI modes */
+#define XSDI_MODE_HD 0
+#define XSDI_MODE_SD 1
+#define XSDI_MODE_3GA 2
+#define XSDI_MODE_3GB 3
+#define XSDI_MODE_6G 4
+#define XSDI_MODE_12G 5
+
+#define SDI_TIMING_PARAMS_SIZE 48
+
+/**
+ * enum payload_line_1 - Payload Ids Line 1 number
+ * @PAYLD_LN1_HD_3_6_12G: line 1 HD,3G,6G or 12G mode value
+ * @PAYLD_LN1_SDPAL: line 1 SD PAL mode value
+ * @PAYLD_LN1_SDNTSC: line 1 SD NTSC mode value
+ */
+enum payload_line_1 {
+ PAYLD_LN1_HD_3_6_12G = 10,
+ PAYLD_LN1_SDPAL = 9,
+ PAYLD_LN1_SDNTSC = 13
+};
+
+/**
+ * enum payload_line_2 - Payload Ids Line 2 number
+ * @PAYLD_LN2_HD_3_6_12G: line 2 HD,3G,6G or 12G mode value
+ * @PAYLD_LN2_SDPAL: line 2 SD PAL mode value
+ * @PAYLD_LN2_SDNTSC: line 2 SD NTSC mode value
+ */
+enum payload_line_2 {
+ PAYLD_LN2_HD_3_6_12G = 572,
+ PAYLD_LN2_SDPAL = 322,
+ PAYLD_LN2_SDNTSC = 276
+};
+
+/**
+ * struct xlnx_sdi - Core configuration SDI Tx subsystem device structure
+ * @encoder: DRM encoder structure
+ * @connector: DRM connector structure
+ * @dev: device structure
+ * @base: Base address of SDI subsystem
+ * @mode_flags: SDI operation mode related flags
+ * @wait_event: wait event
+ * @event_received: wait event status
+ * @enable_st352_chroma: Able to send ST352 packets in Chroma stream.
+ * @enable_anc_data: Enable/Disable Ancillary Data insertion for Audio
+ * @sdi_mode: configurable SDI mode parameter, supported values are:
+ * 0 - HD
+ * 1 - SD
+ * 2 - 3GA
+ * 3 - 3GB
+ * 4 - 6G
+ * 5 - 12G
+ * @sdi_mod_prop_val: configurable SDI mode parameter value
+ * @sdi_data_strm: configurable SDI data stream parameter
+ * @sdi_data_strm_prop_val: configurable number of SDI data streams
+ * value currently supported are 2, 4 and 8
+ * @sdi_420_in: Specifying input bus color format parameter to SDI
+ * @sdi_420_in_val: 1 for yuv420 and 0 for yuv422
+ * @sdi_420_out: configurable SDI out color format parameter
+ * @sdi_420_out_val: 1 for yuv420 and 0 for yuv422
+ * @is_frac_prop: configurable SDI fractional fps parameter
+ * @is_frac_prop_val: configurable SDI fractional fps parameter value
+ * @bridge: bridge structure
+ * @height_out: configurable bridge output height parameter
+ * @height_out_prop_val: configurable bridge output height parameter value
+ * @width_out: configurable bridge output width parameter
+ * @width_out_prop_val: configurable bridge output width parameter value
+ * @in_fmt: configurable bridge input media format
+ * @in_fmt_prop_val: configurable media bus format value
+ * @out_fmt: configurable bridge output media format
+ * @out_fmt_prop_val: configurable media bus format value
+ * @en_st352_c_prop: configurable ST352 payload on Chroma stream parameter
+ * @en_st352_c_val: configurable ST352 payload on Chroma parameter value
+ * @use_ds2_3ga_prop: Use DS2 instead of DS3 in 3GA mode parameter
+ * @use_ds2_3ga_val: Use DS2 instead of DS3 in 3GA mode parameter value
+ * @video_mode: current display mode
+ * @axi_clk: AXI Lite interface clock
+ * @sditx_clk: SDI Tx Clock
+ * @vidin_clk: Video Clock
+ */
+struct xlnx_sdi {
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+ struct device *dev;
+ void __iomem *base;
+ u32 mode_flags;
+ wait_queue_head_t wait_event;
+ bool event_received;
+ bool enable_st352_chroma;
+ bool enable_anc_data;
+ struct drm_property *sdi_mode;
+ u32 sdi_mod_prop_val;
+ struct drm_property *sdi_data_strm;
+ u32 sdi_data_strm_prop_val;
+ struct drm_property *sdi_420_in;
+ bool sdi_420_in_val;
+ struct drm_property *sdi_420_out;
+ bool sdi_420_out_val;
+ struct drm_property *is_frac_prop;
+ bool is_frac_prop_val;
+ struct xlnx_bridge *bridge;
+ struct drm_property *height_out;
+ u32 height_out_prop_val;
+ struct drm_property *width_out;
+ u32 width_out_prop_val;
+ struct drm_property *in_fmt;
+ u32 in_fmt_prop_val;
+ struct drm_property *out_fmt;
+ u32 out_fmt_prop_val;
+ struct drm_property *en_st352_c_prop;
+ bool en_st352_c_val;
+ struct drm_property *use_ds2_3ga_prop;
+ bool use_ds2_3ga_val;
+ struct drm_display_mode video_mode;
+ struct clk *axi_clk;
+ struct clk *sditx_clk;
+ struct clk *vidin_clk;
+};
+
+#define connector_to_sdi(c) container_of(c, struct xlnx_sdi, connector)
+#define encoder_to_sdi(e) container_of(e, struct xlnx_sdi, encoder)
+
+/* 32-bit MMIO register accessors for the SDI Tx register space */
+static inline void xlnx_sdi_writel(void __iomem *base, int offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+static inline u32 xlnx_sdi_readl(void __iomem *base, int offset)
+{
+ return readl(base + offset);
+}
+
+/**
+ * xlnx_sdi_en_axi4s - Enable SDI Tx AXI4S-to-Video core
+ * @sdi: Pointer to SDI Tx structure
+ *
+ * This function enables the SDI Tx AXI4S-to-Video core.
+ */
+static void xlnx_sdi_en_axi4s(struct xlnx_sdi *sdi)
+{
+ u32 data;
+
+ /* read-modify-write so other RST_CTRL enable bits are preserved */
+ data = xlnx_sdi_readl(sdi->base, XSDI_TX_RST_CTRL);
+ data |= XSDI_TX_AXI4S_CTRL_EN;
+ xlnx_sdi_writel(sdi->base, XSDI_TX_RST_CTRL, data);
+}
+
+/**
+ * xlnx_sdi_en_bridge - Enable SDI Tx bridge
+ * @sdi: Pointer to SDI Tx structure
+ *
+ * This function enables the SDI Tx bridge.
+ */
+static void xlnx_sdi_en_bridge(struct xlnx_sdi *sdi)
+{
+ u32 data;
+
+ /* read-modify-write so other RST_CTRL enable bits are preserved */
+ data = xlnx_sdi_readl(sdi->base, XSDI_TX_RST_CTRL);
+ data |= XSDI_TX_BRIDGE_CTRL_EN;
+ xlnx_sdi_writel(sdi->base, XSDI_TX_RST_CTRL, data);
+}
+
+/**
+ * xlnx_sdi_irq_handler - SDI Tx interrupt
+ * @irq: irq number
+ * @data: irq data
+ *
+ * Return: IRQ_HANDLED for all cases.
+ *
+ * Logs SDI Tx status interrupts (GT reset done, CE align error,
+ * AXI4-Stream overflow/underflow) and wakes any waiter once the
+ * sideband status reports GT reset done.
+ */
+static irqreturn_t xlnx_sdi_irq_handler(int irq, void *data)
+{
+ struct xlnx_sdi *sdi = (struct xlnx_sdi *)data;
+ u32 reg;
+
+ reg = xlnx_sdi_readl(sdi->base, XSDI_TX_ISR_STAT);
+
+ if (reg & XSDI_GTTX_RSTDONE_INTR)
+ dev_dbg(sdi->dev, "GT reset interrupt received\n");
+ if (reg & XSDI_TX_CE_ALIGN_ERR_INTR)
+ dev_err_ratelimited(sdi->dev, "SDI SD CE align error\n");
+ if (reg & XSDI_OVERFLOW_INTR)
+ dev_err_ratelimited(sdi->dev, "AXI-4 Stream Overflow error\n");
+ if (reg & XSDI_UNDERFLOW_INTR)
+ dev_err_ratelimited(sdi->dev, "AXI-4 Stream Underflow error\n");
+ /*
+ * Ack all pending bits except video-lock, which is deliberately
+ * left pending - presumably consumed elsewhere; TODO confirm.
+ */
+ xlnx_sdi_writel(sdi->base, XSDI_TX_ISR_STAT,
+ reg & ~(XSDI_AXI4S_VID_LOCK_INTR));
+
+ reg = xlnx_sdi_readl(sdi->base, XSDI_TX_STS_SB_TDATA);
+ if (reg & XSDI_TX_TDATA_GT_RESETDONE) {
+ sdi->event_received = true;
+ wake_up_interruptible(&sdi->wait_event);
+ }
+ return IRQ_HANDLED;
+}
+
+/**
+ * xlnx_sdi_set_payload_line - set ST352 packet line number
+ * @sdi: Pointer to SDI Tx structure
+ * @line_1: line number used to insert st352 packet for field 1.
+ * @line_2: line number used to insert st352 packet for field 2.
+ *
+ * This function sets the ST352 packet insertion line numbers and enables
+ * field-2 payload insertion.
+ */
+static void xlnx_sdi_set_payload_line(struct xlnx_sdi *sdi,
+ u32 line_1, u32 line_2)
+{
+ u32 data;
+
+ data = ((line_1 & XSDI_TX_ST352_LINE_MASK) |
+ ((line_2 & XSDI_TX_ST352_LINE_MASK) <<
+ XSDI_TX_ST352_LINE_F2_SHIFT));
+
+ xlnx_sdi_writel(sdi->base, XSDI_TX_ST352_LINE, data);
+
+ /* enable ST352 insertion for field 2 */
+ data = xlnx_sdi_readl(sdi->base, XSDI_TX_MDL_CTRL);
+ data |= (1 << XSDI_TX_CTRL_ST352_F2_EN_SHIFT);
+
+ xlnx_sdi_writel(sdi->base, XSDI_TX_MDL_CTRL, data);
+}
+
+/**
+ * xlnx_sdi_set_payload_data - set ST352 packet payload
+ * @sdi: Pointer to SDI Tx structure
+ * @data_strm: data stream number
+ * @payload: st352 packet payload
+ *
+ * This function sets the ST352 payload data for the corresponding stream.
+ * When chroma ST352 insertion is enabled, the same payload is mirrored
+ * into the DS2 register bank.
+ */
+static void xlnx_sdi_set_payload_data(struct xlnx_sdi *sdi,
+ u32 data_strm, u32 payload)
+{
+ xlnx_sdi_writel(sdi->base,
+ (XSDI_TX_ST352_DATA_CH0 + (data_strm * 4)), payload);
+
+ dev_dbg(sdi->dev, "enable_st352_chroma = %d and en_st352_c_val = %d\n",
+ sdi->enable_st352_chroma, sdi->en_st352_c_val);
+ if (sdi->enable_st352_chroma && sdi->en_st352_c_val) {
+ xlnx_sdi_writel(sdi->base,
+ (XSDI_TX_ST352_DATA_DS2 + (data_strm * 4)),
+ payload);
+ }
+}
+
+/**
+ * xlnx_sdi_set_display_disable - Disable the SDI Tx IP core enable
+ * register bit
+ * @sdi: SDI structure having the updated user parameters
+ *
+ * This function takes the SDI structure, clears every stream's ST352
+ * payload, masks the global interrupt and clears the whole reset/control
+ * register (disabling the core, bridge and AXI4S paths).
+ */
+static void xlnx_sdi_set_display_disable(struct xlnx_sdi *sdi)
+{
+ u32 i;
+
+ for (i = 0; i < SDI_MAX_DATASTREAM; i++)
+ xlnx_sdi_set_payload_data(sdi, i, 0);
+
+ xlnx_sdi_writel(sdi->base, XSDI_TX_GLBL_IER, 0);
+ xlnx_sdi_writel(sdi->base, XSDI_TX_RST_CTRL, 0);
+}
+
+/**
+ * xlnx_sdi_payload_config - config the SDI payload parameters
+ * @sdi: pointer Xilinx SDI Tx structure
+ * @mode: display mode
+ *
+ * This function selects the ST352 insertion line numbers matching the
+ * given SDI mode and programs them into the IP.
+ */
+static void xlnx_sdi_payload_config(struct xlnx_sdi *sdi, u32 mode)
+{
+ u32 payload_1, payload_2;
+
+ switch (mode) {
+ case XSDI_MODE_SD:
+ payload_1 = PAYLD_LN1_SDPAL;
+ payload_2 = PAYLD_LN2_SDPAL;
+ break;
+ case XSDI_MODE_HD:
+ case XSDI_MODE_3GA:
+ case XSDI_MODE_3GB:
+ case XSDI_MODE_6G:
+ case XSDI_MODE_12G:
+ payload_1 = PAYLD_LN1_HD_3_6_12G;
+ payload_2 = PAYLD_LN2_HD_3_6_12G;
+ break;
+ default:
+ /* unknown mode: line 0, effectively no meaningful insertion */
+ payload_1 = 0;
+ payload_2 = 0;
+ break;
+ }
+
+ xlnx_sdi_set_payload_line(sdi, payload_1, payload_2);
+}
+
+/**
+ * xlnx_sdi_set_mode - Set mode parameters in SDI Tx
+ * @sdi: pointer Xilinx SDI Tx structure
+ * @mode: SDI Tx display mode
+ * @is_frac: 0 - integer 1 - fractional
+ * @mux_ptrn: specify the data stream interleaving pattern to be used
+ *
+ * This function programs the SDI mode, fractional-rate flag, stream mux
+ * pattern and YUV420 output bit, and sets up the matching ST352 payload
+ * insertion lines.
+ */
+static void xlnx_sdi_set_mode(struct xlnx_sdi *sdi, u32 mode,
+ bool is_frac, u32 mux_ptrn)
+{
+ u32 data;
+
+ xlnx_sdi_payload_config(sdi, mode);
+
+ /* clear the mode/M/mux/420 fields before or-ing in the new values */
+ data = xlnx_sdi_readl(sdi->base, XSDI_TX_MDL_CTRL);
+ data &= ~(XSDI_TX_CTRL_MODE << XSDI_TX_CTRL_MODE_SHIFT);
+ data &= ~(XSDI_TX_CTRL_M);
+ data &= ~(XSDI_TX_CTRL_MUX << XSDI_TX_CTRL_MUX_SHIFT);
+ data &= ~XSDI_TX_CTRL_420_BIT;
+
+ data |= (((mode & XSDI_TX_CTRL_MODE) << XSDI_TX_CTRL_MODE_SHIFT) |
+ (is_frac << XSDI_TX_CTRL_M_SHIFT) |
+ ((mux_ptrn & XSDI_TX_CTRL_MUX) << XSDI_TX_CTRL_MUX_SHIFT));
+
+ if (sdi->sdi_420_out_val)
+ data |= XSDI_TX_CTRL_420_BIT;
+ xlnx_sdi_writel(sdi->base, XSDI_TX_MDL_CTRL, data);
+}
+
+/**
+ * xlnx_sdi_set_config_parameters - Configure SDI Tx registers with parameters
+ * given from user application.
+ * @sdi: SDI structure having the updated user parameters
+ *
+ * This function takes the SDI structure having drm_property parameters
+ * configured from user application and writes them into SDI IP registers.
+ */
+static void xlnx_sdi_set_config_parameters(struct xlnx_sdi *sdi)
+{
+ int mux_ptrn = -EINVAL;
+
+ switch (sdi->sdi_mod_prop_val) {
+ case XSDI_MODE_3GA:
+ mux_ptrn = XSDI_TX_MUX_SD_HD_3GA;
+ break;
+ case XSDI_MODE_3GB:
+ mux_ptrn = XSDI_TX_MUX_3GB;
+ break;
+ case XSDI_MODE_6G:
+ if (sdi->sdi_data_strm_prop_val == 4)
+ mux_ptrn = XSDI_TX_MUX_4STREAM_6G;
+ else if (sdi->sdi_data_strm_prop_val == 8)
+ mux_ptrn = XSDI_TX_MUX_8STREAM_6G_12G;
+ break;
+ case XSDI_MODE_12G:
+ if (sdi->sdi_data_strm_prop_val == 8)
+ mux_ptrn = XSDI_TX_MUX_8STREAM_6G_12G;
+ break;
+ default:
+ mux_ptrn = 0;
+ break;
+ }
+ /* mux_ptrn stays -EINVAL only for unsupported stream counts in 6G/12G */
+ if (mux_ptrn == -EINVAL) {
+ dev_err(sdi->dev, "%d data stream not supported for %d mode",
+ sdi->sdi_data_strm_prop_val, sdi->sdi_mod_prop_val);
+ return;
+ }
+ xlnx_sdi_set_mode(sdi, sdi->sdi_mod_prop_val, sdi->is_frac_prop_val,
+ mux_ptrn);
+}
+
+/**
+ * xlnx_sdi_atomic_set_property - implementation of drm_connector_funcs
+ * set_property invoked by IOCTL call to DRM_IOCTL_MODE_OBJ_SETPROPERTY
+ *
+ * @connector: pointer Xilinx SDI connector
+ * @state: DRM connector state
+ * @property: pointer to the drm_property structure
+ * @val: SDI parameter value that is configured from user application
+ *
+ * This function takes a drm_property name and value given from user application
+ * and updates the SDI structure property variables with the values.
+ * These values are later used to configure the SDI Tx IP.
+ *
+ * Return: 0 on success OR -EINVAL if setting property fails
+ */
+static int
+xlnx_sdi_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property, uint64_t val)
+{
+ struct xlnx_sdi *sdi = connector_to_sdi(connector);
+
+ if (property == sdi->sdi_mode)
+ sdi->sdi_mod_prop_val = (unsigned int)val;
+ else if (property == sdi->sdi_data_strm)
+ sdi->sdi_data_strm_prop_val = (unsigned int)val;
+ else if (property == sdi->sdi_420_in)
+ sdi->sdi_420_in_val = val;
+ else if (property == sdi->sdi_420_out)
+ sdi->sdi_420_out_val = val;
+ else if (property == sdi->is_frac_prop)
+ sdi->is_frac_prop_val = !!val;
+ else if (property == sdi->height_out)
+ sdi->height_out_prop_val = (unsigned int)val;
+ else if (property == sdi->width_out)
+ sdi->width_out_prop_val = (unsigned int)val;
+ else if (property == sdi->in_fmt)
+ sdi->in_fmt_prop_val = (unsigned int)val;
+ else if (property == sdi->out_fmt)
+ sdi->out_fmt_prop_val = (unsigned int)val;
+ else if (property == sdi->en_st352_c_prop)
+ sdi->en_st352_c_val = !!val;
+ else if (property == sdi->use_ds2_3ga_prop)
+ sdi->use_ds2_3ga_val = !!val;
+ else
+ return -EINVAL;
+ return 0;
+}
+
+/**
+ * xlnx_sdi_atomic_get_property - read back an SDI connector property
+ * @connector: pointer Xilinx SDI connector
+ * @state: DRM connector state
+ * @property: property being queried
+ * @val: updated with the current cached property value
+ *
+ * Return: 0 on success OR -EINVAL for an unknown property
+ */
+static int
+xlnx_sdi_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property, uint64_t *val)
+{
+ struct xlnx_sdi *sdi = connector_to_sdi(connector);
+
+ if (property == sdi->sdi_mode)
+ *val = sdi->sdi_mod_prop_val;
+ else if (property == sdi->sdi_data_strm)
+ *val = sdi->sdi_data_strm_prop_val;
+ else if (property == sdi->sdi_420_in)
+ *val = sdi->sdi_420_in_val;
+ else if (property == sdi->sdi_420_out)
+ *val = sdi->sdi_420_out_val;
+ else if (property == sdi->is_frac_prop)
+ *val = sdi->is_frac_prop_val;
+ else if (property == sdi->height_out)
+ *val = sdi->height_out_prop_val;
+ else if (property == sdi->width_out)
+ *val = sdi->width_out_prop_val;
+ else if (property == sdi->in_fmt)
+ *val = sdi->in_fmt_prop_val;
+ else if (property == sdi->out_fmt)
+ *val = sdi->out_fmt_prop_val;
+ else if (property == sdi->en_st352_c_prop)
+ *val = sdi->en_st352_c_val;
+ else if (property == sdi->use_ds2_3ga_prop)
+ *val = sdi->use_ds2_3ga_val;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * xlnx_sdi_get_mode_id - Search for a video mode in the supported modes table
+ *
+ * @mode: mode being searched
+ *
+ * Return: mode id if mode is found OR -EINVAL otherwise
+ */
+static int xlnx_sdi_get_mode_id(struct drm_display_mode *mode)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xlnx_sdi_modes); i++)
+ if (drm_mode_equal(&xlnx_sdi_modes[i].mode, mode))
+ return i;
+ return -EINVAL;
+}
+
+/**
+ * xlnx_sdi_drm_add_modes - Adds SDI supported modes
+ * @connector: pointer Xilinx SDI connector
+ *
+ * Return: Count of modes added
+ *
+ * This function adds the SDI modes supported and returns its count.
+ * Modes whose duplication fails (allocation failure) are silently
+ * skipped rather than aborting the whole enumeration.
+ */
+static int xlnx_sdi_drm_add_modes(struct drm_connector *connector)
+{
+ int num_modes = 0;
+ u32 i;
+ struct drm_display_mode *mode;
+ struct drm_device *dev = connector->dev;
+
+ for (i = 0; i < ARRAY_SIZE(xlnx_sdi_modes); i++) {
+ const struct drm_display_mode *ptr = &xlnx_sdi_modes[i].mode;
+
+ mode = drm_mode_duplicate(dev, ptr);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ num_modes++;
+ }
+ }
+ return num_modes;
+}
+
+/* SDI Tx has no hotplug detection: always report connected */
+static enum drm_connector_status
+xlnx_sdi_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void xlnx_sdi_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ connector->dev = NULL;
+}
+
+static const struct drm_connector_funcs xlnx_sdi_connector_funcs = {
+ .detect = xlnx_sdi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = xlnx_sdi_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_set_property = xlnx_sdi_atomic_set_property,
+ .atomic_get_property = xlnx_sdi_atomic_get_property,
+};
+
+/* one encoder per connector: return the embedded encoder */
+static struct drm_encoder *
+xlnx_sdi_best_encoder(struct drm_connector *connector)
+{
+ return &(connector_to_sdi(connector)->encoder);
+}
+
+static int xlnx_sdi_get_modes(struct drm_connector *connector)
+{
+ return xlnx_sdi_drm_add_modes(connector);
+}
+
+static struct drm_connector_helper_funcs xlnx_sdi_connector_helper_funcs = {
+ .get_modes = xlnx_sdi_get_modes,
+ .best_encoder = xlnx_sdi_best_encoder,
+};
+
+/**
+ * xlnx_sdi_drm_connector_create_property - create SDI connector properties
+ *
+ * @base_connector: pointer to Xilinx SDI connector
+ *
+ * This function takes the xilinx SDI connector component and defines
+ * the drm_property variables with their default values.
+ *
+ * NOTE(review): drm_property_create_* return values are not checked here;
+ * a NULL result (allocation failure) is tolerated because the attach
+ * helper below NULL-checks every property before attaching it.
+ */
+static void
+xlnx_sdi_drm_connector_create_property(struct drm_connector *base_connector)
+{
+ struct drm_device *dev = base_connector->dev;
+ struct xlnx_sdi *sdi = connector_to_sdi(base_connector);
+
+ sdi->is_frac_prop = drm_property_create_bool(dev, 0, "is_frac");
+ sdi->sdi_mode = drm_property_create_range(dev, 0,
+ "sdi_mode", 0, 5);
+ sdi->sdi_data_strm = drm_property_create_range(dev, 0,
+ "sdi_data_stream", 2, 8);
+ sdi->sdi_420_in = drm_property_create_bool(dev, 0, "sdi_420_in");
+ sdi->sdi_420_out = drm_property_create_bool(dev, 0, "sdi_420_out");
+ sdi->height_out = drm_property_create_range(dev, 0,
+ "height_out", 2, 4096);
+ sdi->width_out = drm_property_create_range(dev, 0,
+ "width_out", 2, 4096);
+ sdi->in_fmt = drm_property_create_range(dev, 0,
+ "in_fmt", 0, 16384);
+ sdi->out_fmt = drm_property_create_range(dev, 0,
+ "out_fmt", 0, 16384);
+ /* chroma ST352 properties exist only when the IP supports the feature */
+ if (sdi->enable_st352_chroma) {
+ sdi->en_st352_c_prop = drm_property_create_bool(dev, 0,
+ "en_st352_c");
+ sdi->use_ds2_3ga_prop = drm_property_create_bool(dev, 0,
+ "use_ds2_3ga");
+ }
+}
+
+/**
+ * xlnx_sdi_drm_connector_attach_property - attach SDI connector
+ * properties
+ *
+ * @base_connector: pointer to Xilinx SDI connector
+ *
+ * Attaches every successfully-created property (NULL ones are skipped)
+ * to the connector object with an initial value of 0.
+ */
+static void
+xlnx_sdi_drm_connector_attach_property(struct drm_connector *base_connector)
+{
+ struct xlnx_sdi *sdi = connector_to_sdi(base_connector);
+ struct drm_mode_object *obj = &base_connector->base;
+
+ if (sdi->sdi_mode)
+ drm_object_attach_property(obj, sdi->sdi_mode, 0);
+
+ if (sdi->sdi_data_strm)
+ drm_object_attach_property(obj, sdi->sdi_data_strm, 0);
+
+ if (sdi->sdi_420_in)
+ drm_object_attach_property(obj, sdi->sdi_420_in, 0);
+
+ if (sdi->sdi_420_out)
+ drm_object_attach_property(obj, sdi->sdi_420_out, 0);
+
+ if (sdi->is_frac_prop)
+ drm_object_attach_property(obj, sdi->is_frac_prop, 0);
+
+ if (sdi->height_out)
+ drm_object_attach_property(obj, sdi->height_out, 0);
+
+ if (sdi->width_out)
+ drm_object_attach_property(obj, sdi->width_out, 0);
+
+ if (sdi->in_fmt)
+ drm_object_attach_property(obj, sdi->in_fmt, 0);
+
+ if (sdi->out_fmt)
+ drm_object_attach_property(obj, sdi->out_fmt, 0);
+
+ if (sdi->en_st352_c_prop)
+ drm_object_attach_property(obj, sdi->en_st352_c_prop, 0);
+
+ if (sdi->use_ds2_3ga_prop)
+ drm_object_attach_property(obj, sdi->use_ds2_3ga_prop, 0);
+}
+
+/**
+ * xlnx_sdi_create_connector - initialize and register the SDI connector
+ * @encoder: DRM encoder the connector is attached to
+ *
+ * Return: 0 on success, negative errno if connector init fails.
+ */
+static int xlnx_sdi_create_connector(struct drm_encoder *encoder)
+{
+ struct xlnx_sdi *sdi = encoder_to_sdi(encoder);
+ struct drm_connector *connector = &sdi->connector;
+ int ret;
+
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
+
+ ret = drm_connector_init(encoder->dev, connector,
+ &xlnx_sdi_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (ret) {
+ dev_err(sdi->dev, "Failed to initialize connector with drm\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(connector, &xlnx_sdi_connector_helper_funcs);
+ drm_connector_register(connector);
+ drm_connector_attach_encoder(connector, encoder);
+ xlnx_sdi_drm_connector_create_property(connector);
+ xlnx_sdi_drm_connector_attach_property(connector);
+
+ return 0;
+}
+
+/**
+ * xlnx_sdi_set_display_enable - Enables the SDI Tx IP core enable
+ * register bit
+ * @sdi: SDI structure having the updated user parameters
+ *
+ * This function takes the SDI structure and enables the core enable bit
+ * of core configuration register.
+ */
+static void xlnx_sdi_set_display_enable(struct xlnx_sdi *sdi)
+{
+ u32 data;
+
+ data = xlnx_sdi_readl(sdi->base, XSDI_TX_RST_CTRL);
+ data |= XSDI_TX_CTRL_EN;
+ xlnx_sdi_writel(sdi->base, XSDI_TX_RST_CTRL, data);
+}
+
+/**
+ * xlnx_sdi_calc_st352_payld - calculate the st352 payload
+ *
+ * @sdi: pointer to SDI Tx structure
+ * @mode: DRM display mode
+ *
+ * This function calculates the st352 payload to be configured.
+ * Please refer to SMPTE ST352 documents for it.
+ * Return: st352 payload, or 0 if @mode is not in the supported mode table
+ */
+static u32 xlnx_sdi_calc_st352_payld(struct xlnx_sdi *sdi,
+ struct drm_display_mode *mode)
+{
+ u8 byt1, byt2;
+ u16 is_p;
+ int id;
+ u32 sdi_mode = sdi->sdi_mod_prop_val;
+ bool is_frac = sdi->is_frac_prop_val;
+ u32 byt3 = ST352_BYTE3;
+
+ id = xlnx_sdi_get_mode_id(mode);
+ dev_dbg(sdi->dev, "mode id: %d\n", id);
+ if (id < 0) {
+ /*
+ * get_mode_id() returns -EINVAL for unknown modes; using it
+ * as an index would read out of bounds of xlnx_sdi_modes[].
+ */
+ dev_err(sdi->dev, "unsupported video mode\n");
+ return 0;
+ }
+ if (mode->hdisplay == 2048 || mode->hdisplay == 4096)
+ byt3 |= XST352_2048_SHIFT;
+ if (sdi->sdi_420_in_val)
+ byt3 |= XST352_YUV420_MASK;
+
+ /* byte 2 calculation */
+ is_p = !(mode->flags & DRM_MODE_FLAG_INTERLACE);
+ byt2 = xlnx_sdi_modes[id].st352_byt2[is_frac];
+ if (sdi_mode == XSDI_MODE_3GB ||
+ (mode->flags & DRM_MODE_FLAG_DBLSCAN) || is_p)
+ byt2 |= XST352_PROG_PIC;
+ if (is_p && mode->vtotal >= 1125)
+ byt2 |= XST352_PROG_TRANS;
+
+ /* byte 1 calculation */
+ byt1 = xlnx_sdi_modes[id].st352_byt1[sdi_mode];
+
+ return (ST352_BYTE4 << 24 | byt3 << 16 | byt2 << 8 | byt1);
+}
+
+/**
+ * xlnx_sdi_setup - baseline SDI Tx module configuration
+ * @sdi: pointer to SDI Tx structure
+ *
+ * Enables CRC/ST352/EDH insertion (plus optional ancillary-data and
+ * chroma-ST352 options), unmasks the status interrupts, enables the
+ * global interrupt and resets the timing controller.
+ */
+static void xlnx_sdi_setup(struct xlnx_sdi *sdi)
+{
+ u32 reg;
+
+ dev_dbg(sdi->dev, "%s\n", __func__);
+
+ reg = xlnx_sdi_readl(sdi->base, XSDI_TX_MDL_CTRL);
+ reg |= XSDI_TX_CTRL_INS_CRC | XSDI_TX_CTRL_INS_ST352 |
+ XSDI_TX_CTRL_OVR_ST352 | XSDI_TX_CTRL_INS_SYNC_BIT |
+ XSDI_TX_CTRL_INS_EDH;
+
+ if (sdi->enable_anc_data)
+ reg |= XSDI_TX_CTRL_USE_ANC_IN;
+
+ /* chroma ST352 insertion is only configurable when the IP has it */
+ if (sdi->enable_st352_chroma) {
+ if (sdi->en_st352_c_val) {
+ reg |= XSDI_TX_CTRL_INS_ST352_CHROMA;
+ if (sdi->use_ds2_3ga_val)
+ reg |= XSDI_TX_CTRL_USE_DS2_3GA;
+ else
+ reg &= ~XSDI_TX_CTRL_USE_DS2_3GA;
+ } else {
+ reg &= ~XSDI_TX_CTRL_INS_ST352_CHROMA;
+ reg &= ~XSDI_TX_CTRL_USE_DS2_3GA;
+ }
+ }
+
+ xlnx_sdi_writel(sdi->base, XSDI_TX_MDL_CTRL, reg);
+ xlnx_sdi_writel(sdi->base, XSDI_TX_IER_STAT, XSDI_IER_EN_MASK);
+ xlnx_sdi_writel(sdi->base, XSDI_TX_GLBL_IER, 1);
+ xlnx_stc_reset(sdi->base);
+}
+
+/**
+ * xlnx_sdi_encoder_atomic_mode_set - drive the SDI timing parameters
+ *
+ * @encoder: pointer to Xilinx DRM encoder
+ * @crtc_state: DRM crtc state
+ * @connector_state: DRM connector state
+ *
+ * This function derives the SDI IP timing parameters from the timing
+ * values given to timing module.
+ */
+static void xlnx_sdi_encoder_atomic_mode_set(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *connector_state)
+{
+ struct xlnx_sdi *sdi = encoder_to_sdi(encoder);
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ struct videomode vm;
+ u32 payload, i;
+ u32 sditx_blank, vtc_blank;
+
+ /* Set timing parameters as per bridge output parameters */
+ /*
+ * NOTE(review): xlnx_bridge_* helpers are called before the NULL
+ * check below - presumably they tolerate a NULL bridge; confirm
+ * against xlnx_bridge.h.
+ */
+ xlnx_bridge_set_input(sdi->bridge, adjusted_mode->hdisplay,
+ adjusted_mode->vdisplay, sdi->in_fmt_prop_val);
+ xlnx_bridge_set_output(sdi->bridge, sdi->width_out_prop_val,
+ sdi->height_out_prop_val, sdi->out_fmt_prop_val);
+ xlnx_bridge_enable(sdi->bridge);
+
+ if (sdi->bridge) {
+ /*
+ * When a bridge rescales the stream, overwrite the timing
+ * members (from 'clock' onwards) with the table entry that
+ * matches the bridge output resolution and refresh rate.
+ * NOTE(review): assumes SDI_TIMING_PARAMS_SIZE equals the
+ * span of the timing fields in struct drm_display_mode -
+ * confirm on kernel struct-layout changes.
+ */
+ for (i = 0; i < ARRAY_SIZE(xlnx_sdi_modes); i++) {
+ if (xlnx_sdi_modes[i].mode.hdisplay ==
+ sdi->width_out_prop_val &&
+ xlnx_sdi_modes[i].mode.vdisplay ==
+ sdi->height_out_prop_val &&
+ xlnx_sdi_modes[i].mode.vrefresh ==
+ adjusted_mode->vrefresh) {
+ memcpy((char *)adjusted_mode +
+ offsetof(struct drm_display_mode,
+ clock),
+ &xlnx_sdi_modes[i].mode.clock,
+ SDI_TIMING_PARAMS_SIZE);
+ break;
+ }
+ }
+ }
+
+ xlnx_sdi_setup(sdi);
+ xlnx_sdi_set_config_parameters(sdi);
+
+ /* set st352 payloads */
+ payload = xlnx_sdi_calc_st352_payld(sdi, adjusted_mode);
+ dev_dbg(sdi->dev, "payload : %0x\n", payload);
+
+ /* one payload register per stream pair; 3GB encodes the channel id */
+ for (i = 0; i < sdi->sdi_data_strm_prop_val / 2; i++) {
+ if (sdi->sdi_mod_prop_val == XSDI_MODE_3GB)
+ payload |= (i << 1) << XSDI_CH_SHIFT;
+ xlnx_sdi_set_payload_data(sdi, i, payload);
+ }
+
+ /* UHDSDI is fixed 2 pixels per clock, horizontal timings div by 2 */
+ vm.hactive = adjusted_mode->hdisplay / PIXELS_PER_CLK;
+ vm.hfront_porch = (adjusted_mode->hsync_start -
+ adjusted_mode->hdisplay) / PIXELS_PER_CLK;
+ vm.hback_porch = (adjusted_mode->htotal -
+ adjusted_mode->hsync_end) / PIXELS_PER_CLK;
+ vm.hsync_len = (adjusted_mode->hsync_end -
+ adjusted_mode->hsync_start) / PIXELS_PER_CLK;
+
+ vm.vactive = adjusted_mode->vdisplay;
+ vm.vfront_porch = adjusted_mode->vsync_start -
+ adjusted_mode->vdisplay;
+ vm.vback_porch = adjusted_mode->vtotal -
+ adjusted_mode->vsync_end;
+ vm.vsync_len = adjusted_mode->vsync_end -
+ adjusted_mode->vsync_start;
+ vm.flags = 0;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vm.flags |= DISPLAY_FLAGS_INTERLACED;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ vm.flags |= DISPLAY_FLAGS_HSYNC_LOW;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ vm.flags |= DISPLAY_FLAGS_VSYNC_LOW;
+
+ /*
+ * Grow hfront_porch until the VTC blanking (in pixels) matches the
+ * SDI Tx blanking - compensates for odd blanking values truncated
+ * by the /PIXELS_PER_CLK division above.
+ */
+ do {
+ sditx_blank = (adjusted_mode->hsync_start -
+ adjusted_mode->hdisplay) +
+ (adjusted_mode->hsync_end -
+ adjusted_mode->hsync_start) +
+ (adjusted_mode->htotal -
+ adjusted_mode->hsync_end);
+
+ vtc_blank = (vm.hfront_porch + vm.hback_porch +
+ vm.hsync_len) * PIXELS_PER_CLK;
+
+ if (vtc_blank != sditx_blank)
+ vm.hfront_porch++;
+ } while (vtc_blank < sditx_blank);
+
+ vm.pixelclock = adjusted_mode->clock * 1000;
+
+ /* parameters for sdi audio */
+ sdi->video_mode.vdisplay = adjusted_mode->vdisplay;
+ sdi->video_mode.hdisplay = adjusted_mode->hdisplay;
+ sdi->video_mode.vrefresh = adjusted_mode->vrefresh;
+ sdi->video_mode.flags = adjusted_mode->flags;
+
+ xlnx_stc_sig(sdi->base, &vm);
+}
+
+/**
+ * xlnx_sdi_commit - Enable the SDI Tx display pipeline
+ * @encoder: DRM encoder being enabled
+ *
+ * Enables the SDI display output, waits up to GT_TIMEOUT for the GT
+ * reset-done interrupt, then enables the SDI bridge, the timing
+ * controller and the AXI4-Stream video input.
+ */
+static void xlnx_sdi_commit(struct drm_encoder *encoder)
+{
+	struct xlnx_sdi *sdi = encoder_to_sdi(encoder);
+	long ret;
+
+	dev_dbg(sdi->dev, "%s\n", __func__);
+	xlnx_sdi_set_display_enable(sdi);
+	ret = wait_event_interruptible_timeout(sdi->wait_event,
+					       sdi->event_received,
+					       usecs_to_jiffies(GT_TIMEOUT));
+	if (!ret) {
+		dev_err(sdi->dev, "Timeout: GT interrupt not received\n");
+		return;
+	}
+	if (ret < 0) {
+		/*
+		 * Interrupted by a signal before the GT event arrived;
+		 * previously this fell through and enabled a half-ready
+		 * pipeline.
+		 */
+		dev_err(sdi->dev, "GT wait interrupted: %ld\n", ret);
+		return;
+	}
+	sdi->event_received = false;
+	/* enable sdi bridge, timing controller and Axi4s_vid_out_ctrl */
+	xlnx_sdi_en_bridge(sdi);
+	xlnx_stc_enable(sdi->base);
+	xlnx_sdi_en_axi4s(sdi);
+}
+
+/**
+ * xlnx_sdi_disable - Disable the SDI Tx display pipeline
+ * @encoder: DRM encoder being disabled
+ *
+ * Disables the attached bridge (if any), then the SDI display output and
+ * the timing controller.
+ */
+static void xlnx_sdi_disable(struct drm_encoder *encoder)
+{
+	struct xlnx_sdi *sdi = encoder_to_sdi(encoder);
+
+	if (sdi->bridge)
+		xlnx_bridge_disable(sdi->bridge);
+
+	xlnx_sdi_set_display_disable(sdi);
+	xlnx_stc_disable(sdi->base);
+}
+
+/* DRM encoder helper callbacks: atomic mode programming, enable, disable */
+static const struct drm_encoder_helper_funcs xlnx_sdi_encoder_helper_funcs = {
+	.atomic_mode_set = xlnx_sdi_encoder_atomic_mode_set,
+	.enable = xlnx_sdi_commit,
+	.disable = xlnx_sdi_disable,
+};
+
+/* Encoder destruction needs only the DRM core cleanup */
+static const struct drm_encoder_funcs xlnx_sdi_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
+};
+
+/**
+ * xlnx_sdi_bind - Bind the SDI Tx encoder into the DRM device
+ * @dev: SDI Tx device
+ * @master: component master (unused)
+ * @data: pointer to the drm_device
+ *
+ * Initializes the DRM encoder, attaches the helper callbacks and creates
+ * the SDI connector.
+ *
+ * Return: 0 on success, negative error code otherwise
+ */
+static int xlnx_sdi_bind(struct device *dev, struct device *master,
+			 void *data)
+{
+	struct xlnx_sdi *sdi = dev_get_drvdata(dev);
+	struct drm_encoder *encoder = &sdi->encoder;
+	struct drm_device *drm_dev = data;
+	int ret;
+
+	/*
+	 * TODO: The possible CRTCs are 1 now as per current implementation of
+	 * SDI tx drivers. DRM framework can support more than one CRTCs and
+	 * SDI driver can be enhanced for that.
+	 */
+	encoder->possible_crtcs = 1;
+
+	/* drm_encoder_init() can fail; the return value was ignored before */
+	ret = drm_encoder_init(drm_dev, encoder, &xlnx_sdi_encoder_funcs,
+			       DRM_MODE_ENCODER_TMDS, NULL);
+	if (ret) {
+		dev_err(sdi->dev, "failed to init encoder, ret = %d\n", ret);
+		return ret;
+	}
+
+	drm_encoder_helper_add(encoder, &xlnx_sdi_encoder_helper_funcs);
+
+	ret = xlnx_sdi_create_connector(encoder);
+	if (ret) {
+		dev_err(sdi->dev, "fail creating connector, ret = %d\n", ret);
+		drm_encoder_cleanup(encoder);
+	}
+	return ret;
+}
+
+/**
+ * xlnx_sdi_unbind - Unbind the SDI Tx encoder from the DRM device
+ * @dev: SDI Tx device
+ * @master: component master (unused)
+ * @data: pointer to the drm_device (unused)
+ *
+ * Disables the display output and timing controller, then tears down the
+ * encoder, connector and bridge.
+ */
+static void xlnx_sdi_unbind(struct device *dev, struct device *master,
+			    void *data)
+{
+	struct xlnx_sdi *sdi = dev_get_drvdata(dev);
+
+	xlnx_sdi_set_display_disable(sdi);
+	xlnx_stc_disable(sdi->base);
+	drm_encoder_cleanup(&sdi->encoder);
+	drm_connector_cleanup(&sdi->connector);
+	/* NOTE(review): called without the NULL-bridge guard that
+	 * xlnx_sdi_disable() uses - presumably safe on NULL; confirm.
+	 */
+	xlnx_bridge_disable(sdi->bridge);
+}
+
+/* Component framework hooks connecting this encoder to the DRM master */
+static const struct component_ops xlnx_sdi_component_ops = {
+	.bind	= xlnx_sdi_bind,
+	.unbind	= xlnx_sdi_unbind,
+};
+
+/**
+ * xlnx_sdi_probe - Platform driver probe callback
+ * @pdev: SDI Tx platform device
+ *
+ * Maps the register space, acquires and enables the three IP clocks,
+ * parses the DT port topology to decide whether the ancillary data stream
+ * is wired, requests the GT interrupt, resolves the optional VPSS bridge
+ * and registers the component.
+ *
+ * Return: 0 on success, negative error code otherwise
+ */
+static int xlnx_sdi_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct xlnx_sdi *sdi;
+	struct device_node *vpss_node;
+	int ret, irq;
+	struct device_node *ports, *port;
+	u32 nports = 0, portmask = 0;
+
+	sdi = devm_kzalloc(dev, sizeof(*sdi), GFP_KERNEL);
+	if (!sdi)
+		return -ENOMEM;
+
+	sdi->dev = dev;
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	sdi->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(sdi->base)) {
+		dev_err(dev, "failed to remap io region\n");
+		return PTR_ERR(sdi->base);
+	}
+	platform_set_drvdata(pdev, sdi);
+
+	sdi->axi_clk = devm_clk_get(dev, "s_axi_aclk");
+	if (IS_ERR(sdi->axi_clk)) {
+		ret = PTR_ERR(sdi->axi_clk);
+		dev_err(dev, "failed to get s_axi_aclk %d\n", ret);
+		return ret;
+	}
+
+	sdi->sditx_clk = devm_clk_get(dev, "sdi_tx_clk");
+	if (IS_ERR(sdi->sditx_clk)) {
+		ret = PTR_ERR(sdi->sditx_clk);
+		dev_err(dev, "failed to get sdi_tx_clk %d\n", ret);
+		return ret;
+	}
+
+	sdi->vidin_clk = devm_clk_get(dev, "video_in_clk");
+	if (IS_ERR(sdi->vidin_clk)) {
+		ret = PTR_ERR(sdi->vidin_clk);
+		dev_err(dev, "failed to get video_in_clk %d\n", ret);
+		return ret;
+	}
+
+	ret = clk_prepare_enable(sdi->axi_clk);
+	if (ret) {
+		dev_err(dev, "failed to enable axi_clk %d\n", ret);
+		return ret;
+	}
+
+	ret = clk_prepare_enable(sdi->sditx_clk);
+	if (ret) {
+		dev_err(dev, "failed to enable sditx_clk %d\n", ret);
+		goto err_disable_axi_clk;
+	}
+
+	ret = clk_prepare_enable(sdi->vidin_clk);
+	if (ret) {
+		dev_err(dev, "failed to enable vidin_clk %d\n", ret);
+		goto err_disable_sditx_clk;
+	}
+
+	/* in case all "port" nodes are grouped under a "ports" node */
+	ports = of_get_child_by_name(sdi->dev->of_node, "ports");
+	if (!ports) {
+		dev_dbg(dev, "Searching for port nodes in device node.\n");
+		ports = sdi->dev->of_node;
+	}
+
+	for_each_child_of_node(ports, port) {
+		struct device_node *endpoint;
+		u32 index;
+
+		if (!port->name || of_node_cmp(port->name, "port")) {
+			dev_dbg(dev, "port name is null or node name is not port!\n");
+			continue;
+		}
+
+		endpoint = of_get_next_child(port, NULL);
+		if (!endpoint) {
+			dev_err(dev, "No remote port at %s\n", port->name);
+			/* drop the iterator's reference on early exit */
+			of_node_put(port);
+			ret = -EINVAL;
+			goto err_put_ports;
+		}
+
+		of_node_put(endpoint);
+
+		ret = of_property_read_u32(port, "reg", &index);
+		if (ret) {
+			dev_err(dev, "reg property not present - %d\n", ret);
+			of_node_put(port);
+			goto err_put_ports;
+		}
+
+		portmask |= (1 << index);
+
+		nports++;
+	}
+
+	/*
+	 * The ancillary stream requires ports 0 AND 1. The old test
+	 * "portmask & 0x3" was truthy for e.g. ports {0, 2} as well.
+	 */
+	if (nports == 2 && (portmask & 0x3) == 0x3) {
+		dev_dbg(dev, "enable ancillary port\n");
+		sdi->enable_anc_data = true;
+	} else if (nports == 1 && (portmask & 0x1)) {
+		dev_dbg(dev, "no ancillary port\n");
+		sdi->enable_anc_data = false;
+	} else {
+		dev_err(dev, "Incorrect dt node!\n");
+		ret = -EINVAL;
+		goto err_put_ports;
+	}
+
+	/* release only the reference taken by of_get_child_by_name() */
+	if (ports != sdi->dev->of_node)
+		of_node_put(ports);
+
+	sdi->enable_st352_chroma = of_property_read_bool(sdi->dev->of_node,
+							 "xlnx,tx-insert-c-str-st352");
+
+	/* disable interrupt */
+	xlnx_sdi_writel(sdi->base, XSDI_TX_GLBL_IER, 0);
+	irq = platform_get_irq_byname(pdev, "sdi_tx_irq");
+	if (irq < 0) {
+		/*
+		 * If there is no IRQ with this name, try to get the first
+		 * IRQ defined in the device tree.
+		 */
+		irq = platform_get_irq(pdev, 0);
+		if (irq < 0) {
+			ret = irq;
+			goto err_disable_vidin_clk;
+		}
+	}
+
+	ret = devm_request_threaded_irq(sdi->dev, irq, NULL,
+					xlnx_sdi_irq_handler, IRQF_ONESHOT,
+					dev_name(sdi->dev), sdi);
+	if (ret < 0)
+		goto err_disable_vidin_clk;
+
+	/* initialize the wait queue for GT reset event */
+	init_waitqueue_head(&sdi->wait_event);
+
+	/* Bridge support */
+	vpss_node = of_parse_phandle(sdi->dev->of_node, "xlnx,vpss", 0);
+	if (vpss_node) {
+		sdi->bridge = of_xlnx_bridge_get(vpss_node);
+		/* of_parse_phandle() takes a reference; drop it (was leaked) */
+		of_node_put(vpss_node);
+		if (!sdi->bridge) {
+			dev_info(sdi->dev, "Didn't get bridge instance\n");
+			ret = -EPROBE_DEFER;
+			goto err_disable_vidin_clk;
+		}
+	}
+
+	/* video mode properties needed by audio driver are shared to audio
+	 * driver through a pointer in platform data. This will be used in
+	 * audio driver. The solution may be needed to modify/extend to avoid
+	 * probable error scenarios
+	 */
+	pdev->dev.platform_data = &sdi->video_mode;
+
+	ret = component_add(dev, &xlnx_sdi_component_ops);
+	if (ret < 0)
+		goto err_disable_vidin_clk;
+
+	return ret;
+
+err_put_ports:
+	if (ports != sdi->dev->of_node)
+		of_node_put(ports);
+err_disable_vidin_clk:
+	clk_disable_unprepare(sdi->vidin_clk);
+err_disable_sditx_clk:
+	clk_disable_unprepare(sdi->sditx_clk);
+err_disable_axi_clk:
+	clk_disable_unprepare(sdi->axi_clk);
+
+	return ret;
+}
+
+/**
+ * xlnx_sdi_remove - Platform driver remove callback
+ * @pdev: SDI Tx platform device
+ *
+ * Unregisters the component and releases the clocks enabled in probe.
+ *
+ * Return: always 0
+ */
+static int xlnx_sdi_remove(struct platform_device *pdev)
+{
+	struct xlnx_sdi *sdi = platform_get_drvdata(pdev);
+
+	component_del(&pdev->dev, &xlnx_sdi_component_ops);
+	clk_disable_unprepare(sdi->vidin_clk);
+	clk_disable_unprepare(sdi->sditx_clk);
+	clk_disable_unprepare(sdi->axi_clk);
+
+	return 0;
+}
+
+/* Device tree match table */
+static const struct of_device_id xlnx_sdi_of_match[] = {
+	{ .compatible = "xlnx,sdi-tx"},
+	{ }
+};
+MODULE_DEVICE_TABLE(of, xlnx_sdi_of_match);
+
+/* Platform driver registration */
+static struct platform_driver sdi_tx_driver = {
+	.probe = xlnx_sdi_probe,
+	.remove = xlnx_sdi_remove,
+	.driver = {
+		.name = "xlnx-sdi-tx",
+		.of_match_table = xlnx_sdi_of_match,
+	},
+};
+
+module_platform_driver(sdi_tx_driver);
+
+MODULE_AUTHOR("Saurabh Sengar <saurabhs@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx FPGA SDI Tx Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/xlnx_sdi_modes.h b/drivers/gpu/drm/xlnx/xlnx_sdi_modes.h
new file mode 100644
index 000000000000..534f7d80f29c
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_sdi_modes.h
@@ -0,0 +1,356 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA SDI modes timing values for various
+ * resolutions
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Contacts: Saurabh Sengar <saurabhs@xilinx.com>
+ */
+
+#ifndef _XLNX_SDI_MODES_H_
+#define _XLNX_SDI_MODES_H_
+
+/**
+ * struct xlnx_sdi_display_config - SDI supported modes structure
+ * @mode: drm display mode
+ * @st352_byt2: st352 byte 2 value
+ *		index 0 : value for integral fps
+ *		index 1 : value for fractional fps
+ * @st352_byt1: st352 byte 1 value
+ *		index 0 : value for HD mode
+ *		index 1 : value for SD mode
+ *		index 2 : value for 3GA
+ *		index 3 : value for 3GB
+ *		index 4 : value for 6G
+ *		index 5 : value for 12G
+ *
+ * One entry per supported SDI mode; presumably the byte arrays are indexed
+ * by SDI operating mode and frame-rate class when building the ST 352
+ * payload id word - confirm in xlnx_sdi_calc_st352_payld().
+ */
+struct xlnx_sdi_display_config {
+	struct drm_display_mode mode;
+	u8 st352_byt2[2];
+	u8 st352_byt1[6];
+};
+
+/*
+ * xlnx_sdi_modes - SDI DRM modes
+ *
+ * Note: for the interlaced and segmented-frame ("sf") entries the stored
+ * .vrefresh is the field rate (e.g. the "1080sf@24Hz" entries carry
+ * .vrefresh = 48), matching the vrefresh comparison done by the bridge
+ * mode lookup in xlnx_sdi_encoder_atomic_mode_set().
+ */
+static const struct xlnx_sdi_display_config xlnx_sdi_modes[] = {
+ /* 0 - dummy, VICs start at 1 */
+ { },
+ /* SD: 720x486i@60Hz */
+ {{ DRM_MODE("720x486i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
+ 801, 858, 0, 243, 247, 250, 262, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x81, 0x81, 0x81, 0x81, 0x81, 0x81} },
+ /* SD: 720x576i@50Hz */
+ {{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
+ 795, 864, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x81, 0x81, 0x81, 0x81, 0x81, 0x81} },
+ /* HD: 1280x720@25Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 2250,
+ 2990, 3960, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1280x720@24Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 2250,
+ 3155, 4125, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1280x720@30Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 2250,
+ 2330, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1280x720@50Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1280x720@60Hz */
+ {{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x84, 0x84, 0x88, 0x84, 0x84, 0x84} },
+ /* HD: 1920x1080@24Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080@25Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080@30Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080i@48Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2291,
+ 2379, 2750, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 48, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080i@50Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 50, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080i@60Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080sf@24Hz */
+ {{ DRM_MODE("1920x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2291,
+ 2379, 2750, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 48, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080sf@25Hz */
+ {{ DRM_MODE("1920x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 50, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 1920x1080sf@30Hz */
+ {{ DRM_MODE("1920x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080i@48Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2377,
+ 2421, 2750, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 48, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080i@50Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2322,
+ 2366, 2640, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 50, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080i@60Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2114,
+ 2134, 2200, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080sf@24Hz */
+ {{ DRM_MODE("2048x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2377,
+ 2421, 2750, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 48, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080sf@25Hz */
+ {{ DRM_MODE("2048x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2322,
+ 2366, 2640, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 50, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080sf@30Hz */
+ {{ DRM_MODE("2048x1080sf", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2114,
+ 2134, 2200, 0, 540, 542, 547, 562, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN),
+ .vrefresh = 60, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080@30Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2114,
+ 2134, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080@25Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* HD: 2048x1080@24Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 74250, 2048, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 1920x1080@48Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, }, {0x8, 0x4},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 1920x1080@50Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 1920x1080@60Hz */
+ {{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 2048x1080@60Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2136,
+ 2180, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 2048x1080@50Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G: 2048x1080@48Hz */
+ {{ DRM_MODE("2048x1080", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, }, {0x8, 0x4},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 1920x1080i@96Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2291,
+ 2379, 2750, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 96, }, {0x8, 0x4},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 1920x1080i@100Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 100, }, {0x9, 0x9},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 1920x1080i@120Hz */
+ {{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 120, }, {0xB, 0xA},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 2048x1080i@96Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2377,
+ 2421, 2750, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 96, }, {0x8, 0x4},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 2048x1080i@100Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2322,
+ 2366, 2640, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 100, }, {0x9, 0x9},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 3G-B: 2048x1080i@120Hz */
+ {{ DRM_MODE("2048x1080i", DRM_MODE_TYPE_DRIVER, 148500, 2048, 2114,
+ 2134, 2200, 0, 1080, 1084, 1094, 1124, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE),
+ .vrefresh = 120, }, {0xB, 0xA},
+ {0x85, 0x85, 0x89, 0x8A, 0xC1, 0xC1} },
+ /* 6G: 3840x2160@30Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 3840x2160@25Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 3840x2160@24Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 4096x2160@24Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 296704, 4096, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, }, {0x3, 0x2},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 4096x2160@25Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5064,
+ 5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, }, {0x5, 0x5},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 6G: 4096x2160@30Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 296704, 4096, 4184,
+ 4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, }, {0x7, 0x6},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 3840x2160@48Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, }, {0x8, 0x4},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 3840x2160@50Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 3840x2160@60Hz */
+ {{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 4096x2160@48Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, }, {0x8, 0x4},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 4096x2160@50Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5064,
+ 5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, }, {0x9, 0x9},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+ /* 12G: 4096x2160@60Hz */
+ {{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 593408, 4096, 4184,
+ 4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, }, {0xB, 0xA},
+ {0x98, 0x98, 0x97, 0x98, 0xC0, 0xCE} },
+};
+
+#endif /* _XLNX_SDI_MODES_H_ */
diff --git a/drivers/gpu/drm/xlnx/xlnx_sdi_timing.c b/drivers/gpu/drm/xlnx/xlnx_sdi_timing.c
new file mode 100644
index 000000000000..77e736b5f70e
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_sdi_timing.c
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA SDI Tx timing controller driver
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Contacts: Saurabh Sengar <saurabhs@xilinx.com>
+ */
+
+#include <drm/drm_print.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <video/videomode.h>
+#include <asm-generic/io.h>
+#include "xlnx_sdi_timing.h"
+
+/* timing controller register offsets */
+#define XSTC_CTL 0x00
+#define XSTC_STATS 0x04
+#define XSTC_ERROR 0x08
+#define XSTC_GASIZE 0x60
+#define XSTC_GENC 0x68
+#define XSTC_GPOL 0x6c
+#define XSTC_GHSIZE 0x70
+#define XSTC_GVSIZE 0x74
+#define XSTC_GHSYNC 0x78
+#define XSTC_GVBH_F0 0x7c
+#define XSTC_GVSYNC_F0 0x80
+#define XSTC_GVSH_F0 0x84
+#define XSTC_GVBH_F1 0x88
+#define XSTC_GVSYNC_F1 0x8C
+#define XSTC_GVSH_F1 0x90
+#define XSTC_GASIZE_F1 0x94
+#define XSTC_OFFSET 0x10000
+
+/* timing controller register bit */
+#define XSTC_CTL_FIP BIT(6) /* field id polarity */
+#define XSTC_CTL_ACP BIT(5) /* active chroma polarity */
+#define XSTC_CTL_AVP BIT(4) /* active video polarity */
+#define XSTC_CTL_HSP BIT(3) /* hori sync polarity */
+#define XSTC_CTL_VSP BIT(2) /* vert sync polarity */
+#define XSTC_CTL_HBP BIT(1) /* hori blank polarity */
+#define XSTC_CTL_VBP BIT(0) /* vert blank polarity */
+#define XSTC_CTL_FIPSS BIT(26) /* field id polarity source */
+#define XSTC_CTL_ACPSS BIT(25) /* active chroma polarity src */
+#define XSTC_CTL_AVPSS BIT(24) /* active video polarity src */
+#define XSTC_CTL_HSPSS BIT(23) /* hori sync polarity src */
+#define XSTC_CTL_VSPSS BIT(22) /* vert sync polarity src */
+#define XSTC_CTL_HBPSS BIT(21) /* hori blank polarity src */
+#define XSTC_CTL_VBPSS BIT(20) /* vert blank polarity src */
+#define XSTC_CTL_VCSS BIT(18) /* chroma src */
+#define XSTC_CTL_VASS BIT(17) /* vertical offset src */
+#define XSTC_CTL_VBSS BIT(16) /* vertical sync end src */
+#define XSTC_CTL_VSSS BIT(15) /* vertical sync start src */
+#define XSTC_CTL_VFSS BIT(14) /* vertical active size src */
+#define XSTC_CTL_VTSS BIT(13) /* vertical frame size src */
+#define XSTC_CTL_HBSS BIT(11) /* horiz sync end src */
+#define XSTC_CTL_HSSS BIT(10) /* horiz sync start src */
+#define XSTC_CTL_HFSS BIT(9) /* horiz active size src */
+#define XSTC_CTL_HTSS BIT(8) /* horiz frame size src */
+#define XSTC_CTL_GE BIT(2) /* timing generator enable */
+#define XSTC_CTL_RU BIT(1) /* timing register update */
+
+/* timing generator horizontal 1 */
+#define XSTC_GH1_BPSTART_MASK GENMASK(28, 16)
+#define XSTC_GH1_BPSTART_SHIFT 16
+#define XSTC_GH1_SYNCSTART_MASK GENMASK(12, 0)
+/* timing generator vertical 1 (filed 0) */
+#define XSTC_GV1_BPSTART_MASK GENMASK(28, 16)
+#define XSTC_GV1_BPSTART_SHIFT 16
+#define XSTC_GV1_SYNCSTART_MASK GENMASK(12, 0)
+/* timing generator/detector vblank/vsync horizontal offset registers */
+#define XSTC_XVXHOX_HEND_MASK GENMASK(28, 16)
+#define XSTC_XVXHOX_HEND_SHIFT 16
+#define XSTC_XVXHOX_HSTART_MASK GENMASK(12, 0)
+
+#define XSTC_GHFRAME_HSIZE GENMASK(12, 0)
+#define XSTC_GVFRAME_HSIZE_F1 GENMASK(12, 0)
+#define XSTC_GA_ACTSIZE_MASK GENMASK(12, 0)
+/* reset register bit definition */
+#define XSTC_RST BIT(31)
+/* Interlaced bit in XSTC_GENC */
+#define XSTC_GENC_INTERL BIT(6)
+
+/**
+ * struct xlnx_stc_polarity - timing signal polarity
+ *
+ * @field_id: field ID polarity
+ * @vblank: vblank polarity
+ * @vsync: vsync polarity
+ * @hblank: hblank polarity
+ * @hsync: hsync polarity
+ *
+ * Each member is used as a boolean: a nonzero value sets the
+ * corresponding polarity bit in the generator polarity register
+ * (see xlnx_stc_polarity()).
+ */
+struct xlnx_stc_polarity {
+	u8 field_id;
+	u8 vblank;
+	u8 vsync;
+	u8 hblank;
+	u8 hsync;
+};
+
+/**
+ * struct xlnx_stc_hori_off - timing signal horizontal offset
+ *
+ * @v0blank_hori_start: vblank horizontal start (field 0)
+ * @v0blank_hori_end: vblank horizontal end (field 0)
+ * @v0sync_hori_start: vsync horizontal start (field 0)
+ * @v0sync_hori_end: vsync horizontal end (field 0)
+ * @v1blank_hori_start: vblank horizontal start (field 1)
+ * @v1blank_hori_end: vblank horizontal end (field 1)
+ * @v1sync_hori_start: vsync horizontal start (field 1)
+ * @v1sync_hori_end: vsync horizontal end (field 1)
+ *
+ * Values are masked to 13 bits when programmed (XSTC_XVXHOX_*); the
+ * field-1 members are only written for interlaced modes.
+ */
+struct xlnx_stc_hori_off {
+	u16 v0blank_hori_start;
+	u16 v0blank_hori_end;
+	u16 v0sync_hori_start;
+	u16 v0sync_hori_end;
+	u16 v1blank_hori_start;
+	u16 v1blank_hori_end;
+	u16 v1sync_hori_start;
+	u16 v1sync_hori_end;
+};
+
+/**
+ * xlnx_stc_writel - Memory mapped SDI Tx timing controller write
+ * @base: Pointer to SDI Tx registers base
+ * @offset: Register offset
+ * @val: value to be written
+ *
+ * This function writes the value to SDI TX timing controller registers.
+ * The timing controller lives at XSTC_OFFSET within the subsystem's
+ * register space, so the offset is rebased accordingly.
+ */
+static inline void xlnx_stc_writel(void __iomem *base, int offset, u32 val)
+{
+	writel(val, base + XSTC_OFFSET + offset);
+}
+
+/**
+ * xlnx_stc_readl - Memory mapped timing controller register read
+ * @base: Pointer to SDI Tx registers base
+ * @offset: Register offset
+ *
+ * Return: The contents of the SDI Tx timing controller register
+ *
+ * This function returns the contents of the corresponding SDI Tx register
+ * (rebased by XSTC_OFFSET, like xlnx_stc_writel()).
+ */
+static inline u32 xlnx_stc_readl(void __iomem *base, int offset)
+{
+	return readl(base + XSTC_OFFSET + offset);
+}
+
+/**
+ * xlnx_stc_enable - Enable timing controller
+ * @base: Base address of SDI Tx subsystem
+ *
+ * This function enables the SDI Tx subsystem's timing controller by
+ * setting the generator-enable bit via read-modify-write of XSTC_CTL.
+ */
+void xlnx_stc_enable(void __iomem *base)
+{
+	u32 reg;
+
+	reg = xlnx_stc_readl(base, XSTC_CTL);
+	xlnx_stc_writel(base, XSTC_CTL, reg | XSTC_CTL_GE);
+}
+
+/**
+ * xlnx_stc_disable - Disable timing controller
+ * @base: Base address of SDI Tx subsystem
+ *
+ * This function disables the SDI Tx subsystem's timing controller by
+ * clearing the generator-enable bit; other XSTC_CTL bits are preserved.
+ */
+void xlnx_stc_disable(void __iomem *base)
+{
+	u32 reg;
+
+	reg = xlnx_stc_readl(base, XSTC_CTL);
+	xlnx_stc_writel(base, XSTC_CTL, reg & ~XSTC_CTL_GE);
+}
+
+/**
+ * xlnx_stc_reset - Reset timing controller
+ * @base: Base address of SDI Tx subsystem
+ *
+ * This function resets the SDI Tx subsystem's timing controller and
+ * re-enables register updates afterwards.
+ *
+ * NOTE(review): the reset write stores only XSTC_RST into XSTC_CTL,
+ * clearing every other control bit - presumably the RST bit self-clears
+ * and the generator is reconfigured afterwards; confirm against the
+ * timing controller spec.
+ */
+void xlnx_stc_reset(void __iomem *base)
+{
+	u32 reg;
+
+	xlnx_stc_writel(base, XSTC_CTL, XSTC_RST);
+
+	/* enable register update */
+	reg = xlnx_stc_readl(base, XSTC_CTL);
+	xlnx_stc_writel(base, XSTC_CTL, reg | XSTC_CTL_RU);
+}
+
+/**
+ * xlnx_stc_polarity - Configure timing signal polarity
+ * @base: Base address of SDI Tx subsystem
+ * @polarity: timing signal polarity data
+ *
+ * Builds the generator polarity word and writes it to XSTC_GPOL.
+ * Active chroma and active video polarity are always asserted; the
+ * remaining bits follow the caller-supplied flags.
+ */
+static void xlnx_stc_polarity(void __iomem *base,
+			      struct xlnx_stc_polarity *polarity)
+{
+	/* previously initialized to 0 and immediately overwritten */
+	u32 reg = XSTC_CTL_ACP | XSTC_CTL_AVP;
+
+	if (polarity->field_id)
+		reg |= XSTC_CTL_FIP;
+	if (polarity->vblank)
+		reg |= XSTC_CTL_VBP;
+	if (polarity->vsync)
+		reg |= XSTC_CTL_VSP;
+	if (polarity->hblank)
+		reg |= XSTC_CTL_HBP;
+	if (polarity->hsync)
+		reg |= XSTC_CTL_HSP;
+
+	xlnx_stc_writel(base, XSTC_GPOL, reg);
+}
+
+/**
+ * xlnx_stc_hori_off - Configure horizontal timing offset
+ * @base: Base address of SDI Tx subsystem
+ * @hori_off: horizontal offset configuration data
+ * @flags: Display flags
+ *
+ * Programs the vblank/vsync horizontal offset registers for field 0, and
+ * for field 1 as well when the mode is interlaced. (The original code had
+ * two identical interlace guards and mislabeled the field-1 VSync block
+ * as "VBlank".)
+ */
+static void xlnx_stc_hori_off(void __iomem *base,
+			      struct xlnx_stc_hori_off *hori_off,
+			      enum display_flags flags)
+{
+	u32 reg;
+
+	/* Calculate and update Generator VBlank Hori field 0 */
+	reg = hori_off->v0blank_hori_start & XSTC_XVXHOX_HSTART_MASK;
+	reg |= (hori_off->v0blank_hori_end << XSTC_XVXHOX_HEND_SHIFT) &
+		XSTC_XVXHOX_HEND_MASK;
+	xlnx_stc_writel(base, XSTC_GVBH_F0, reg);
+
+	/* Calculate and update Generator VSync Hori field 0 */
+	reg = hori_off->v0sync_hori_start & XSTC_XVXHOX_HSTART_MASK;
+	reg |= (hori_off->v0sync_hori_end << XSTC_XVXHOX_HEND_SHIFT) &
+		XSTC_XVXHOX_HEND_MASK;
+	xlnx_stc_writel(base, XSTC_GVSH_F0, reg);
+
+	/* Field-1 registers are only meaningful for interlaced modes */
+	if (flags & DISPLAY_FLAGS_INTERLACED) {
+		/* Calculate and update Generator VBlank Hori field 1 */
+		reg = hori_off->v1blank_hori_start & XSTC_XVXHOX_HSTART_MASK;
+		reg |= (hori_off->v1blank_hori_end << XSTC_XVXHOX_HEND_SHIFT) &
+			XSTC_XVXHOX_HEND_MASK;
+		xlnx_stc_writel(base, XSTC_GVBH_F1, reg);
+
+		/* Calculate and update Generator VSync Hori field 1 */
+		reg = hori_off->v1sync_hori_start & XSTC_XVXHOX_HSTART_MASK;
+		reg |= (hori_off->v1sync_hori_end << XSTC_XVXHOX_HEND_SHIFT) &
+			XSTC_XVXHOX_HEND_MASK;
+		xlnx_stc_writel(base, XSTC_GVSH_F1, reg);
+	}
+}
+
+/**
+ * xlnx_stc_src - Configure timing source
+ * @base: Base address of SDI Tx subsystem
+ *
+ * This function selects the internal generator as the source of all
+ * timing signals by setting every source-select bit in the control
+ * register.
+ */
+static void xlnx_stc_src(void __iomem *base)
+{
+	u32 ctl;
+
+	ctl = xlnx_stc_readl(base, XSTC_CTL);
+	ctl |= XSTC_CTL_VCSS | XSTC_CTL_VASS | XSTC_CTL_VBSS |
+	       XSTC_CTL_VSSS | XSTC_CTL_VFSS | XSTC_CTL_VTSS |
+	       XSTC_CTL_HBSS | XSTC_CTL_HSSS | XSTC_CTL_HFSS |
+	       XSTC_CTL_HTSS;
+	xlnx_stc_writel(base, XSTC_CTL, ctl);
+}
+
+/**
+ * xlnx_stc_sig - Generates timing signal
+ * @base: Base address of SDI Tx subsystem
+ * @vm: video mode
+ *
+ * This function generates the timing signals for the given video mode.
+ */
+void xlnx_stc_sig(void __iomem *base, struct videomode *vm)
+{
+	u32 reg;
+	u32 htotal, hactive, hsync_start, hbackporch_start;
+	u32 vtotal, vactive, vsync_start, vbackporch_start;
+	struct xlnx_stc_hori_off hori_off;
+	struct xlnx_stc_polarity polarity;
+
+	/* Hold off register updates while the timing set is reprogrammed */
+	reg = xlnx_stc_readl(base, XSTC_CTL);
+	xlnx_stc_writel(base, XSTC_CTL, reg & ~XSTC_CTL_RU);
+
+	htotal = vm->hactive + vm->hfront_porch + vm->hsync_len +
+		 vm->hback_porch;
+	vtotal = vm->vactive + vm->vfront_porch + vm->vsync_len +
+		 vm->vback_porch;
+	hactive = vm->hactive;
+	vactive = vm->vactive;
+	hsync_start = vm->hactive + vm->hfront_porch;
+	vsync_start = vm->vactive + vm->vfront_porch;
+	hbackporch_start = hsync_start + vm->hsync_len;
+	vbackporch_start = vsync_start + vm->vsync_len;
+
+	DRM_DEBUG_DRIVER("ha: %d, va: %d\n", hactive, vactive);
+	DRM_DEBUG_DRIVER("hs: %d, hb: %d\n", hsync_start, hbackporch_start);
+	DRM_DEBUG_DRIVER("vs: %d, vb: %d\n", vsync_start, vbackporch_start);
+	DRM_DEBUG_DRIVER("ht: %d, vt: %d\n", htotal, vtotal);
+
+	reg = htotal & XSTC_GHFRAME_HSIZE;
+	xlnx_stc_writel(base, XSTC_GHSIZE, reg);
+	reg = vtotal & XSTC_GVFRAME_HSIZE_F1;
+	if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+		/*
+		 * NOTE(review): a pixel clock of exactly 148500000 appears to
+		 * identify the SDI 3GB mode throughout this function (see the
+		 * field-1 fixups below) — confirm against the IP spec.
+		 */
+		if (vm->pixelclock == 148500000)
+			reg |= (reg + 2) <<
+				XSTC_GV1_BPSTART_SHIFT;
+		else
+			reg |= (reg + 1) <<
+				XSTC_GV1_BPSTART_SHIFT;
+	} else {
+		reg |= reg << XSTC_GV1_BPSTART_SHIFT;
+	}
+	xlnx_stc_writel(base, XSTC_GVSIZE, reg);
+	reg = hactive & XSTC_GA_ACTSIZE_MASK;
+	reg |= (vactive & XSTC_GA_ACTSIZE_MASK) << 16;
+	xlnx_stc_writel(base, XSTC_GASIZE, reg);
+
+	if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+		/*
+		 * NOTE(review): 243 active lines per field presumably matches
+		 * NTSC-style interlaced content where field 1 carries an
+		 * extra line — confirm with hardware documentation.
+		 */
+		if (vactive == 243)
+			reg = ((vactive + 1) & XSTC_GA_ACTSIZE_MASK) << 16;
+		else
+			reg = (vactive & XSTC_GA_ACTSIZE_MASK) << 16;
+		xlnx_stc_writel(base, XSTC_GASIZE_F1, reg);
+	}
+
+	reg = hsync_start & XSTC_GH1_SYNCSTART_MASK;
+	reg |= (hbackporch_start << XSTC_GH1_BPSTART_SHIFT) &
+	       XSTC_GH1_BPSTART_MASK;
+	xlnx_stc_writel(base, XSTC_GHSYNC, reg);
+	reg = vsync_start & XSTC_GV1_SYNCSTART_MASK;
+	reg |= (vbackporch_start << XSTC_GV1_BPSTART_SHIFT) &
+	       XSTC_GV1_BPSTART_MASK;
+
+	/*
+	 * Fix the Vsync_vstart and vsync_vend of Field 0
+	 * for all interlaced modes including 3GB.
+	 */
+	if (vm->flags & DISPLAY_FLAGS_INTERLACED)
+		reg = ((((reg & XSTC_GV1_BPSTART_MASK) >>
+			XSTC_GV1_BPSTART_SHIFT) - 1) <<
+			XSTC_GV1_BPSTART_SHIFT) |
+			((reg & XSTC_GV1_SYNCSTART_MASK) - 1);
+
+	xlnx_stc_writel(base, XSTC_GVSYNC_F0, reg);
+
+	/*
+	 * Fix the Vsync_vstart and vsync_vend of Field 1
+	 * for interlaced and 3GB modes.
+	 */
+	if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+		if (vm->pixelclock == 148500000)
+			/* Revert and increase by 1 for 3GB mode */
+			reg = ((((reg & XSTC_GV1_BPSTART_MASK) >>
+				XSTC_GV1_BPSTART_SHIFT) + 2) <<
+				XSTC_GV1_BPSTART_SHIFT) |
+				((reg & XSTC_GV1_SYNCSTART_MASK) + 2);
+		else
+			/* Only revert the reduction */
+			reg = ((((reg & XSTC_GV1_BPSTART_MASK) >>
+				XSTC_GV1_BPSTART_SHIFT) + 1) <<
+				XSTC_GV1_BPSTART_SHIFT) |
+				((reg & XSTC_GV1_SYNCSTART_MASK) + 1);
+	}
+
+	hori_off.v0blank_hori_start = hactive;
+	hori_off.v0blank_hori_end = hactive;
+	hori_off.v0sync_hori_start = hsync_start;
+	hori_off.v0sync_hori_end = hsync_start;
+	hori_off.v1blank_hori_start = hactive;
+	hori_off.v1blank_hori_end = hactive;
+
+	if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+		/* field 1 sync is shifted by half a line for interlace */
+		hori_off.v1sync_hori_start = hsync_start - (htotal / 2);
+		hori_off.v1sync_hori_end = hsync_start - (htotal / 2);
+		xlnx_stc_writel(base, XSTC_GVSYNC_F1, reg);
+		reg = xlnx_stc_readl(base, XSTC_GENC)
+			| XSTC_GENC_INTERL;
+		xlnx_stc_writel(base, XSTC_GENC, reg);
+	} else {
+		hori_off.v1sync_hori_start = hsync_start;
+		hori_off.v1sync_hori_end = hsync_start;
+		reg = xlnx_stc_readl(base, XSTC_GENC)
+			& ~XSTC_GENC_INTERL;
+		xlnx_stc_writel(base, XSTC_GENC, reg);
+	}
+
+	xlnx_stc_hori_off(base, &hori_off, vm->flags);
+	/* set up polarity */
+	memset(&polarity, 0x0, sizeof(polarity));
+	polarity.hsync = !!(vm->flags & DISPLAY_FLAGS_HSYNC_LOW);
+	polarity.vsync = !!(vm->flags & DISPLAY_FLAGS_VSYNC_LOW);
+	polarity.hblank = !!(vm->flags & DISPLAY_FLAGS_HSYNC_LOW);
+	polarity.vblank = !!(vm->flags & DISPLAY_FLAGS_VSYNC_LOW);
+	polarity.field_id = !!(vm->flags & DISPLAY_FLAGS_INTERLACED);
+	xlnx_stc_polarity(base, &polarity);
+
+	xlnx_stc_src(base);
+
+	/* latch the newly programmed timing set into the generator */
+	reg = xlnx_stc_readl(base, XSTC_CTL);
+	xlnx_stc_writel(base, XSTC_CTL, reg | XSTC_CTL_RU);
+}
diff --git a/drivers/gpu/drm/xlnx/xlnx_sdi_timing.h b/drivers/gpu/drm/xlnx/xlnx_sdi_timing.h
new file mode 100644
index 000000000000..4ca9f8972e0a
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_sdi_timing.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA SDI Tx timing controller driver
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Contacts: Saurabh Sengar <saurabhs@xilinx.com>
+ */
+
+#ifndef _XLNX_SDI_TIMING_H_
+#define _XLNX_SDI_TIMING_H_
+
+struct videomode;
+
+/* timing generator control: enable/disable/reset */
+void xlnx_stc_enable(void __iomem *base);
+void xlnx_stc_disable(void __iomem *base);
+void xlnx_stc_reset(void __iomem *base);
+/* program the generator for the given video mode */
+void xlnx_stc_sig(void __iomem *base, struct videomode *vm);
+
+#endif /* _XLNX_SDI_TIMING_H_ */
diff --git a/drivers/gpu/drm/xlnx/xlnx_vtc.c b/drivers/gpu/drm/xlnx/xlnx_vtc.c
new file mode 100644
index 000000000000..427b35b84e16
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_vtc.c
@@ -0,0 +1,447 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Video Timing Controller support for Xilinx DRM KMS
+ *
+ * Copyright (C) 2013 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ * Saurabh Sengar <saurabhs@xilinx.com>
+ * Vishal Sagar <vishal.sagar@xilinx.com>
+ *
+ * This driver adds support to control the Xilinx Video Timing
+ * Controller connected to the CRTC.
+ */
+
+#include <drm/drmP.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <video/videomode.h>
+#include "xlnx_bridge.h"
+
+/* register offsets */
+#define XVTC_CTL 0x000
+#define XVTC_VER 0x010
+#define XVTC_GASIZE 0x060
+#define XVTC_GENC 0x068
+#define XVTC_GPOL 0x06c
+#define XVTC_GHSIZE 0x070
+#define XVTC_GVSIZE 0x074
+#define XVTC_GHSYNC 0x078
+#define XVTC_GVBHOFF_F0 0x07c
+#define XVTC_GVSYNC_F0 0x080
+#define XVTC_GVSHOFF_F0 0x084
+#define XVTC_GVBHOFF_F1 0x088
+#define XVTC_GVSYNC_F1 0x08C
+#define XVTC_GVSHOFF_F1 0x090
+#define XVTC_GASIZE_F1 0x094
+
+/* vtc control register bits */
+#define XVTC_CTL_SWRESET BIT(31)
+#define XVTC_CTL_FIPSS BIT(26)
+#define XVTC_CTL_ACPSS BIT(25)
+#define XVTC_CTL_AVPSS BIT(24)
+#define XVTC_CTL_HSPSS BIT(23)
+#define XVTC_CTL_VSPSS BIT(22)
+#define XVTC_CTL_HBPSS BIT(21)
+#define XVTC_CTL_VBPSS BIT(20)
+#define XVTC_CTL_VCSS BIT(18)
+#define XVTC_CTL_VASS BIT(17)
+#define XVTC_CTL_VBSS BIT(16)
+#define XVTC_CTL_VSSS BIT(15)
+#define XVTC_CTL_VFSS BIT(14)
+#define XVTC_CTL_VTSS BIT(13)
+#define XVTC_CTL_HBSS BIT(11)
+#define XVTC_CTL_HSSS BIT(10)
+#define XVTC_CTL_HFSS BIT(9)
+#define XVTC_CTL_HTSS BIT(8)
+#define XVTC_CTL_GE BIT(2)
+#define XVTC_CTL_RU BIT(1)
+
+/* vtc generator polarity register bits */
+#define XVTC_GPOL_FIP BIT(6)
+#define XVTC_GPOL_ACP BIT(5)
+#define XVTC_GPOL_AVP BIT(4)
+#define XVTC_GPOL_HSP BIT(3)
+#define XVTC_GPOL_VSP BIT(2)
+#define XVTC_GPOL_HBP BIT(1)
+#define XVTC_GPOL_VBP BIT(0)
+
+/* vtc generator horizontal 1 */
+#define XVTC_GH1_BPSTART_MASK GENMASK(28, 16)
+#define XVTC_GH1_BPSTART_SHIFT 16
+#define XVTC_GH1_SYNCSTART_MASK GENMASK(12, 0)
+/* vtc generator vertical 1 (field 0) */
+#define XVTC_GV1_BPSTART_MASK GENMASK(28, 16)
+#define XVTC_GV1_BPSTART_SHIFT 16
+#define XVTC_GV1_SYNCSTART_MASK GENMASK(12, 0)
+/* vtc generator/detector vblank/vsync horizontal offset registers */
+#define XVTC_XVXHOX_HEND_MASK GENMASK(28, 16)
+#define XVTC_XVXHOX_HEND_SHIFT 16
+#define XVTC_XVXHOX_HSTART_MASK GENMASK(12, 0)
+
+#define XVTC_GHFRAME_HSIZE GENMASK(12, 0)
+#define XVTC_GVFRAME_HSIZE_F1 GENMASK(12, 0)
+#define XVTC_GA_ACTSIZE_MASK GENMASK(12, 0)
+
+/* vtc generator encoding register bits */
+#define XVTC_GENC_INTERL BIT(6)
+
+/**
+ * struct xlnx_vtc - Xilinx VTC object
+ *
+ * @bridge: xilinx bridge structure
+ * @dev: device structure
+ * @base: base addr
+ * @ppc: pixels per clock (1, 2 or 4 — validated in probe)
+ * @axi_clk: AXI Lite clock
+ * @vid_clk: Video clock
+ */
+struct xlnx_vtc {
+	struct xlnx_bridge bridge;
+	struct device *dev;
+	void __iomem *base;
+	u32 ppc;
+	struct clk *axi_clk;
+	struct clk *vid_clk;
+};
+
+/* mmio write helper for VTC registers */
+static inline void xlnx_vtc_writel(void __iomem *base, int offset, u32 val)
+{
+	writel(val, base + offset);
+}
+
+/* mmio read helper for VTC registers */
+static inline u32 xlnx_vtc_readl(void __iomem *base, int offset)
+{
+	return readl(base + offset);
+}
+
+/* upcast from the embedded xlnx_bridge to its containing xlnx_vtc */
+static inline struct xlnx_vtc *bridge_to_vtc(struct xlnx_bridge *bridge)
+{
+	return container_of(bridge, struct xlnx_vtc, bridge);
+}
+
+/* Software-reset the VTC core, then re-enable register updates */
+static void xlnx_vtc_reset(struct xlnx_vtc *vtc)
+{
+	u32 ctl;
+
+	xlnx_vtc_writel(vtc->base, XVTC_CTL, XVTC_CTL_SWRESET);
+
+	/* enable register update */
+	ctl = xlnx_vtc_readl(vtc->base, XVTC_CTL);
+	xlnx_vtc_writel(vtc->base, XVTC_CTL, ctl | XVTC_CTL_RU);
+}
+
+/**
+ * xlnx_vtc_enable - Enable the VTC
+ * @bridge: xilinx bridge structure pointer
+ *
+ * Turn on the timing generator by setting the generator-enable bit.
+ *
+ * Return:
+ * Zero on success.
+ */
+static int xlnx_vtc_enable(struct xlnx_bridge *bridge)
+{
+	struct xlnx_vtc *vtc = bridge_to_vtc(bridge);
+	u32 ctl;
+
+	/* enable generator */
+	ctl = xlnx_vtc_readl(vtc->base, XVTC_CTL);
+	xlnx_vtc_writel(vtc->base, XVTC_CTL, ctl | XVTC_CTL_GE);
+	dev_dbg(vtc->dev, "enabled\n");
+	return 0;
+}
+
+/**
+ * xlnx_vtc_disable - Disable the VTC
+ * @bridge: xilinx bridge structure pointer
+ *
+ * This function disables and resets the VTC.
+ */
+static void xlnx_vtc_disable(struct xlnx_bridge *bridge)
+{
+	struct xlnx_vtc *vtc = bridge_to_vtc(bridge);
+	u32 ctl;
+
+	/* clear the generator-enable bit, then soft-reset the core */
+	ctl = xlnx_vtc_readl(vtc->base, XVTC_CTL);
+	xlnx_vtc_writel(vtc->base, XVTC_CTL, ctl & ~XVTC_CTL_GE);
+	xlnx_vtc_reset(vtc);
+	dev_dbg(vtc->dev, "disabled\n");
+}
+
+/**
+ * xlnx_vtc_set_timing - Configures the VTC
+ * @bridge: xilinx bridge structure pointer
+ * @vm: video mode requested
+ *
+ * Return:
+ * Zero on success.
+ *
+ * This function calculates the timing values from the video mode
+ * structure passed from the CRTC and configures the VTC.
+ */
+static int xlnx_vtc_set_timing(struct xlnx_bridge *bridge,
+			       struct videomode *vm)
+{
+	u32 reg;
+	u32 htotal, hactive, hsync_start, hbackporch_start;
+	u32 vtotal, vactive, vsync_start, vbackporch_start;
+	struct xlnx_vtc *vtc = bridge_to_vtc(bridge);
+
+	/* Hold off register updates while timing is reprogrammed */
+	reg = xlnx_vtc_readl(vtc->base, XVTC_CTL);
+	xlnx_vtc_writel(vtc->base, XVTC_CTL, reg & ~XVTC_CTL_RU);
+
+	/*
+	 * Scale horizontal timing down to core clock cycles.
+	 * NOTE(review): this modifies the caller's videomode in place, so a
+	 * second call with the same vm would divide the horizontal fields
+	 * again — confirm callers always pass a freshly derived vm.
+	 */
+	vm->hactive /= vtc->ppc;
+	vm->hfront_porch /= vtc->ppc;
+	vm->hback_porch /= vtc->ppc;
+	vm->hsync_len /= vtc->ppc;
+
+	htotal = vm->hactive + vm->hfront_porch + vm->hsync_len +
+		 vm->hback_porch;
+	vtotal = vm->vactive + vm->vfront_porch + vm->vsync_len +
+		 vm->vback_porch;
+
+	hactive = vm->hactive;
+	vactive = vm->vactive;
+
+	hsync_start = vm->hactive + vm->hfront_porch;
+	vsync_start = vm->vactive + vm->vfront_porch;
+
+	hbackporch_start = hsync_start + vm->hsync_len;
+	vbackporch_start = vsync_start + vm->vsync_len;
+
+	dev_dbg(vtc->dev, "ha: %d, va: %d\n", hactive, vactive);
+	dev_dbg(vtc->dev, "ht: %d, vt: %d\n", htotal, vtotal);
+	dev_dbg(vtc->dev, "hs: %d, hb: %d\n", hsync_start, hbackporch_start);
+	dev_dbg(vtc->dev, "vs: %d, vb: %d\n", vsync_start, vbackporch_start);
+
+	reg = htotal & XVTC_GHFRAME_HSIZE;
+	xlnx_vtc_writel(vtc->base, XVTC_GHSIZE, reg);
+
+	/* total vertical size goes in both field-0 and field-1 halves */
+	reg = vtotal & XVTC_GVFRAME_HSIZE_F1;
+	reg |= reg << XVTC_GV1_BPSTART_SHIFT;
+	xlnx_vtc_writel(vtc->base, XVTC_GVSIZE, reg);
+
+	reg = hactive & XVTC_GA_ACTSIZE_MASK;
+	reg |= (vactive & XVTC_GA_ACTSIZE_MASK) << 16;
+	xlnx_vtc_writel(vtc->base, XVTC_GASIZE, reg);
+
+	/* field 1 uses the same active size for interlaced modes */
+	if (vm->flags & DISPLAY_FLAGS_INTERLACED)
+		xlnx_vtc_writel(vtc->base, XVTC_GASIZE_F1, reg);
+
+	reg = hsync_start & XVTC_GH1_SYNCSTART_MASK;
+	reg |= (hbackporch_start << XVTC_GH1_BPSTART_SHIFT) &
+	       XVTC_GH1_BPSTART_MASK;
+	xlnx_vtc_writel(vtc->base, XVTC_GHSYNC, reg);
+
+	reg = vsync_start & XVTC_GV1_SYNCSTART_MASK;
+	reg |= (vbackporch_start << XVTC_GV1_BPSTART_SHIFT) &
+	       XVTC_GV1_BPSTART_MASK;
+	xlnx_vtc_writel(vtc->base, XVTC_GVSYNC_F0, reg);
+
+	if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+		xlnx_vtc_writel(vtc->base, XVTC_GVSYNC_F1, reg);
+		reg = xlnx_vtc_readl(vtc->base, XVTC_GENC) | XVTC_GENC_INTERL;
+		xlnx_vtc_writel(vtc->base, XVTC_GENC, reg);
+	} else {
+		reg = xlnx_vtc_readl(vtc->base, XVTC_GENC) & ~XVTC_GENC_INTERL;
+		xlnx_vtc_writel(vtc->base, XVTC_GENC, reg);
+	}
+
+	/* configure horizontal offset */
+	/* Calculate and update Generator VBlank Hori field 0 */
+	reg = hactive & XVTC_XVXHOX_HSTART_MASK;
+	reg |= (hactive << XVTC_XVXHOX_HEND_SHIFT) &
+	       XVTC_XVXHOX_HEND_MASK;
+	xlnx_vtc_writel(vtc->base, XVTC_GVBHOFF_F0, reg);
+
+	/* Calculate and update Generator VSync Hori field 0 */
+	reg = hsync_start & XVTC_XVXHOX_HSTART_MASK;
+	reg |= (hsync_start << XVTC_XVXHOX_HEND_SHIFT) &
+	       XVTC_XVXHOX_HEND_MASK;
+	xlnx_vtc_writel(vtc->base, XVTC_GVSHOFF_F0, reg);
+
+	/* Calculate and update Generator VBlank Hori field 1 */
+	if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+		reg = hactive & XVTC_XVXHOX_HSTART_MASK;
+		reg |= (hactive << XVTC_XVXHOX_HEND_SHIFT) &
+		       XVTC_XVXHOX_HEND_MASK;
+		xlnx_vtc_writel(vtc->base, XVTC_GVBHOFF_F1, reg);
+	}
+
+	/* Calculate and update Generator VSync Hori field 1 */
+	if (vm->flags & DISPLAY_FLAGS_INTERLACED) {
+		/* field 1 sync is shifted by half a line for interlace */
+		reg = (hsync_start - (htotal / 2)) & XVTC_XVXHOX_HSTART_MASK;
+		reg |= ((hsync_start - (htotal / 2)) <<
+			XVTC_XVXHOX_HEND_SHIFT) & XVTC_XVXHOX_HEND_MASK;
+	} else {
+		reg = hsync_start & XVTC_XVXHOX_HSTART_MASK;
+		reg |= (hsync_start << XVTC_XVXHOX_HEND_SHIFT) &
+		       XVTC_XVXHOX_HEND_MASK;
+	}
+
+	if (vm->flags & DISPLAY_FLAGS_INTERLACED)
+		xlnx_vtc_writel(vtc->base, XVTC_GVSHOFF_F1, reg);
+
+	/* configure polarity of signals */
+	reg = 0;
+	reg |= XVTC_GPOL_ACP;
+	reg |= XVTC_GPOL_AVP;
+	if (vm->flags & DISPLAY_FLAGS_INTERLACED)
+		reg |= XVTC_GPOL_FIP;
+	if (vm->flags & DISPLAY_FLAGS_VSYNC_HIGH) {
+		reg |= XVTC_GPOL_VBP;
+		reg |= XVTC_GPOL_VSP;
+	}
+	if (vm->flags & DISPLAY_FLAGS_HSYNC_HIGH) {
+		reg |= XVTC_GPOL_HBP;
+		reg |= XVTC_GPOL_HSP;
+	}
+	xlnx_vtc_writel(vtc->base, XVTC_GPOL, reg);
+
+	/* configure timing source */
+	reg = xlnx_vtc_readl(vtc->base, XVTC_CTL);
+	reg |= XVTC_CTL_VCSS;
+	reg |= XVTC_CTL_VASS;
+	reg |= XVTC_CTL_VBSS;
+	reg |= XVTC_CTL_VSSS;
+	reg |= XVTC_CTL_VFSS;
+	reg |= XVTC_CTL_VTSS;
+	reg |= XVTC_CTL_HBSS;
+	reg |= XVTC_CTL_HSSS;
+	reg |= XVTC_CTL_HFSS;
+	reg |= XVTC_CTL_HTSS;
+	xlnx_vtc_writel(vtc->base, XVTC_CTL, reg);
+
+	/* latch the new timing set into the generator */
+	reg = xlnx_vtc_readl(vtc->base, XVTC_CTL);
+	xlnx_vtc_writel(vtc->base, XVTC_CTL, reg | XVTC_CTL_RU);
+	dev_dbg(vtc->dev, "set timing done\n");
+
+	return 0;
+}
+
+/**
+ * xlnx_vtc_probe - Probe the VTC bridge device
+ * @pdev: platform device
+ *
+ * Map the register space, read and validate the pixels-per-clock
+ * property, enable the AXI-Lite and video clocks, reset the core and
+ * register it as a Xilinx DRM bridge.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int xlnx_vtc_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct xlnx_vtc *vtc;
+	struct resource *res;
+	int ret;
+
+	vtc = devm_kzalloc(dev, sizeof(*vtc), GFP_KERNEL);
+	if (!vtc)
+		return -ENOMEM;
+
+	vtc->dev = dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "failed to get resource for device\n");
+		/* NOTE(review): -ENODEV may be more conventional here */
+		return -EFAULT;
+	}
+
+	vtc->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(vtc->base)) {
+		dev_err(dev, "failed to remap io region\n");
+		return PTR_ERR(vtc->base);
+	}
+
+	platform_set_drvdata(pdev, vtc);
+
+	ret = of_property_read_u32(dev->of_node, "xlnx,pixels-per-clock",
+				   &vtc->ppc);
+	if (ret || (vtc->ppc != 1 && vtc->ppc != 2 && vtc->ppc != 4)) {
+		dev_err(dev, "failed to get ppc\n");
+		/*
+		 * A present-but-invalid property leaves ret == 0; make sure
+		 * probe still fails in that case.
+		 */
+		return ret ? ret : -EINVAL;
+	}
+	dev_info(dev, "vtc ppc = %d\n", vtc->ppc);
+
+	vtc->axi_clk = devm_clk_get(vtc->dev, "s_axi_aclk");
+	if (IS_ERR(vtc->axi_clk)) {
+		ret = PTR_ERR(vtc->axi_clk);
+		dev_err(dev, "failed to get axi lite clk %d\n", ret);
+		return ret;
+	}
+
+	vtc->vid_clk = devm_clk_get(vtc->dev, "clk");
+	if (IS_ERR(vtc->vid_clk)) {
+		ret = PTR_ERR(vtc->vid_clk);
+		dev_err(dev, "failed to get video clk %d\n", ret);
+		return ret;
+	}
+
+	ret = clk_prepare_enable(vtc->axi_clk);
+	if (ret) {
+		dev_err(vtc->dev, "unable to enable axilite clk %d\n", ret);
+		return ret;
+	}
+
+	ret = clk_prepare_enable(vtc->vid_clk);
+	if (ret) {
+		dev_err(vtc->dev, "unable to enable video clk %d\n", ret);
+		goto err_axi_clk;
+	}
+
+	xlnx_vtc_reset(vtc);
+
+	vtc->bridge.enable = &xlnx_vtc_enable;
+	vtc->bridge.disable = &xlnx_vtc_disable;
+	vtc->bridge.set_timing = &xlnx_vtc_set_timing;
+	vtc->bridge.of_node = dev->of_node;
+	ret = xlnx_bridge_register(&vtc->bridge);
+	if (ret) {
+		dev_err(dev, "Bridge registration failed\n");
+		goto err_vid_clk;
+	}
+
+	dev_info(dev, "Xilinx VTC IP version : 0x%08x\n",
+		 xlnx_vtc_readl(vtc->base, XVTC_VER));
+	dev_info(dev, "Xilinx VTC DRM Bridge driver probed\n");
+	return 0;
+
+err_vid_clk:
+	clk_disable_unprepare(vtc->vid_clk);
+err_axi_clk:
+	clk_disable_unprepare(vtc->axi_clk);
+	return ret;
+}
+
+/**
+ * xlnx_vtc_remove - Remove the VTC bridge device
+ * @pdev: platform device
+ *
+ * Unregister the bridge and release the clocks enabled in probe.
+ *
+ * Return: 0 always.
+ */
+static int xlnx_vtc_remove(struct platform_device *pdev)
+{
+	struct xlnx_vtc *vtc = platform_get_drvdata(pdev);
+
+	xlnx_bridge_unregister(&vtc->bridge);
+	clk_disable_unprepare(vtc->vid_clk);
+	clk_disable_unprepare(vtc->axi_clk);
+
+	return 0;
+}
+
+/* device-tree compatible strings handled by this driver */
+static const struct of_device_id xlnx_vtc_of_match[] = {
+	{ .compatible = "xlnx,bridge-v-tc-6.1" },
+	{ /* end of table */ },
+};
+
+MODULE_DEVICE_TABLE(of, xlnx_vtc_of_match);
+
+static struct platform_driver xlnx_vtc_bridge_driver = {
+	.probe = xlnx_vtc_probe,
+	.remove = xlnx_vtc_remove,
+	.driver = {
+		.name = "xlnx,bridge-vtc",
+		.of_match_table = xlnx_vtc_of_match,
+	},
+};
+
+module_platform_driver(xlnx_vtc_bridge_driver);
+
+MODULE_AUTHOR("Vishal Sagar");
+MODULE_DESCRIPTION("Xilinx VTC Bridge Driver");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
new file mode 100644
index 000000000000..5e06f84660e1
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
@@ -0,0 +1,3343 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ZynqMP Display Controller Driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drm_vblank.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+
+#include "xlnx_bridge.h"
+#include "xlnx_crtc.h"
+#include "xlnx_fb.h"
+#include "zynqmp_disp.h"
+#include "zynqmp_dp.h"
+#include "zynqmp_dpsub.h"
+
+/*
+ * Overview
+ * --------
+ *
+ * The display part of ZynqMP DP subsystem. Internally, the device
+ * is partitioned into 3 blocks: AV buffer manager, Blender, Audio.
+ * The driver creates the DRM crtc and plane objects and maps the DRM
+ * interface into those 3 blocks. In high level, the driver is layered
+ * in the following way:
+ *
+ * zynqmp_disp_crtc & zynqmp_disp_plane
+ * |->zynqmp_disp
+ * |->zynqmp_disp_aud
+ * |->zynqmp_disp_blend
+ * |->zynqmp_disp_av_buf
+ *
+ * The driver APIs are used externally by
+ * - zynqmp_dpsub: Top level ZynqMP DP subsystem driver
+ * - zynqmp_dp: ZynqMP DP driver
+ * - xlnx_crtc: Xilinx DRM specific crtc functions
+ */
+
+/* The default value is ZYNQMP_DISP_AV_BUF_GFX_FMT_RGB565 */
+static uint zynqmp_disp_gfx_init_fmt;
+module_param_named(gfx_init_fmt, zynqmp_disp_gfx_init_fmt, uint, 0444);
+MODULE_PARM_DESC(gfx_init_fmt, "The initial format of the graphics layer\n"
+		 "\t\t0 = rgb565 (default)\n"
+		 "\t\t1 = rgb888\n"
+		 "\t\t2 = argb8888\n");
+/* These values should be mapped to the index of av_buf_gfx_fmts[] */
+#define ZYNQMP_DISP_AV_BUF_GFX_FMT_RGB565	10
+#define ZYNQMP_DISP_AV_BUF_GFX_FMT_RGB888	5
+#define ZYNQMP_DISP_AV_BUF_GFX_FMT_ARGB8888	1
+/* module-parameter value -> av_buf_gfx_fmts[] index lookup table */
+static const u32 zynqmp_disp_gfx_init_fmts[] = {
+	ZYNQMP_DISP_AV_BUF_GFX_FMT_RGB565,
+	ZYNQMP_DISP_AV_BUF_GFX_FMT_RGB888,
+	ZYNQMP_DISP_AV_BUF_GFX_FMT_ARGB8888,
+};
+
+/* Blender registers */
+#define ZYNQMP_DISP_V_BLEND_BG_CLR_0 0x0
+#define ZYNQMP_DISP_V_BLEND_BG_CLR_1 0x4
+#define ZYNQMP_DISP_V_BLEND_BG_CLR_2 0x8
+#define ZYNQMP_DISP_V_BLEND_BG_MAX 0xfff
+#define ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA 0xc
+#define ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA_MASK 0x1fe
+#define ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA_MAX 0xff
+#define ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT 0x14
+#define ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_RGB 0x0
+#define ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_YCBCR444 0x1
+#define ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_YCBCR422 0x2
+#define ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_YONLY 0x3
+#define ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_XVYCC 0x4
+#define ZYNQMP_DISP_V_BLEND_OUTPUT_EN_DOWNSAMPLE BIT(4)
+#define ZYNQMP_DISP_V_BLEND_LAYER_CONTROL 0x18
+#define ZYNQMP_DISP_V_BLEND_LAYER_CONTROL_EN_US BIT(0)
+#define ZYNQMP_DISP_V_BLEND_LAYER_CONTROL_RGB BIT(1)
+#define ZYNQMP_DISP_V_BLEND_LAYER_CONTROL_BYPASS BIT(8)
+#define ZYNQMP_DISP_V_BLEND_NUM_COEFF 9
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF0 0x20
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF1 0x24
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF2 0x28
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF3 0x2c
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF4 0x30
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF5 0x34
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF6 0x38
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF7 0x3c
+#define ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF8 0x40
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF0 0x44
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF1 0x48
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF2 0x4c
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF3 0x50
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF4 0x54
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF5 0x58
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF6 0x5c
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF7 0x60
+#define ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF8 0x64
+#define ZYNQMP_DISP_V_BLEND_NUM_OFFSET 3
+#define ZYNQMP_DISP_V_BLEND_LUMA_IN1CSC_OFFSET 0x68
+#define ZYNQMP_DISP_V_BLEND_CR_IN1CSC_OFFSET 0x6c
+#define ZYNQMP_DISP_V_BLEND_CB_IN1CSC_OFFSET 0x70
+#define ZYNQMP_DISP_V_BLEND_LUMA_OUTCSC_OFFSET 0x74
+#define ZYNQMP_DISP_V_BLEND_CR_OUTCSC_OFFSET 0x78
+#define ZYNQMP_DISP_V_BLEND_CB_OUTCSC_OFFSET 0x7c
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF0 0x80
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF1 0x84
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF2 0x88
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF3 0x8c
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF4 0x90
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF5 0x94
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF6 0x98
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF7 0x9c
+#define ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF8 0xa0
+#define ZYNQMP_DISP_V_BLEND_LUMA_IN2CSC_OFFSET 0xa4
+#define ZYNQMP_DISP_V_BLEND_CR_IN2CSC_OFFSET 0xa8
+#define ZYNQMP_DISP_V_BLEND_CB_IN2CSC_OFFSET 0xac
+#define ZYNQMP_DISP_V_BLEND_CHROMA_KEY_ENABLE 0x1d0
+#define ZYNQMP_DISP_V_BLEND_CHROMA_KEY_COMP1 0x1d4
+#define ZYNQMP_DISP_V_BLEND_CHROMA_KEY_COMP2 0x1d8
+#define ZYNQMP_DISP_V_BLEND_CHROMA_KEY_COMP3 0x1dc
+
+/* AV buffer manager registers */
+#define ZYNQMP_DISP_AV_BUF_FMT 0x0
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_SHIFT 0
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_MASK (0x1f << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_UYVY (0 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_VYUY (1 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YVYU (2 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YUYV (3 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16 (4 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV24 (5 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI (6 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_MONO (7 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI2 (8 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YUV444 (9 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGB888 (10 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGBA8880 (11 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGB888_10 (12 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YUV444_10 (13 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI2_10 (14 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI_10 (15 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16_10 (16 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV24_10 (17 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YONLY_10 (18 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16_420 (19 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI_420 (20 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI2_420 (21 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16_420_10 (22 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI_420_10 (23 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI2_420_10 (24 << 0)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_SHIFT 8
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_MASK (0xf << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA8888 (0 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_ABGR8888 (1 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGB888 (2 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_BGR888 (3 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA5551 (4 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA4444 (5 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGB565 (6 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_8BPP (7 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_4BPP (8 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_2BPP (9 << 8)
+#define ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_1BPP (10 << 8)
+#define ZYNQMP_DISP_AV_BUF_NON_LIVE_LATENCY 0x8
+#define ZYNQMP_DISP_AV_BUF_CHBUF 0x10
+#define ZYNQMP_DISP_AV_BUF_CHBUF_EN BIT(0)
+#define ZYNQMP_DISP_AV_BUF_CHBUF_FLUSH BIT(1)
+#define ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_SHIFT 2
+#define ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_MASK (0xf << 2)
+#define ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_MAX 0xf
+#define ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_AUD_MAX 0x3
+#define ZYNQMP_DISP_AV_BUF_STATUS 0x28
+#define ZYNQMP_DISP_AV_BUF_STC_CTRL 0x2c
+#define ZYNQMP_DISP_AV_BUF_STC_CTRL_EN BIT(0)
+#define ZYNQMP_DISP_AV_BUF_STC_CTRL_EVENT_SHIFT 1
+#define ZYNQMP_DISP_AV_BUF_STC_CTRL_EVENT_EX_VSYNC 0
+#define ZYNQMP_DISP_AV_BUF_STC_CTRL_EVENT_EX_VID 1
+#define ZYNQMP_DISP_AV_BUF_STC_CTRL_EVENT_EX_AUD 2
+#define ZYNQMP_DISP_AV_BUF_STC_CTRL_EVENT_INT_VSYNC 3
+#define ZYNQMP_DISP_AV_BUF_STC_INIT_VALUE0 0x30
+#define ZYNQMP_DISP_AV_BUF_STC_INIT_VALUE1 0x34
+#define ZYNQMP_DISP_AV_BUF_STC_ADJ 0x38
+#define ZYNQMP_DISP_AV_BUF_STC_VID_VSYNC_TS0 0x3c
+#define ZYNQMP_DISP_AV_BUF_STC_VID_VSYNC_TS1 0x40
+#define ZYNQMP_DISP_AV_BUF_STC_EXT_VSYNC_TS0 0x44
+#define ZYNQMP_DISP_AV_BUF_STC_EXT_VSYNC_TS1 0x48
+#define ZYNQMP_DISP_AV_BUF_STC_CUSTOM_EVENT_TS0 0x4c
+#define ZYNQMP_DISP_AV_BUF_STC_CUSTOM_EVENT_TS1 0x50
+#define ZYNQMP_DISP_AV_BUF_STC_CUSTOM_EVENT2_TS0 0x54
+#define ZYNQMP_DISP_AV_BUF_STC_CUSTOM_EVENT2_TS1 0x58
+#define ZYNQMP_DISP_AV_BUF_STC_SNAPSHOT0 0x60
+#define ZYNQMP_DISP_AV_BUF_STC_SNAPSHOT1 0x64
+#define ZYNQMP_DISP_AV_BUF_OUTPUT 0x70
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_SHIFT 0
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MASK (0x3 << 0)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_LIVE (0 << 0)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MEM (1 << 0)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_PATTERN (2 << 0)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_NONE (3 << 0)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_SHIFT 2
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_MASK (0x3 << 2)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_DISABLE (0 << 2)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_MEM (1 << 2)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_LIVE (2 << 2)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_NONE (3 << 2)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_SHIFT 4
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_MASK (0x3 << 4)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_PL (0 << 4)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_MEM (1 << 4)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_PATTERN (2 << 4)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_DISABLE (3 << 4)
+#define ZYNQMP_DISP_AV_BUF_OUTPUT_AUD2_EN BIT(6)
+#define ZYNQMP_DISP_AV_BUF_HCOUNT_VCOUNT_INT0	0x74
+#define ZYNQMP_DISP_AV_BUF_HCOUNT_VCOUNT_INT1	0x78
+#define ZYNQMP_DISP_AV_BUF_PATTERN_GEN_SELECT	0x100
+#define ZYNQMP_DISP_AV_BUF_CLK_SRC	0x120
+#define ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_FROM_PS	BIT(0)
+#define ZYNQMP_DISP_AV_BUF_CLK_SRC_AUD_FROM_PS	BIT(1)
+#define ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_INTERNAL_TIMING	BIT(2)
+#define ZYNQMP_DISP_AV_BUF_SRST_REG	0x124
+#define ZYNQMP_DISP_AV_BUF_SRST_REG_VID_RST	BIT(1)
+#define ZYNQMP_DISP_AV_BUF_AUDIO_CH_CONFIG	0x12c
+/* Per-component scaling factor registers (graphics, video, live inputs) */
+#define ZYNQMP_DISP_AV_BUF_GFX_COMP0_SF	0x200
+#define ZYNQMP_DISP_AV_BUF_GFX_COMP1_SF	0x204
+#define ZYNQMP_DISP_AV_BUF_GFX_COMP2_SF	0x208
+#define ZYNQMP_DISP_AV_BUF_VID_COMP0_SF	0x20c
+#define ZYNQMP_DISP_AV_BUF_VID_COMP1_SF	0x210
+#define ZYNQMP_DISP_AV_BUF_VID_COMP2_SF	0x214
+#define ZYNQMP_DISP_AV_BUF_LIVE_VID_COMP0_SF	0x218
+#define ZYNQMP_DISP_AV_BUF_LIVE_VID_COMP1_SF	0x21c
+#define ZYNQMP_DISP_AV_BUF_LIVE_VID_COMP2_SF	0x220
+#define ZYNQMP_DISP_AV_BUF_LIVE_VID_CONFIG	0x224
+#define ZYNQMP_DISP_AV_BUF_LIVE_GFX_COMP0_SF	0x228
+#define ZYNQMP_DISP_AV_BUF_LIVE_GFX_COMP1_SF	0x22c
+#define ZYNQMP_DISP_AV_BUF_LIVE_GFX_COMP2_SF	0x230
+#define ZYNQMP_DISP_AV_BUF_LIVE_GFX_CONFIG	0x234
+/* Scaling factor values written to the *_COMP*_SF registers above */
+#define ZYNQMP_DISP_AV_BUF_4BIT_SF	0x11111
+#define ZYNQMP_DISP_AV_BUF_5BIT_SF	0x10842
+#define ZYNQMP_DISP_AV_BUF_6BIT_SF	0x10410
+#define ZYNQMP_DISP_AV_BUF_8BIT_SF	0x10101
+#define ZYNQMP_DISP_AV_BUF_10BIT_SF	0x10040
+#define ZYNQMP_DISP_AV_BUF_NULL_SF	0
+#define ZYNQMP_DISP_AV_BUF_NUM_SF	3
+/*
+ * LIVE_VID/GFX_CONFIG register fields: bits-per-component in bits 2:0
+ * (BPC_MASK), color format in bits 5:4 (FMT_MASK), Cb/Cr order in bit 8.
+ */
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_6	0x0
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_8	0x1
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_10	0x2
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_12	0x3
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_MASK	GENMASK(2, 0)
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_RGB	0x0
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV444	0x1
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV422	0x2
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YONLY	0x3
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_MASK	GENMASK(5, 4)
+#define ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_CB_FIRST	BIT(8)
+#define ZYNQMP_DISP_AV_BUF_PALETTE_MEMORY	0x400
+
+/* Audio registers */
+#define ZYNQMP_DISP_AUD_MIXER_VOLUME	0x0
+#define ZYNQMP_DISP_AUD_MIXER_VOLUME_NO_SCALE	0x20002000
+#define ZYNQMP_DISP_AUD_MIXER_META_DATA	0x4
+#define ZYNQMP_DISP_AUD_CH_STATUS0	0x8
+#define ZYNQMP_DISP_AUD_CH_STATUS1	0xc
+#define ZYNQMP_DISP_AUD_CH_STATUS2	0x10
+#define ZYNQMP_DISP_AUD_CH_STATUS3	0x14
+#define ZYNQMP_DISP_AUD_CH_STATUS4	0x18
+#define ZYNQMP_DISP_AUD_CH_STATUS5	0x1c
+#define ZYNQMP_DISP_AUD_CH_A_DATA0	0x20
+#define ZYNQMP_DISP_AUD_CH_A_DATA1	0x24
+#define ZYNQMP_DISP_AUD_CH_A_DATA2	0x28
+#define ZYNQMP_DISP_AUD_CH_A_DATA3	0x2c
+#define ZYNQMP_DISP_AUD_CH_A_DATA4	0x30
+#define ZYNQMP_DISP_AUD_CH_A_DATA5	0x34
+#define ZYNQMP_DISP_AUD_CH_B_DATA0	0x38
+#define ZYNQMP_DISP_AUD_CH_B_DATA1	0x3c
+#define ZYNQMP_DISP_AUD_CH_B_DATA2	0x40
+#define ZYNQMP_DISP_AUD_CH_B_DATA3	0x44
+#define ZYNQMP_DISP_AUD_CH_B_DATA4	0x48
+#define ZYNQMP_DISP_AUD_CH_B_DATA5	0x4c
+#define ZYNQMP_DISP_AUD_SOFT_RESET	0xc00
+#define ZYNQMP_DISP_AUD_SOFT_RESET_AUD_SRST	BIT(0)
+
+/* 4 video/graphics channel buffers + 2 audio channel buffers */
+#define ZYNQMP_DISP_AV_BUF_NUM_VID_GFX_BUFFERS	4
+#define ZYNQMP_DISP_AV_BUF_NUM_BUFFERS	6
+
+#define ZYNQMP_DISP_NUM_LAYERS	2
+#define ZYNQMP_DISP_MAX_NUM_SUB_PLANES	3
+/*
+ * 3840x2160 is advertised max resolution, but almost any resolutions under
+ * 300 MHz pixel rate would work. Thus put 4096 as maximum width and height.
+ */
+#define ZYNQMP_DISP_MAX_WIDTH	4096
+#define ZYNQMP_DISP_MAX_HEIGHT	4096
+/* 44 bit addressing. This is actually a DPDMA limitation */
+#define ZYNQMP_DISP_MAX_DMA_BIT	44
+
+/**
+ * enum zynqmp_disp_layer_type - Layer type (can be used for hw ID)
+ * @ZYNQMP_DISP_LAYER_VID: Video layer
+ * @ZYNQMP_DISP_LAYER_GFX: Graphics layer
+ *
+ * Values are sequential from 0 so they can double as hardware layer IDs.
+ */
+enum zynqmp_disp_layer_type {
+	ZYNQMP_DISP_LAYER_VID,
+	ZYNQMP_DISP_LAYER_GFX
+};
+
+/**
+ * enum zynqmp_disp_layer_mode - Layer mode
+ * @ZYNQMP_DISP_LAYER_NONLIVE: non-live (memory) mode, fed by DMA
+ * @ZYNQMP_DISP_LAYER_LIVE: live (stream) mode, fed from the PL
+ */
+enum zynqmp_disp_layer_mode {
+	ZYNQMP_DISP_LAYER_NONLIVE,
+	ZYNQMP_DISP_LAYER_LIVE
+};
+
+/**
+ * struct zynqmp_disp_layer_dma - struct for DMA engine
+ * @chan: DMA channel
+ * @is_active: flag if the DMA is active
+ * @xt: Interleaved desc config container
+ * @sgl: Data chunk for dma_interleaved_template (a single chunk; @xt must
+ *       immediately precede it as dma_interleaved_template ends in a
+ *       flexible array of chunks)
+ */
+struct zynqmp_disp_layer_dma {
+	struct dma_chan *chan;
+	bool is_active;
+	struct dma_interleaved_template xt;
+	struct data_chunk sgl[1];
+};
+
+/**
+ * struct zynqmp_disp_layer - Display subsystem layer
+ * @plane: DRM plane
+ * @bridge: Xlnx bridge
+ * @of_node: device node
+ * @dma: struct for DMA engine, one per sub-plane (up to
+ *       ZYNQMP_DISP_MAX_NUM_SUB_PLANES)
+ * @num_chan: Number of DMA channels
+ * @id: Layer ID (enum zynqmp_disp_layer_type)
+ * @offset: Layer offset in the register space
+ * @enabled: flag if enabled (non-zero when the layer is on)
+ * @fmt: Current format descriptor
+ * @drm_fmts: Array of supported DRM formats
+ * @num_fmts: Number of supported DRM formats
+ * @bus_fmts: Array of supported bus formats (live mode)
+ * @num_bus_fmts: Number of supported bus formats
+ * @w: Width
+ * @h: Height
+ * @mode: the operation mode (live or non-live)
+ * @other: the other layer (VID <-> GFX)
+ * @disp: back pointer to struct zynqmp_disp
+ */
+struct zynqmp_disp_layer {
+	struct drm_plane plane;
+	struct xlnx_bridge bridge;
+	struct device_node *of_node;
+	struct zynqmp_disp_layer_dma dma[ZYNQMP_DISP_MAX_NUM_SUB_PLANES];
+	unsigned int num_chan;
+	enum zynqmp_disp_layer_type id;
+	u32 offset;
+	u8 enabled;
+	const struct zynqmp_disp_fmt *fmt;
+	u32 *drm_fmts;
+	unsigned int num_fmts;
+	u32 *bus_fmts;
+	unsigned int num_bus_fmts;
+	u32 w;
+	u32 h;
+	enum zynqmp_disp_layer_mode mode;
+	struct zynqmp_disp_layer *other;
+	struct zynqmp_disp *disp;
+};
+
+/**
+ * struct zynqmp_disp_blend - Blender
+ * @base: Base address offset of the blender register sub-block
+ */
+struct zynqmp_disp_blend {
+	void __iomem *base;
+};
+
+/**
+ * struct zynqmp_disp_av_buf - AV buffer manager
+ * @base: Base address offset of the AV buffer manager register sub-block
+ */
+struct zynqmp_disp_av_buf {
+	void __iomem *base;
+};
+
+/**
+ * struct zynqmp_disp_aud - Audio
+ * @base: Base address offset of the audio register sub-block
+ */
+struct zynqmp_disp_aud {
+	void __iomem *base;
+};
+
+/**
+ * struct zynqmp_disp - Display subsystem
+ * @xlnx_crtc: Xilinx DRM crtc
+ * @dev: device structure
+ * @dpsub: Display subsystem
+ * @drm: DRM core
+ * @enabled: flag if enabled
+ * @blend: Blender block
+ * @av_buf: AV buffer manager block
+ * @aud: Audio block
+ * @layers: layers (video and graphics)
+ * @g_alpha_prop: global alpha property
+ * @alpha: current global alpha value
+ * @g_alpha_en_prop: the global alpha enable property
+ * @alpha_en: flag if the global alpha is enabled
+ * @color_prop: output color format property
+ * @color: current output color value
+ * @bg_c0_prop: 1st component of background color property
+ * @bg_c0: current value of 1st background color component
+ * @bg_c1_prop: 2nd component of background color property
+ * @bg_c1: current value of 2nd background color component
+ * @bg_c2_prop: 3rd component of background color property
+ * @bg_c2: current value of 3rd background color component
+ * @tpg_prop: Test Pattern Generation mode property
+ * @tpg_on: current TPG mode state
+ * @event: pending vblank event request
+ * @_ps_pclk: Pixel clock from PS
+ * @_pl_pclk: Pixel clock from PL
+ * @pclk: Pixel clock (points at the selected one of the two above)
+ * @pclk_en: Flag if the pixel clock is enabled
+ * @_ps_audclk: Audio clock from PS
+ * @_pl_audclk: Audio clock from PL
+ * @audclk: Audio clock (points at the selected one of the two above)
+ * @audclk_en: Flag if the audio clock is enabled
+ * @aclk: APB clock
+ * @aclk_en: Flag if the APB clock is enabled
+ */
+struct zynqmp_disp {
+	struct xlnx_crtc xlnx_crtc;
+	struct device *dev;
+	struct zynqmp_dpsub *dpsub;
+	struct drm_device *drm;
+	bool enabled;
+	struct zynqmp_disp_blend blend;
+	struct zynqmp_disp_av_buf av_buf;
+	struct zynqmp_disp_aud aud;
+	struct zynqmp_disp_layer layers[ZYNQMP_DISP_NUM_LAYERS];
+	struct drm_property *g_alpha_prop;
+	u32 alpha;
+	struct drm_property *g_alpha_en_prop;
+	bool alpha_en;
+	struct drm_property *color_prop;
+	unsigned int color;
+	struct drm_property *bg_c0_prop;
+	u32 bg_c0;
+	struct drm_property *bg_c1_prop;
+	u32 bg_c1;
+	struct drm_property *bg_c2_prop;
+	u32 bg_c2;
+	struct drm_property *tpg_prop;
+	bool tpg_on;
+	struct drm_pending_vblank_event *event;
+	/* Don't operate directly on _ps_ / _pl_ clocks; use the alias */
+	struct clk *_ps_pclk;
+	struct clk *_pl_pclk;
+	struct clk *pclk;
+	bool pclk_en;
+	struct clk *_ps_audclk;
+	struct clk *_pl_audclk;
+	struct clk *audclk;
+	bool audclk_en;
+	struct clk *aclk;
+	bool aclk_en;
+};
+
+/**
+ * struct zynqmp_disp_fmt - Display subsystem format mapping
+ * @drm_fmt: drm format (fourcc), for non-live formats
+ * @disp_fmt: Display subsystem (hardware) format value
+ * @bus_fmt: Bus format (live formats only)
+ * @rgb: flag for RGB formats
+ * @swap: flag to swap r & b for rgb formats, and u & v for yuv formats
+ * @chroma_sub: flag for chroma subsampled formats
+ * @sf: scaling factors for up to 3 color components
+ */
+struct zynqmp_disp_fmt {
+	u32 drm_fmt;
+	u32 disp_fmt;
+	u32 bus_fmt;
+	bool rgb;
+	bool swap;
+	bool chroma_sub;
+	u32 sf[3];
+};
+
+/* Write @val to the register at @base + @offset */
+static void zynqmp_disp_write(void __iomem *base, int offset, u32 val)
+{
+	writel(val, base + offset);
+}
+
+/* Read the register at @base + @offset */
+static u32 zynqmp_disp_read(void __iomem *base, int offset)
+{
+	return readl(base + offset);
+}
+
+/* Clear the @clr bits in the register at @base + @offset (read-modify-write) */
+static void zynqmp_disp_clr(void __iomem *base, int offset, u32 clr)
+{
+	u32 reg = zynqmp_disp_read(base, offset);
+
+	zynqmp_disp_write(base, offset, reg & ~clr);
+}
+
+/* Set the @set bits in the register at @base + @offset (read-modify-write) */
+static void zynqmp_disp_set(void __iomem *base, int offset, u32 set)
+{
+	u32 reg = zynqmp_disp_read(base, offset);
+
+	zynqmp_disp_write(base, offset, reg | set);
+}
+
+/*
+ * Clock functions
+ */
+
+/**
+ * zynqmp_disp_clk_enable - Enable the clock if needed
+ * @clk: clk device
+ * @flag: flag tracking whether the clock is currently enabled
+ *
+ * Enable the clock only when @flag indicates it is disabled, and update
+ * @flag on success.
+ *
+ * Return: value from clk_prepare_enable().
+ */
+static int zynqmp_disp_clk_enable(struct clk *clk, bool *flag)
+{
+	int ret;
+
+	if (*flag)
+		return 0;
+
+	ret = clk_prepare_enable(clk);
+	if (!ret)
+		*flag = true;
+
+	return ret;
+}
+
+/**
+ * zynqmp_disp_clk_disable - Disable the clock if needed
+ * @clk: clk device
+ * @flag: flag tracking whether the clock is currently enabled
+ *
+ * Disable the clock only if it's enabled per @flag, and clear @flag.
+ */
+static void zynqmp_disp_clk_disable(struct clk *clk, bool *flag)
+{
+	if (*flag) {
+		clk_disable_unprepare(clk);
+		*flag = false;
+	}
+}
+
+/**
+ * zynqmp_disp_clk_enable_disable - Enable and disable the clock
+ * @clk: clk device
+ * @flag: flag tracking whether the clock is currently enabled
+ *
+ * This is to ensure the clock is disabled. The initial hardware state is
+ * unknown, and this makes sure that the clock is disabled by cycling it
+ * once through enable/disable.
+ *
+ * Return: value from clk_prepare_enable().
+ */
+static int zynqmp_disp_clk_enable_disable(struct clk *clk, bool *flag)
+{
+	int ret = 0;
+
+	if (!*flag) {
+		ret = clk_prepare_enable(clk);
+		/*
+		 * Only unprepare the clock when enabling succeeded:
+		 * clk_disable_unprepare() on a clock that was never
+		 * prepared unbalances the prepare count and triggers a
+		 * WARN in the common clock framework.
+		 */
+		if (!ret)
+			clk_disable_unprepare(clk);
+	}
+
+	return ret;
+}
+
+/*
+ * Blender functions
+ */
+
+/**
+ * zynqmp_disp_blend_set_output_fmt - Set the output format of the blend
+ * @blend: blend object
+ * @fmt: output format
+ *
+ * Set the output format to @fmt and program the matching output CSC
+ * matrix: identity for RGB output, full-range SDTV RGB->YCbCr otherwise.
+ */
+static void
+zynqmp_disp_blend_set_output_fmt(struct zynqmp_disp_blend *blend, u32 fmt)
+{
+	/*
+	 * Tables are never modified; make them static const so they are not
+	 * rebuilt on the stack for every call.
+	 */
+	static const u16 reset_coeffs[] = { 0x1000, 0x0, 0x0,
+					    0x0, 0x1000, 0x0,
+					    0x0, 0x0, 0x1000 };
+	static const u32 reset_offsets[] = { 0x0, 0x0, 0x0 };
+	static const u16 sdtv_coeffs[] = { 0x4c9, 0x864, 0x1d3,
+					   0x7d4d, 0x7ab3, 0x800,
+					   0x800, 0x794d, 0x7eb3 };
+	static const u32 full_range_offsets[] = { 0x0, 0x8000000, 0x8000000 };
+	const u16 *coeffs;
+	const u32 *offsets;
+	u32 offset, i;
+
+	zynqmp_disp_write(blend->base, ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT, fmt);
+	if (fmt == ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_RGB) {
+		/* Identity matrix: RGB output needs no conversion */
+		coeffs = reset_coeffs;
+		offsets = reset_offsets;
+	} else {
+		/* Hardcode Full-range SDTV values. Can be runtime config */
+		coeffs = sdtv_coeffs;
+		offsets = full_range_offsets;
+	}
+
+	offset = ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF0;
+	for (i = 0; i < ZYNQMP_DISP_V_BLEND_NUM_COEFF; i++)
+		zynqmp_disp_write(blend->base, offset + i * 4, coeffs[i]);
+
+	offset = ZYNQMP_DISP_V_BLEND_LUMA_OUTCSC_OFFSET;
+	for (i = 0; i < ZYNQMP_DISP_V_BLEND_NUM_OFFSET; i++)
+		zynqmp_disp_write(blend->base, offset + i * 4, offsets[i]);
+}
+
+/**
+ * zynqmp_disp_blend_layer_coeff - Set the coefficients for @layer
+ * @blend: blend object
+ * @layer: layer to set the coefficients for
+ * @on: if layer is on / off
+ *
+ * Depending on the format (rgb / yuv and swap), and the status (on / off),
+ * this function sets the input CSC coefficients and offsets for the given
+ * layer @layer accordingly. A disabled layer gets all-zero coefficients.
+ */
+static void zynqmp_disp_blend_layer_coeff(struct zynqmp_disp_blend *blend,
+					  struct zynqmp_disp_layer *layer,
+					  bool on)
+{
+	u32 offset, i, s0, s1;
+	/*
+	 * The coefficient tables stay automatic (non-const): the swap case
+	 * below exchanges columns in place.
+	 */
+	u16 sdtv_coeffs[] = { 0x1000, 0x166f, 0x0,
+			      0x1000, 0x7483, 0x7a7f,
+			      0x1000, 0x0, 0x1c5a };
+	u16 sdtv_coeffs_yonly[] = { 0x0, 0x0, 0x1000,
+				    0x0, 0x0, 0x1000,
+				    0x0, 0x0, 0x1000 };
+	u16 swap_coeffs[] = { 0x1000, 0x0, 0x0,
+			      0x0, 0x1000, 0x0,
+			      0x0, 0x0, 0x1000 };
+	u16 null_coeffs[] = { 0x0, 0x0, 0x0,
+			      0x0, 0x0, 0x0,
+			      0x0, 0x0, 0x0 };
+	u16 *coeffs;
+	/* Offset tables are never modified */
+	static const u32 sdtv_offsets[] = { 0x0, 0x1800, 0x1800 };
+	static const u32 sdtv_offsets_yonly[] = { 0x1800, 0x1800, 0x0 };
+	static const u32 null_offsets[] = { 0x0, 0x0, 0x0 };
+	const u32 *offsets;
+
+	if (layer->id == ZYNQMP_DISP_LAYER_VID)
+		offset = ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF0;
+	else
+		offset = ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF0;
+
+	if (!on) {
+		coeffs = null_coeffs;
+		offsets = null_offsets;
+	} else {
+		if (!layer->fmt->rgb) {
+			/*
+			 * In case of Y_ONLY formats, pixels are unpacked
+			 * differently compared to YCbCr
+			 */
+			if (layer->fmt->drm_fmt == DRM_FORMAT_Y8 ||
+			    layer->fmt->drm_fmt == DRM_FORMAT_Y10) {
+				coeffs = sdtv_coeffs_yonly;
+				offsets = sdtv_offsets_yonly;
+			} else {
+				coeffs = sdtv_coeffs;
+				offsets = sdtv_offsets;
+			}
+
+			s0 = 1;
+			s1 = 2;
+		} else {
+			coeffs = swap_coeffs;
+			s0 = 0;
+			s1 = 2;
+
+			/* No offset for RGB formats */
+			offsets = null_offsets;
+		}
+
+		if (layer->fmt->swap) {
+			/* Swap columns s0 and s1 of the 3x3 matrix */
+			for (i = 0; i < 3; i++) {
+				u16 tmp = coeffs[i * 3 + s0];
+
+				coeffs[i * 3 + s0] = coeffs[i * 3 + s1];
+				coeffs[i * 3 + s1] = tmp;
+			}
+		}
+	}
+
+	/* Program coefficients. Can be runtime configurable */
+	for (i = 0; i < ZYNQMP_DISP_V_BLEND_NUM_COEFF; i++)
+		zynqmp_disp_write(blend->base, offset + i * 4, coeffs[i]);
+
+	if (layer->id == ZYNQMP_DISP_LAYER_VID)
+		offset = ZYNQMP_DISP_V_BLEND_LUMA_IN1CSC_OFFSET;
+	else
+		offset = ZYNQMP_DISP_V_BLEND_LUMA_IN2CSC_OFFSET;
+
+	/* Program offsets. Can be runtime configurable */
+	for (i = 0; i < ZYNQMP_DISP_V_BLEND_NUM_OFFSET; i++)
+		zynqmp_disp_write(blend->base, offset + i * 4, offsets[i]);
+}
+
+/**
+ * zynqmp_disp_blend_layer_enable - Enable a layer
+ * @blend: blend object
+ * @layer: layer to enable
+ *
+ * Program the layer control register (RGB flag, chroma upsampling) for
+ * @layer and install its CSC coefficients.
+ */
+static void zynqmp_disp_blend_layer_enable(struct zynqmp_disp_blend *blend,
+					   struct zynqmp_disp_layer *layer)
+{
+	u32 reg = 0;
+
+	if (layer->fmt->rgb)
+		reg |= ZYNQMP_DISP_V_BLEND_LAYER_CONTROL_RGB;
+	if (layer->fmt->chroma_sub)
+		reg |= ZYNQMP_DISP_V_BLEND_LAYER_CONTROL_EN_US;
+
+	zynqmp_disp_write(blend->base,
+			  ZYNQMP_DISP_V_BLEND_LAYER_CONTROL + layer->offset,
+			  reg);
+
+	zynqmp_disp_blend_layer_coeff(blend, layer, true);
+}
+
+/**
+ * zynqmp_disp_blend_layer_disable - Disable a layer
+ * @blend: blend object
+ * @layer: layer to disable
+ *
+ * Clear the layer control register and zero the layer's CSC coefficients.
+ */
+static void zynqmp_disp_blend_layer_disable(struct zynqmp_disp_blend *blend,
+					    struct zynqmp_disp_layer *layer)
+{
+	zynqmp_disp_write(blend->base,
+			  ZYNQMP_DISP_V_BLEND_LAYER_CONTROL + layer->offset, 0);
+
+	zynqmp_disp_blend_layer_coeff(blend, layer, false);
+}
+
+/**
+ * zynqmp_disp_blend_set_bg_color - Set the background color
+ * @blend: blend object
+ * @c0: color component 0
+ * @c1: color component 1
+ * @c2: color component 2
+ *
+ * Write the three background color components to their registers.
+ */
+static void zynqmp_disp_blend_set_bg_color(struct zynqmp_disp_blend *blend,
+					   u32 c0, u32 c1, u32 c2)
+{
+	const u32 regs[] = { ZYNQMP_DISP_V_BLEND_BG_CLR_0,
+			     ZYNQMP_DISP_V_BLEND_BG_CLR_1,
+			     ZYNQMP_DISP_V_BLEND_BG_CLR_2 };
+	const u32 vals[] = { c0, c1, c2 };
+	unsigned int i;
+
+	for (i = 0; i < 3; i++)
+		zynqmp_disp_write(blend->base, regs[i], vals[i]);
+}
+
+/**
+ * zynqmp_disp_blend_set_alpha - Set the alpha for blending
+ * @blend: blend object
+ * @alpha: alpha value to be used
+ *
+ * Update the alpha field of the global alpha register, leaving the other
+ * bits (e.g. the enable bit 0) untouched.
+ */
+static void
+zynqmp_disp_blend_set_alpha(struct zynqmp_disp_blend *blend, u32 alpha)
+{
+	u32 reg = zynqmp_disp_read(blend->base,
+				   ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA);
+
+	/* Alpha value lives above bit 0 (the enable bit) */
+	reg = (reg & ~ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA_MASK) |
+	      (alpha << 1);
+	zynqmp_disp_write(blend->base, ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA,
+			  reg);
+}
+
+/**
+ * zynqmp_disp_blend_enable_alpha - Enable/disable the global alpha
+ * @blend: blend object
+ * @enable: flag to enable or disable alpha blending
+ *
+ * Set or clear the global alpha enable bit (bit 0) based on @enable.
+ */
+static void
+zynqmp_disp_blend_enable_alpha(struct zynqmp_disp_blend *blend, bool enable)
+{
+	void (*update)(void __iomem *base, int offset, u32 bits);
+
+	update = enable ? zynqmp_disp_set : zynqmp_disp_clr;
+	update(blend->base, ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA, BIT(0));
+}
+
+/* List of blend output formats */
+/* The id / order should be aligned with zynqmp_disp_color_enum */
+/* Only @disp_fmt is meaningful here; the other fields stay zero-initialized */
+static const struct zynqmp_disp_fmt blend_output_fmts[] = {
+	{
+		.disp_fmt = ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_RGB,
+	}, {
+		.disp_fmt = ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_YCBCR444,
+	}, {
+		.disp_fmt = ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_YCBCR422,
+	}, {
+		.disp_fmt = ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_YONLY,
+	}
+};
+
+/*
+ * AV buffer manager functions
+ */
+
+/* List of video layer formats */
+/*
+ * Non-live (memory) formats supported by the video layer. Each entry maps
+ * a DRM fourcc to the AV buffer manager hardware format plus per-component
+ * scaling factors; @swap marks fourccs realized by swapping R/B (or Cb/Cr)
+ * on an existing hardware format.
+ */
+#define ZYNQMP_DISP_AV_BUF_VID_FMT_YUYV	2	/* index of YUYV below */
+static const struct zynqmp_disp_fmt av_buf_vid_fmts[] = {
+	{
+		.drm_fmt = DRM_FORMAT_VYUY,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_VYUY,
+		.rgb = false,
+		.swap = true,
+		.chroma_sub = true,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_UYVY,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_VYUY,
+		.rgb = false,
+		.swap = false,
+		.chroma_sub = true,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_YUYV,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YUYV,
+		.rgb = false,
+		.swap = false,
+		.chroma_sub = true,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_YVYU,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YUYV,
+		.rgb = false,
+		.swap = true,
+		.chroma_sub = true,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_YUV422,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16,
+		.rgb = false,
+		.swap = false,
+		.chroma_sub = true,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_YVU422,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16,
+		.rgb = false,
+		.swap = true,
+		.chroma_sub = true,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_YUV444,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV24,
+		.rgb = false,
+		.swap = false,
+		.chroma_sub = false,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_YVU444,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV24,
+		.rgb = false,
+		.swap = true,
+		.chroma_sub = false,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_NV16,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI,
+		.rgb = false,
+		.swap = false,
+		.chroma_sub = true,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_NV61,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI,
+		.rgb = false,
+		.swap = true,
+		.chroma_sub = true,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_Y8,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_MONO,
+		.rgb = false,
+		.swap = false,
+		.chroma_sub = false,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_Y10,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YONLY_10,
+		.rgb = false,
+		.swap = false,
+		.chroma_sub = false,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_BGR888,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGB888,
+		.rgb = true,
+		.swap = false,
+		.chroma_sub = false,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_RGB888,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGB888,
+		.rgb = true,
+		.swap = true,
+		.chroma_sub = false,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_XBGR8888,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGBA8880,
+		.rgb = true,
+		.swap = false,
+		.chroma_sub = false,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_XRGB8888,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGBA8880,
+		.rgb = true,
+		.swap = true,
+		.chroma_sub = false,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_XBGR2101010,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGB888_10,
+		.rgb = true,
+		.swap = false,
+		.chroma_sub = false,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_XRGB2101010,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_RGB888_10,
+		.rgb = true,
+		.swap = true,
+		.chroma_sub = false,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_YUV420,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16_420,
+		.rgb = false,
+		.swap = false,
+		.chroma_sub = true,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_YVU420,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16_420,
+		.rgb = false,
+		.swap = true,
+		.chroma_sub = true,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_NV12,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI_420,
+		.rgb = false,
+		.swap = false,
+		.chroma_sub = true,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_NV21,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI_420,
+		.rgb = false,
+		.swap = true,
+		.chroma_sub = true,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_XV15,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI_420_10,
+		.rgb = false,
+		.swap = false,
+		.chroma_sub = true,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_XV20,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_VID_YV16CI_10,
+		.rgb = false,
+		.swap = false,
+		.chroma_sub = true,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+	}
+};
+
+/* List of graphics layer formats */
+/*
+ * Non-live (memory) formats supported by the graphics layer; same mapping
+ * scheme as av_buf_vid_fmts above.
+ */
+static const struct zynqmp_disp_fmt av_buf_gfx_fmts[] = {
+	{
+		.drm_fmt = DRM_FORMAT_ABGR8888,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA8888,
+		.rgb = true,
+		.swap = false,
+		.chroma_sub = false,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_ARGB8888,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA8888,
+		.rgb = true,
+		.swap = true,
+		.chroma_sub = false,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_RGBA8888,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_ABGR8888,
+		.rgb = true,
+		.swap = false,
+		.chroma_sub = false,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_BGRA8888,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_ABGR8888,
+		.rgb = true,
+		.swap = true,
+		.chroma_sub = false,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_BGR888,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGB888,
+		.rgb = true,
+		.swap = false,
+		.chroma_sub = false,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_RGB888,
+		/*
+		 * NOTE(review): this pair maps to a dedicated BGR888 hw
+		 * format with swap = false, unlike the other reversed pairs
+		 * above which reuse one hw format with swap = true — confirm
+		 * against the AV_BUF format register documentation.
+		 */
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_BGR888,
+		.rgb = true,
+		.swap = false,
+		.chroma_sub = false,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_RGBA5551,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA5551,
+		.rgb = true,
+		.swap = false,
+		.chroma_sub = false,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_BGRA5551,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA5551,
+		.rgb = true,
+		.swap = true,
+		.chroma_sub = false,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_RGBA4444,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA4444,
+		.rgb = true,
+		.swap = false,
+		.chroma_sub = false,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_4BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_4BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_4BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_BGRA4444,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGBA4444,
+		.rgb = true,
+		.swap = true,
+		.chroma_sub = false,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_4BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_4BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_4BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_RGB565,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGB565,
+		.rgb = true,
+		.swap = false,
+		.chroma_sub = false,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_6BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+	}, {
+		.drm_fmt = DRM_FORMAT_BGR565,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_RGB565,
+		.rgb = true,
+		.swap = true,
+		.chroma_sub = false,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_6BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_5BIT_SF,
+	}
+};
+
+/* List of live formats */
+/* Format can be combination of color, bpc, and cb-cr order.
+ * - Color: RGB / YUV444 / YUV422 / Y only
+ * - BPC: 6, 8, 10, 12
+ * - Swap: Cb and Cr swap
+ * which can be 32 bus formats. Only list the subset of those for now.
+ *
+ * @disp_fmt is written verbatim to the LIVE_VID/GFX_CONFIG register, so the
+ * BPC and FMT field values must be combined with bitwise OR (the original
+ * '||' collapsed the value to 0/1), with the format code shifted into its
+ * bits-5:4 field per ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_MASK.
+ */
+static const struct zynqmp_disp_fmt av_buf_live_fmts[] = {
+	{
+		.bus_fmt = MEDIA_BUS_FMT_RGB666_1X18,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_6 |
+			    (ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_RGB << 4),
+		.rgb = true,
+		.swap = false,
+		.chroma_sub = false,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_6BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_6BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_6BIT_SF,
+	}, {
+		.bus_fmt = MEDIA_BUS_FMT_RBG888_1X24,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_8 |
+			    (ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_RGB << 4),
+		.rgb = true,
+		.swap = false,
+		.chroma_sub = false,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.bus_fmt = MEDIA_BUS_FMT_UYVY8_1X16,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_8 |
+			    (ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV422 << 4),
+		.rgb = false,
+		.swap = false,
+		.chroma_sub = true,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.bus_fmt = MEDIA_BUS_FMT_VUY8_1X24,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_8 |
+			    (ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV444 << 4),
+		.rgb = false,
+		.swap = false,
+		.chroma_sub = false,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_8BIT_SF,
+	}, {
+		.bus_fmt = MEDIA_BUS_FMT_UYVY10_1X20,
+		.disp_fmt = ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_BPC_10 |
+			    (ZYNQMP_DISP_AV_BUF_LIVE_CONFIG_FMT_YUV422 << 4),
+		.rgb = false,
+		.swap = false,
+		.chroma_sub = true,
+		.sf[0] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+		.sf[1] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+		.sf[2] = ZYNQMP_DISP_AV_BUF_10BIT_SF,
+	}
+};
+
+/**
+ * zynqmp_disp_av_buf_set_fmt - Set the input formats
+ * @av_buf: av buffer manager
+ * @fmt: formats
+ *
+ * Set the av buffer manager format to @fmt. @fmt should have valid values
+ * for both video and graphics layer (a single register carries both
+ * fields).
+ */
+static void
+zynqmp_disp_av_buf_set_fmt(struct zynqmp_disp_av_buf *av_buf, u32 fmt)
+{
+	zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_FMT, fmt);
+}
+
+/**
+ * zynqmp_disp_av_buf_get_fmt - Get the input formats
+ * @av_buf: av buffer manager
+ *
+ * Get the input formats (which include video and graphics) of
+ * av buffer manager.
+ *
+ * Return: raw value of the ZYNQMP_DISP_AV_BUF_FMT register.
+ */
+static u32
+zynqmp_disp_av_buf_get_fmt(struct zynqmp_disp_av_buf *av_buf)
+{
+	return zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_FMT);
+}
+
+/**
+ * zynqmp_disp_av_buf_set_live_fmt - Set the live input format
+ * @av_buf: av buffer manager
+ * @fmt: format (a valid hardware LIVE_*_CONFIG register value)
+ * @is_vid: true to program the video layer, false for the graphics layer
+ *
+ * Write @fmt to the live config register selected by @is_vid.
+ */
+static void zynqmp_disp_av_buf_set_live_fmt(struct zynqmp_disp_av_buf *av_buf,
+					    u32 fmt, bool is_vid)
+{
+	u32 offset = is_vid ? ZYNQMP_DISP_AV_BUF_LIVE_VID_CONFIG :
+			      ZYNQMP_DISP_AV_BUF_LIVE_GFX_CONFIG;
+
+	zynqmp_disp_write(av_buf->base, offset, fmt);
+}
+
+/**
+ * zynqmp_disp_av_buf_set_vid_clock_src - Set the video clock source
+ * @av_buf: av buffer manager
+ * @from_ps: flag if the video clock is from ps
+ *
+ * Select the video clock source (PS or PL) based on @from_ps.
+ */
+static void
+zynqmp_disp_av_buf_set_vid_clock_src(struct zynqmp_disp_av_buf *av_buf,
+				     bool from_ps)
+{
+	if (from_ps)
+		zynqmp_disp_set(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC,
+				ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_FROM_PS);
+	else
+		zynqmp_disp_clr(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC,
+				ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_FROM_PS);
+}
+
+/**
+ * zynqmp_disp_av_buf_vid_clock_src_is_ps - if ps clock is used
+ * @av_buf: av buffer manager
+ *
+ * Return: true if the video clock comes from the PS.
+ */
+static bool
+zynqmp_disp_av_buf_vid_clock_src_is_ps(struct zynqmp_disp_av_buf *av_buf)
+{
+	return !!(zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC) &
+		  ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_FROM_PS);
+}
+
+/**
+ * zynqmp_disp_av_buf_set_vid_timing_src - Set the video timing source
+ * @av_buf: av buffer manager
+ * @internal: flag if the video timing is generated internally
+ *
+ * Select whether video timing is generated internally or comes from an
+ * external source, based on @internal.
+ */
+static void
+zynqmp_disp_av_buf_set_vid_timing_src(struct zynqmp_disp_av_buf *av_buf,
+				      bool internal)
+{
+	if (internal)
+		zynqmp_disp_set(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC,
+				ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_INTERNAL_TIMING);
+	else
+		zynqmp_disp_clr(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC,
+				ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_INTERNAL_TIMING);
+}
+
+/**
+ * zynqmp_disp_av_buf_vid_timing_src_is_int - if internal timing is used
+ * @av_buf: av buffer manager
+ *
+ * Return: true if video timing is generated internally.
+ */
+static bool
+zynqmp_disp_av_buf_vid_timing_src_is_int(struct zynqmp_disp_av_buf *av_buf)
+{
+	return !!(zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC) &
+		  ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_INTERNAL_TIMING);
+}
+
+/**
+ * zynqmp_disp_av_buf_set_aud_clock_src - Set the audio clock source
+ * @av_buf: av buffer manager
+ * @from_ps: flag if the audio clock is from ps
+ *
+ * Select the audio clock source (PS or PL) based on @from_ps.
+ */
+static void
+zynqmp_disp_av_buf_set_aud_clock_src(struct zynqmp_disp_av_buf *av_buf,
+				     bool from_ps)
+{
+	if (from_ps)
+		zynqmp_disp_set(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC,
+				ZYNQMP_DISP_AV_BUF_CLK_SRC_AUD_FROM_PS);
+	else
+		zynqmp_disp_clr(av_buf->base, ZYNQMP_DISP_AV_BUF_CLK_SRC,
+				ZYNQMP_DISP_AV_BUF_CLK_SRC_AUD_FROM_PS);
+}
+
+/**
+ * zynqmp_disp_av_buf_enable_buf - Enable buffers
+ * @av_buf: av buffer manager
+ *
+ * Enable all (video and audio) channel buffers with their respective burst
+ * lengths.
+ */
+static void
+zynqmp_disp_av_buf_enable_buf(struct zynqmp_disp_av_buf *av_buf)
+{
+	u32 reg, i;
+
+	/* Video/graphics channel buffers: enable with the video burst length */
+	reg = ZYNQMP_DISP_AV_BUF_CHBUF_EN;
+	reg |= ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_MAX <<
+	       ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_SHIFT;
+
+	for (i = 0; i < ZYNQMP_DISP_AV_BUF_NUM_VID_GFX_BUFFERS; i++)
+		zynqmp_disp_write(av_buf->base,
+				  ZYNQMP_DISP_AV_BUF_CHBUF + i * 4, reg);
+
+	/* Remaining channel buffers are audio: use the audio burst length */
+	reg = ZYNQMP_DISP_AV_BUF_CHBUF_EN;
+	reg |= ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_AUD_MAX <<
+	       ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_SHIFT;
+
+	/* 'i' deliberately continues from the loop above */
+	for (; i < ZYNQMP_DISP_AV_BUF_NUM_BUFFERS; i++)
+		zynqmp_disp_write(av_buf->base,
+				  ZYNQMP_DISP_AV_BUF_CHBUF + i * 4, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_disable_buf - Disable buffers
+ * @av_buf: av buffer manager
+ *
+ * Disable all (video and audio) channel buffers by writing the flush bit
+ * with the enable bit cleared to every channel buffer register.
+ */
+static void
+zynqmp_disp_av_buf_disable_buf(struct zynqmp_disp_av_buf *av_buf)
+{
+ u32 reg, i;
+
+ /* Flush with EN cleared; the &~EN spells out the intent explicitly */
+ reg = ZYNQMP_DISP_AV_BUF_CHBUF_FLUSH & ~ZYNQMP_DISP_AV_BUF_CHBUF_EN;
+ for (i = 0; i < ZYNQMP_DISP_AV_BUF_NUM_BUFFERS; i++)
+ zynqmp_disp_write(av_buf->base,
+ ZYNQMP_DISP_AV_BUF_CHBUF + i * 4, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_enable_aud - Enable audio
+ * @av_buf: av buffer manager
+ *
+ * Route audio stream 1 from memory and enable audio stream 2 in the
+ * output selection register.
+ */
+static void
+zynqmp_disp_av_buf_enable_aud(struct zynqmp_disp_av_buf *av_buf)
+{
+ u32 reg;
+
+ reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT);
+ /* Clear the AUD1 field before setting the new source */
+ reg &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_MASK;
+ reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_MEM;
+ reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_AUD2_EN;
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_enable - Enable the video pipe
+ * @av_buf: av buffer manager
+ *
+ * De-assert the video pipe reset by clearing the soft reset register.
+ */
+static void
+zynqmp_disp_av_buf_enable(struct zynqmp_disp_av_buf *av_buf)
+{
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_SRST_REG, 0);
+}
+
+/**
+ * zynqmp_disp_av_buf_disable - Disable the video pipe
+ * @av_buf: av buffer manager
+ *
+ * Assert the video pipe reset; the pipe stays in reset until
+ * zynqmp_disp_av_buf_enable() clears it.
+ */
+static void
+zynqmp_disp_av_buf_disable(struct zynqmp_disp_av_buf *av_buf)
+{
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_SRST_REG,
+ ZYNQMP_DISP_AV_BUF_SRST_REG_VID_RST);
+}
+
+/**
+ * zynqmp_disp_av_buf_disable_aud - Disable audio
+ * @av_buf: av buffer manager
+ *
+ * Disable audio stream 1 (set its source field to "disabled") and clear
+ * the audio stream 2 enable bit. Mirror of zynqmp_disp_av_buf_enable_aud().
+ */
+static void
+zynqmp_disp_av_buf_disable_aud(struct zynqmp_disp_av_buf *av_buf)
+{
+ u32 reg;
+
+ reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT);
+ reg &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_MASK;
+ reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_DISABLE;
+ reg &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_AUD2_EN;
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_set_tpg - Set TPG mode
+ * @av_buf: av buffer manager
+ * @tpg_on: if TPG should be on
+ *
+ * Set the test pattern generator mode based on @tpg_on by selecting the
+ * pattern source for the video 1 output field.
+ */
+static void zynqmp_disp_av_buf_set_tpg(struct zynqmp_disp_av_buf *av_buf,
+ bool tpg_on)
+{
+ u32 reg;
+
+ reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT);
+ reg &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MASK;
+ if (tpg_on)
+ reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_PATTERN;
+ else
+ /*
+ * NOTE(review): presumably redundant if PATTERN lies inside
+ * VID1_MASK (already cleared above) — harmless either way.
+ */
+ reg &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_PATTERN;
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_enable_vid - Enable the video layer buffer
+ * @av_buf: av buffer manager
+ * @layer: layer to enable
+ * @mode: operation mode of layer
+ *
+ * Enable the video/graphics buffer for @layer, selecting the memory
+ * (non-live) or live source in the output selection register.
+ */
+static void zynqmp_disp_av_buf_enable_vid(struct zynqmp_disp_av_buf *av_buf,
+ struct zynqmp_disp_layer *layer,
+ enum zynqmp_disp_layer_mode mode)
+{
+ u32 reg;
+
+ reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT);
+ if (layer->id == ZYNQMP_DISP_LAYER_VID) {
+ reg &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MASK;
+ if (mode == ZYNQMP_DISP_LAYER_NONLIVE)
+ reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MEM;
+ else
+ reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_LIVE;
+ } else {
+ reg &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_MASK;
+ /*
+ * Fix: the MEM value was previously OR'ed in unconditionally
+ * before the mode check, so in live mode both the MEM and LIVE
+ * values were combined in the VID2 field. Select exactly one
+ * source, mirroring the VID1 branch above.
+ */
+ if (mode == ZYNQMP_DISP_LAYER_NONLIVE)
+ reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_MEM;
+ else
+ reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_LIVE;
+ }
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_disable_vid - Disable the video layer buffer
+ * @av_buf: av buffer manager
+ * @layer: layer to disable
+ *
+ * Disable the video/graphics buffer for @layer by programming the
+ * corresponding output field to its "none"/"disable" value.
+ */
+static void
+zynqmp_disp_av_buf_disable_vid(struct zynqmp_disp_av_buf *av_buf,
+ struct zynqmp_disp_layer *layer)
+{
+ u32 reg;
+
+ reg = zynqmp_disp_read(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT);
+ if (layer->id == ZYNQMP_DISP_LAYER_VID) {
+ reg &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MASK;
+ reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_NONE;
+ } else {
+ reg &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_MASK;
+ reg |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_DISABLE;
+ }
+ zynqmp_disp_write(av_buf->base, ZYNQMP_DISP_AV_BUF_OUTPUT, reg);
+}
+
+/**
+ * zynqmp_disp_av_buf_init_sf - Initialize scaling factors
+ * @av_buf: av buffer manager
+ * @vid_fmt: video format descriptor
+ * @gfx_fmt: graphics format descriptor
+ *
+ * Program the per-component scaling factors for the video and graphics
+ * layers. A NULL descriptor skips the corresponding layer.
+ */
+static void zynqmp_disp_av_buf_init_sf(struct zynqmp_disp_av_buf *av_buf,
+ const struct zynqmp_disp_fmt *vid_fmt,
+ const struct zynqmp_disp_fmt *gfx_fmt)
+{
+ unsigned int comp;
+
+ if (gfx_fmt)
+ for (comp = 0; comp < ZYNQMP_DISP_AV_BUF_NUM_SF; comp++)
+ zynqmp_disp_write(av_buf->base,
+ ZYNQMP_DISP_AV_BUF_GFX_COMP0_SF + comp * 4,
+ gfx_fmt->sf[comp]);
+
+ if (vid_fmt)
+ for (comp = 0; comp < ZYNQMP_DISP_AV_BUF_NUM_SF; comp++)
+ zynqmp_disp_write(av_buf->base,
+ ZYNQMP_DISP_AV_BUF_VID_COMP0_SF + comp * 4,
+ vid_fmt->sf[comp]);
+}
+
+/**
+ * zynqmp_disp_av_buf_init_live_sf - Initialize scaling factors for live source
+ * @av_buf: av buffer manager
+ * @fmt: format descriptor
+ * @is_vid: flag if this is for video layer
+ *
+ * Program the per-component scaling factors for the live (PL) source of
+ * either the video or the graphics layer.
+ */
+static void zynqmp_disp_av_buf_init_live_sf(struct zynqmp_disp_av_buf *av_buf,
+ const struct zynqmp_disp_fmt *fmt,
+ bool is_vid)
+{
+ u32 base = is_vid ? ZYNQMP_DISP_AV_BUF_LIVE_VID_COMP0_SF :
+ ZYNQMP_DISP_AV_BUF_LIVE_GFX_COMP0_SF;
+ unsigned int comp;
+
+ for (comp = 0; comp < ZYNQMP_DISP_AV_BUF_NUM_SF; comp++)
+ zynqmp_disp_write(av_buf->base, base + comp * 4, fmt->sf[comp]);
+}
+
+/*
+ * Audio functions
+ */
+
+/**
+ * zynqmp_disp_aud_init - Initialize the audio
+ * @aud: audio
+ *
+ * Initialize the audio with default mixer volume. The de-assertion will
+ * initialize the audio states.
+ */
+static void zynqmp_disp_aud_init(struct zynqmp_disp_aud *aud)
+{
+ /* Clear the audio soft reset register as it's an non-reset flop */
+ zynqmp_disp_write(aud->base, ZYNQMP_DISP_AUD_SOFT_RESET, 0);
+ /* Unity gain: no scaling applied by the mixer */
+ zynqmp_disp_write(aud->base, ZYNQMP_DISP_AUD_MIXER_VOLUME,
+ ZYNQMP_DISP_AUD_MIXER_VOLUME_NO_SCALE);
+}
+
+/**
+ * zynqmp_disp_aud_deinit - De-initialize the audio
+ * @aud: audio
+ *
+ * Put the audio in reset by asserting the audio soft reset bit.
+ */
+static void zynqmp_disp_aud_deinit(struct zynqmp_disp_aud *aud)
+{
+ zynqmp_disp_set(aud->base, ZYNQMP_DISP_AUD_SOFT_RESET,
+ ZYNQMP_DISP_AUD_SOFT_RESET_AUD_SRST);
+}
+
+/*
+ * ZynqMP Display layer functions
+ */
+
+/**
+ * zynqmp_disp_layer_check_size - Verify width and height for the layer
+ * @disp: Display subsystem
+ * @layer: layer
+ * @width: width
+ * @height: height
+ *
+ * The Display subsystem requires both layers to have identical size. Reject
+ * @width / @height if they disagree with the already-enabled peer layer,
+ * otherwise record them on @layer.
+ *
+ * Return: 0 on success, or -EINVAL if width or/and height is invalid.
+ */
+static int zynqmp_disp_layer_check_size(struct zynqmp_disp *disp,
+ struct zynqmp_disp_layer *layer,
+ u32 width, u32 height)
+{
+ struct zynqmp_disp_layer *peer = layer->other;
+ bool mismatch = peer->w != width || peer->h != height;
+
+ if (peer->enabled && mismatch) {
+ dev_err(disp->dev, "Layer width:height must be %d:%d\n",
+ peer->w, peer->h);
+ return -EINVAL;
+ }
+
+ layer->w = width;
+ layer->h = height;
+
+ return 0;
+}
+
+/**
+ * zynqmp_disp_map_fmt - Find the Display subsystem format for given drm format
+ * @fmts: format table to look up
+ * @size: size of the table @fmts
+ * @drm_fmt: DRM format to search
+ *
+ * Linearly scan @fmts for an entry whose drm_fmt matches @drm_fmt.
+ *
+ * Return: a Display subsystem format descriptor on success, or NULL.
+ */
+static const struct zynqmp_disp_fmt *
+zynqmp_disp_map_fmt(const struct zynqmp_disp_fmt fmts[],
+ unsigned int size, uint32_t drm_fmt)
+{
+ const struct zynqmp_disp_fmt *entry = fmts;
+ const struct zynqmp_disp_fmt *end = fmts + size;
+
+ for (; entry < end; entry++) {
+ if (entry->drm_fmt == drm_fmt)
+ return entry;
+ }
+
+ return NULL;
+}
+
+/**
+ * zynqmp_disp_layer_set_fmt - Set the format of the layer
+ * @disp: Display subsystem
+ * @layer: layer to set the format
+ * @drm_fmt: DRM format to set
+ *
+ * Set the format of the given layer to @drm_fmt, updating the non-live
+ * format field of the AV buffer manager and the per-component scaling
+ * factors accordingly.
+ *
+ * Return: 0 on success. -EINVAL if @drm_fmt is not supported by the layer.
+ */
+static int zynqmp_disp_layer_set_fmt(struct zynqmp_disp *disp,
+ struct zynqmp_disp_layer *layer,
+ uint32_t drm_fmt)
+{
+ const struct zynqmp_disp_fmt *fmt;
+ const struct zynqmp_disp_fmt *vid_fmt = NULL, *gfx_fmt = NULL;
+ u32 size, fmts, mask;
+
+ /* Pick the table and register field matching this layer type */
+ if (layer->id == ZYNQMP_DISP_LAYER_VID) {
+ size = ARRAY_SIZE(av_buf_vid_fmts);
+ mask = ~ZYNQMP_DISP_AV_BUF_FMT_NL_VID_MASK;
+ fmt = zynqmp_disp_map_fmt(av_buf_vid_fmts, size, drm_fmt);
+ vid_fmt = fmt;
+ } else {
+ size = ARRAY_SIZE(av_buf_gfx_fmts);
+ mask = ~ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_MASK;
+ fmt = zynqmp_disp_map_fmt(av_buf_gfx_fmts, size, drm_fmt);
+ gfx_fmt = fmt;
+ }
+
+ if (!fmt)
+ return -EINVAL;
+
+ /* Update only this layer's field in the shared format register */
+ fmts = zynqmp_disp_av_buf_get_fmt(&disp->av_buf);
+ fmts &= mask;
+ fmts |= fmt->disp_fmt;
+ zynqmp_disp_av_buf_set_fmt(&disp->av_buf, fmts);
+ zynqmp_disp_av_buf_init_sf(&disp->av_buf, vid_fmt, gfx_fmt);
+ layer->fmt = fmt;
+
+ return 0;
+}
+
+/**
+ * zynqmp_disp_map_live_fmt - Find the hardware format for given bus format
+ * @fmts: format table to look up
+ * @size: size of the table @fmts
+ * @bus_fmt: bus format to search
+ *
+ * Linearly scan @fmts for an entry whose bus_fmt matches @bus_fmt.
+ *
+ * Return: a Display subsystem format descriptor on success, or NULL.
+ */
+static const struct zynqmp_disp_fmt *
+zynqmp_disp_map_live_fmt(const struct zynqmp_disp_fmt fmts[],
+ unsigned int size, uint32_t bus_fmt)
+{
+ const struct zynqmp_disp_fmt *entry = fmts;
+ const struct zynqmp_disp_fmt *end = fmts + size;
+
+ for (; entry < end; entry++) {
+ if (entry->bus_fmt == bus_fmt)
+ return entry;
+ }
+
+ return NULL;
+}
+
+/**
+ * zynqmp_disp_layer_set_live_fmt - Set the live format of the layer
+ * @disp: Display subsystem
+ * @layer: layer to set the format
+ * @bus_fmt: bus format to set
+ *
+ * Set the live format of the given layer to @bus_fmt, programming the live
+ * format and the live scaling factors of the AV buffer manager.
+ *
+ * Return: 0 on success. -EINVAL if @bus_fmt is not supported by the layer.
+ */
+static int zynqmp_disp_layer_set_live_fmt(struct zynqmp_disp *disp,
+ struct zynqmp_disp_layer *layer,
+ uint32_t bus_fmt)
+{
+ const struct zynqmp_disp_fmt *fmt;
+ u32 size;
+ bool is_vid = layer->id == ZYNQMP_DISP_LAYER_VID;
+
+ size = ARRAY_SIZE(av_buf_live_fmts);
+ fmt = zynqmp_disp_map_live_fmt(av_buf_live_fmts, size, bus_fmt);
+ if (!fmt)
+ return -EINVAL;
+
+ zynqmp_disp_av_buf_set_live_fmt(&disp->av_buf, fmt->disp_fmt, is_vid);
+ zynqmp_disp_av_buf_init_live_sf(&disp->av_buf, fmt, is_vid);
+ layer->fmt = fmt;
+
+ return 0;
+}
+
+/**
+ * zynqmp_disp_layer_set_tpg - Enable or disable TPG
+ * @disp: Display subsystem
+ * @layer: Video layer
+ * @tpg_on: flag if TPG needs to be enabled or disabled
+ *
+ * Enable / disable the TPG mode on the video layer @layer depending on
+ * @tpg_on. The video layer should be disabled prior to enable request.
+ *
+ * Return: 0 on success. -ENODEV if it's not video layer. -EIO if
+ * the video layer is enabled.
+ */
+static int zynqmp_disp_layer_set_tpg(struct zynqmp_disp *disp,
+ struct zynqmp_disp_layer *layer,
+ bool tpg_on)
+{
+ /* TPG is only wired to the video layer, not graphics */
+ if (layer->id != ZYNQMP_DISP_LAYER_VID) {
+ dev_err(disp->dev,
+ "only the video layer has the tpg mode\n");
+ return -ENODEV;
+ }
+
+ if (layer->enabled) {
+ dev_err(disp->dev,
+ "the video layer should be disabled for tpg mode\n");
+ return -EIO;
+ }
+
+ /* Update blender coefficients before switching the buffer source */
+ zynqmp_disp_blend_layer_coeff(&disp->blend, layer, tpg_on);
+ zynqmp_disp_av_buf_set_tpg(&disp->av_buf, tpg_on);
+ disp->tpg_on = tpg_on;
+
+ return 0;
+}
+
+/**
+ * zynqmp_disp_layer_get_tpg - Get the TPG mode status
+ * @disp: Display subsystem
+ * @layer: Video layer (unused; kept for interface symmetry)
+ *
+ * Return if the TPG is enabled or not.
+ *
+ * Return: true if TPG is on, otherwise false
+ */
+static bool zynqmp_disp_layer_get_tpg(struct zynqmp_disp *disp,
+ struct zynqmp_disp_layer *layer)
+{
+ return disp->tpg_on;
+}
+
+/**
+ * zynqmp_disp_layer_get_fmts - Get the supported DRM formats of the layer
+ * @disp: Display subsystem
+ * @layer: layer to get the formats
+ * @drm_fmts: pointer to array of DRM format (fourcc) codes
+ * @num_fmts: pointer to number of returned DRM formats
+ *
+ * Get the supported DRM formats of the given layer. The returned array is
+ * owned by @layer; callers must not modify or free it.
+ */
+static void zynqmp_disp_layer_get_fmts(struct zynqmp_disp *disp,
+ struct zynqmp_disp_layer *layer,
+ u32 **drm_fmts, unsigned int *num_fmts)
+{
+ *drm_fmts = layer->drm_fmts;
+ *num_fmts = layer->num_fmts;
+}
+
+/**
+ * zynqmp_disp_layer_enable - Enable the layer
+ * @disp: Display subsystem
+ * @layer: layer to enable
+ * @mode: operation mode
+ *
+ * Enable the layer @layer: select its buffer source, enable it in the
+ * blender, and (for non-live mode) submit and kick the prepared DMA
+ * descriptors.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+static int zynqmp_disp_layer_enable(struct zynqmp_disp *disp,
+ struct zynqmp_disp_layer *layer,
+ enum zynqmp_disp_layer_mode mode)
+{
+ struct device *dev = disp->dev;
+ struct dma_async_tx_descriptor *desc;
+ enum dma_ctrl_flags flags;
+ unsigned int i;
+
+ /* A layer can't be live and non-live at the same time */
+ if (layer->enabled && layer->mode != mode) {
+ dev_err(dev, "layer is already enabled in different mode\n");
+ return -EBUSY;
+ }
+
+ zynqmp_disp_av_buf_enable_vid(&disp->av_buf, layer, mode);
+ zynqmp_disp_blend_layer_enable(&disp->blend, layer);
+
+ layer->enabled = true;
+ layer->mode = mode;
+
+ /* Live sources are fed from the PL; no DMA to start */
+ if (mode == ZYNQMP_DISP_LAYER_LIVE)
+ return 0;
+
+ for (i = 0; i < ZYNQMP_DISP_MAX_NUM_SUB_PLANES; i++) {
+ struct zynqmp_disp_layer_dma *dma = &layer->dma[i];
+
+ if (dma->chan && dma->is_active) {
+ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ /* dma->xt was filled in by the mode_set path */
+ desc = dmaengine_prep_interleaved_dma(dma->chan,
+ &dma->xt, flags);
+ if (!desc) {
+ dev_err(dev, "failed to prep DMA descriptor\n");
+ return -ENOMEM;
+ }
+
+ dmaengine_submit(desc);
+ dma_async_issue_pending(dma->chan);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * zynqmp_disp_layer_disable - Disable the layer
+ * @disp: Display subsystem
+ * @layer: layer to disable
+ * @mode: operation mode
+ *
+ * Disable the layer @layer: stop its DMA channels, deselect its buffer
+ * source, and disable it in the blender.
+ *
+ * Return: 0 on success, or -EBUSY if the layer is in different mode.
+ */
+static int zynqmp_disp_layer_disable(struct zynqmp_disp *disp,
+ struct zynqmp_disp_layer *layer,
+ enum zynqmp_disp_layer_mode mode)
+{
+ struct device *dev = disp->dev;
+ unsigned int i;
+
+ if (layer->mode != mode) {
+ dev_err(dev, "the layer is operating in different mode\n");
+ return -EBUSY;
+ }
+
+ /* Stop DMA before turning off the buffer source */
+ for (i = 0; i < ZYNQMP_DISP_MAX_NUM_SUB_PLANES; i++)
+ if (layer->dma[i].chan && layer->dma[i].is_active)
+ dmaengine_terminate_sync(layer->dma[i].chan);
+
+ zynqmp_disp_av_buf_disable_vid(&disp->av_buf, layer);
+ zynqmp_disp_blend_layer_disable(&disp->blend, layer);
+ layer->enabled = false;
+
+ return 0;
+}
+
+/**
+ * zynqmp_disp_layer_request_dma - Request DMA channels for a layer
+ * @disp: Display subsystem
+ * @layer: layer to request DMA channels
+ * @name: identifier string for layer type
+ *
+ * Request DMA engine channels for corresponding layer. Channel names are
+ * built as "<name>0", "<name>1", ... up to layer->num_chan.
+ *
+ * Return: 0 on success, or err value from of_dma_request_slave_channel().
+ */
+static int
+zynqmp_disp_layer_request_dma(struct zynqmp_disp *disp,
+ struct zynqmp_disp_layer *layer, const char *name)
+{
+ struct zynqmp_disp_layer_dma *dma;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < layer->num_chan; i++) {
+ char temp[16];
+
+ dma = &layer->dma[i];
+ snprintf(temp, sizeof(temp), "%s%d", name, i);
+ dma->chan = of_dma_request_slave_channel(layer->of_node,
+ temp);
+ /*
+ * NOTE(review): assumes of_dma_request_slave_channel()
+ * returns ERR_PTR() (not NULL) on failure on this kernel
+ * version — confirm against the dmaengine headers in use.
+ */
+ if (IS_ERR(dma->chan)) {
+ dev_err(disp->dev, "failed to request dma channel\n");
+ ret = PTR_ERR(dma->chan);
+ /* Clear so release path skips this slot */
+ dma->chan = NULL;
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * zynqmp_disp_layer_release_dma - Release DMA channels for a layer
+ * @disp: Display subsystem
+ * @layer: layer to release DMA channels
+ *
+ * Terminate and release every DMA channel held by @layer.
+ */
+static void zynqmp_disp_layer_release_dma(struct zynqmp_disp *disp,
+ struct zynqmp_disp_layer *layer)
+{
+ unsigned int chan;
+
+ for (chan = 0; chan < layer->num_chan; chan++) {
+ struct dma_chan *dma_chan = layer->dma[chan].chan;
+
+ if (!dma_chan)
+ continue;
+
+ /* Make sure the channel is terminated before release */
+ dmaengine_terminate_all(dma_chan);
+ dma_release_channel(dma_chan);
+ }
+}
+
+/**
+ * zynqmp_disp_layer_is_live - if any layer is live
+ * @disp: Display subsystem
+ *
+ * Return: true if any layer is live
+ */
+static bool zynqmp_disp_layer_is_live(struct zynqmp_disp *disp)
+{
+ unsigned int id;
+
+ for (id = 0; id < ZYNQMP_DISP_NUM_LAYERS; id++) {
+ struct zynqmp_disp_layer *layer = &disp->layers[id];
+
+ if (layer->enabled && layer->mode == ZYNQMP_DISP_LAYER_LIVE)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * zynqmp_disp_layer_is_enabled - if any layer is enabled
+ * @disp: Display subsystem
+ *
+ * Return: true if any layer is enabled
+ */
+static bool zynqmp_disp_layer_is_enabled(struct zynqmp_disp *disp)
+{
+ unsigned int id;
+
+ for (id = 0; id < ZYNQMP_DISP_NUM_LAYERS; id++) {
+ if (disp->layers[id].enabled)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * zynqmp_disp_layer_destroy - Destroy all layers
+ * @disp: Display subsystem
+ *
+ * Release each layer's DMA channels and drop its DT node reference.
+ */
+static void zynqmp_disp_layer_destroy(struct zynqmp_disp *disp)
+{
+ unsigned int id;
+
+ for (id = 0; id < ZYNQMP_DISP_NUM_LAYERS; id++) {
+ struct zynqmp_disp_layer *layer = &disp->layers[id];
+
+ zynqmp_disp_layer_release_dma(disp, layer);
+ if (layer->of_node)
+ of_node_put(layer->of_node);
+ }
+}
+
+/**
+ * zynqmp_disp_layer_create - Create all layers
+ * @disp: Display subsystem
+ *
+ * Create all layers: look up each layer's DT child node and request its
+ * DMA channels. On any failure all partially-created layers are torn down.
+ *
+ * Return: 0 on success, -ENODEV if a layer DT node is missing, or the
+ * error code from zynqmp_disp_layer_request_dma().
+ */
+static int zynqmp_disp_layer_create(struct zynqmp_disp *disp)
+{
+ struct zynqmp_disp_layer *layer;
+ unsigned int i;
+ int num_chans[ZYNQMP_DISP_NUM_LAYERS] = { 3, 1 };
+ const char * const dma_name[] = { "vid", "gfx" };
+ int ret;
+
+ for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++) {
+ char temp[16];
+
+ layer = &disp->layers[i];
+ layer->id = i;
+ layer->offset = i * 4;
+ layer->other = &disp->layers[!i];
+ layer->num_chan = num_chans[i];
+ snprintf(temp, sizeof(temp), "%s-layer", dma_name[i]);
+ layer->of_node = of_get_child_by_name(disp->dev->of_node, temp);
+ if (!layer->of_node) {
+ /*
+ * Fix: 'ret' was previously used uninitialized on
+ * this error path, returning a garbage error code.
+ */
+ ret = -ENODEV;
+ goto err;
+ }
+ ret = zynqmp_disp_layer_request_dma(disp, layer, dma_name[i]);
+ if (ret)
+ goto err;
+ layer->disp = disp;
+ }
+
+ return 0;
+
+err:
+ zynqmp_disp_layer_destroy(disp);
+ return ret;
+}
+
+/*
+ * ZynqMP Display internal functions
+ */
+
+/*
+ * Output format enumeration.
+ * The ID should be aligned with blend_output_fmts.
+ * The string should be aligned with how zynqmp_dp_set_color() decodes.
+ */
+static struct drm_prop_enum_list zynqmp_disp_color_enum[] = {
+ { 0, "rgb" },
+ { 1, "ycrcb444" },
+ { 2, "ycrcb422" },
+ { 3, "yonly" },
+};
+
+/**
+ * zynqmp_disp_set_output_fmt - Set the output format
+ * @disp: Display subsystem
+ * @id: the format ID. Refer to zynqmp_disp_color_enum[].
+ *
+ * This function sets the output format of the display / blender as well as
+ * the format of DP controller. The @id should be aligned with
+ * zynqmp_disp_color_enum. @id is used to index blend_output_fmts directly,
+ * so both tables must stay in sync.
+ */
+static void
+zynqmp_disp_set_output_fmt(struct zynqmp_disp *disp, unsigned int id)
+{
+ const struct zynqmp_disp_fmt *fmt = &blend_output_fmts[id];
+
+ /* The DP controller decodes the color by its enum name string */
+ zynqmp_dp_set_color(disp->dpsub->dp, zynqmp_disp_color_enum[id].name);
+ zynqmp_disp_blend_set_output_fmt(&disp->blend, fmt->disp_fmt);
+}
+
+/**
+ * zynqmp_disp_set_bg_color - Set the background color
+ * @disp: Display subsystem
+ * @c0: color component 0
+ * @c1: color component 1
+ * @c2: color component 2
+ *
+ * Set the blender background color with given color components
+ * (@c0, @c1, @c2). Thin wrapper around the blend helper.
+ */
+static void zynqmp_disp_set_bg_color(struct zynqmp_disp *disp,
+ u32 c0, u32 c1, u32 c2)
+{
+ zynqmp_disp_blend_set_bg_color(&disp->blend, c0, c1, c2);
+}
+
+/**
+ * zynqmp_disp_set_alpha - Set the alpha value
+ * @disp: Display subsystem
+ * @alpha: alpha value to set
+ *
+ * Set the global alpha value for blending, caching it so it survives a
+ * pipeline re-enable.
+ */
+static void zynqmp_disp_set_alpha(struct zynqmp_disp *disp, u32 alpha)
+{
+ disp->alpha = alpha;
+ zynqmp_disp_blend_set_alpha(&disp->blend, alpha);
+}
+
+/**
+ * zynqmp_disp_get_alpha - Get the alpha value
+ * @disp: Display subsystem
+ *
+ * Get the cached global alpha value for blending.
+ *
+ * Return: current alpha value.
+ */
+static u32 zynqmp_disp_get_alpha(struct zynqmp_disp *disp)
+{
+ return disp->alpha;
+}
+
+/**
+ * zynqmp_disp_set_g_alpha - Enable/disable the global alpha blending
+ * @disp: Display subsystem
+ * @enable: flag to enable or disable alpha blending
+ *
+ * Enable or disable the global alpha blending, caching the state so it
+ * survives a pipeline re-enable.
+ */
+static void zynqmp_disp_set_g_alpha(struct zynqmp_disp *disp, bool enable)
+{
+ disp->alpha_en = enable;
+ zynqmp_disp_blend_enable_alpha(&disp->blend, enable);
+}
+
+/**
+ * zynqmp_disp_get_g_alpha - Get the global alpha status
+ * @disp: Display subsystem
+ *
+ * Get the cached global alpha status.
+ *
+ * Return: true if global alpha is enabled, or false.
+ */
+static bool zynqmp_disp_get_g_alpha(struct zynqmp_disp *disp)
+{
+ return disp->alpha_en;
+}
+
+/**
+ * zynqmp_disp_enable - Enable the Display subsystem
+ * @disp: Display subsystem
+ *
+ * Enable the Display subsystem: release the pipe reset, select clock and
+ * timing sources, and enable the channel buffers and audio. No-op if
+ * already enabled.
+ */
+static void zynqmp_disp_enable(struct zynqmp_disp *disp)
+{
+ bool live;
+
+ if (disp->enabled)
+ return;
+
+ zynqmp_disp_av_buf_enable(&disp->av_buf);
+ /* Choose clock source based on the DT clock handle */
+ zynqmp_disp_av_buf_set_vid_clock_src(&disp->av_buf, !!disp->_ps_pclk);
+ zynqmp_disp_av_buf_set_aud_clock_src(&disp->av_buf, !!disp->_ps_audclk);
+ live = zynqmp_disp_layer_is_live(disp);
+ /* Internal timing only when no layer is driven by a live source */
+ zynqmp_disp_av_buf_set_vid_timing_src(&disp->av_buf, !live);
+ zynqmp_disp_av_buf_enable_buf(&disp->av_buf);
+ zynqmp_disp_av_buf_enable_aud(&disp->av_buf);
+ zynqmp_disp_aud_init(&disp->aud);
+ disp->enabled = true;
+}
+
+/**
+ * zynqmp_disp_disable - Disable the Display subsystem
+ * @disp: Display subsystem
+ * @force: flag to disable forcefully
+ *
+ * Disable the Display subsystem. Unless @force, this is a no-op while the
+ * pipeline is already disabled or any layer is still enabled.
+ */
+static void zynqmp_disp_disable(struct zynqmp_disp *disp, bool force)
+{
+ struct drm_crtc *crtc = &disp->xlnx_crtc.crtc;
+
+ if (!force && (!disp->enabled || zynqmp_disp_layer_is_enabled(disp)))
+ return;
+
+ zynqmp_disp_aud_deinit(&disp->aud);
+ zynqmp_disp_av_buf_disable_aud(&disp->av_buf);
+ zynqmp_disp_av_buf_disable_buf(&disp->av_buf);
+ zynqmp_disp_av_buf_disable(&disp->av_buf);
+
+ /* Mark the flip is done as crtc is disabled anyway */
+ if (crtc->state->event) {
+ complete_all(crtc->state->event->base.completion);
+ crtc->state->event = NULL;
+ }
+
+ disp->enabled = false;
+}
+
+/**
+ * zynqmp_disp_init - Initialize the Display subsystem states
+ * @disp: Display subsystem
+ *
+ * Some states are not initialized as desired. For example, the output select
+ * register resets to the live source. Disable every layer's buffer source
+ * so the registers start from a known state.
+ */
+static void zynqmp_disp_init(struct zynqmp_disp *disp)
+{
+ unsigned int id;
+
+ for (id = 0; id < ZYNQMP_DISP_NUM_LAYERS; id++)
+ zynqmp_disp_av_buf_disable_vid(&disp->av_buf,
+ &disp->layers[id]);
+}
+
+/*
+ * ZynqMP Display external functions for zynqmp_dp
+ */
+
+/**
+ * zynqmp_disp_handle_vblank - Handle the vblank event
+ * @disp: Display subsystem
+ *
+ * This function handles the vblank interrupt, and sends an event to
+ * CRTC object. This will be called by the DP vblank interrupt handler.
+ */
+void zynqmp_disp_handle_vblank(struct zynqmp_disp *disp)
+{
+ struct drm_crtc *crtc = &disp->xlnx_crtc.crtc;
+
+ drm_crtc_handle_vblank(crtc);
+}
+
+/**
+ * zynqmp_disp_get_apb_clk_rate - Get the current APB clock rate
+ * @disp: Display subsystem
+ *
+ * Return: the current APB (aclk) clock rate in Hz.
+ */
+unsigned int zynqmp_disp_get_apb_clk_rate(struct zynqmp_disp *disp)
+{
+ return clk_get_rate(disp->aclk);
+}
+
+/**
+ * zynqmp_disp_aud_enabled - If the audio is enabled
+ * @disp: Display subsystem
+ *
+ * Audio is considered enabled iff an audio clock was provided.
+ *
+ * Return: true if audio is enabled, or false.
+ */
+bool zynqmp_disp_aud_enabled(struct zynqmp_disp *disp)
+{
+ return !!disp->audclk;
+}
+
+/**
+ * zynqmp_disp_get_aud_clk_rate - Get the current audio clock rate
+ * @disp: Display subsystem
+ *
+ * Return: the current audio clock rate, or 0 if audio is disabled.
+ */
+unsigned int zynqmp_disp_get_aud_clk_rate(struct zynqmp_disp *disp)
+{
+ /*
+ * Fix: the condition was inverted and the APB clock (aclk) was
+ * queried. Per the kernel-doc and zynqmp_disp_aud_enabled() (which
+ * tests audclk), report the audio clock rate when audio is enabled
+ * and 0 otherwise.
+ */
+ if (!zynqmp_disp_aud_enabled(disp))
+ return 0;
+ return clk_get_rate(disp->audclk);
+}
+
+/**
+ * zynqmp_disp_get_crtc_mask - Return the CRTC bit mask
+ * @disp: Display subsystem
+ *
+ * Return: the crtc mask of the zyqnmp_disp CRTC.
+ */
+uint32_t zynqmp_disp_get_crtc_mask(struct zynqmp_disp *disp)
+{
+ return drm_crtc_mask(&disp->xlnx_crtc.crtc);
+}
+
+/*
+ * Xlnx bridge functions
+ */
+
+/* Map an xlnx_bridge back to its embedding zynqmp_disp_layer */
+static inline struct zynqmp_disp_layer
+*bridge_to_layer(struct xlnx_bridge *bridge)
+{
+ return container_of(bridge, struct zynqmp_disp_layer, bridge);
+}
+
+/*
+ * Enable a layer as a live (PL-fed) source via the xlnx bridge interface.
+ * Requires the PL pixel clock; switches the pipeline to the PL clock and
+ * external timing source, restarting the pipeline if it was using the
+ * PS clock or internal timing.
+ */
+static int zynqmp_disp_bridge_enable(struct xlnx_bridge *bridge)
+{
+ struct zynqmp_disp_layer *layer = bridge_to_layer(bridge);
+ struct zynqmp_disp *disp = layer->disp;
+ int ret;
+
+ if (!disp->_pl_pclk) {
+ dev_err(disp->dev, "PL clock is required for live\n");
+ return -ENODEV;
+ }
+
+ /* Re-validate against the peer layer's size */
+ ret = zynqmp_disp_layer_check_size(disp, layer, layer->w, layer->h);
+ if (ret)
+ return ret;
+
+ /* Re-apply cached alpha settings to the blender */
+ zynqmp_disp_set_g_alpha(disp, disp->alpha_en);
+ zynqmp_disp_set_alpha(disp, disp->alpha);
+ ret = zynqmp_disp_layer_enable(layer->disp, layer,
+ ZYNQMP_DISP_LAYER_LIVE);
+ if (ret)
+ return ret;
+
+ /* TPG lives on the video layer; re-apply it when gfx comes up */
+ if (layer->id == ZYNQMP_DISP_LAYER_GFX && disp->tpg_on) {
+ layer = &disp->layers[ZYNQMP_DISP_LAYER_VID];
+ zynqmp_disp_layer_set_tpg(disp, layer, disp->tpg_on);
+ }
+
+ /* Clock/timing source can only change while the pipe is down */
+ if (zynqmp_disp_av_buf_vid_timing_src_is_int(&disp->av_buf) ||
+ zynqmp_disp_av_buf_vid_clock_src_is_ps(&disp->av_buf)) {
+ dev_info(disp->dev,
+ "Disabling the pipeline to change the clk/timing src");
+ zynqmp_disp_disable(disp, true);
+ zynqmp_disp_av_buf_set_vid_clock_src(&disp->av_buf, false);
+ zynqmp_disp_av_buf_set_vid_timing_src(&disp->av_buf, false);
+ }
+
+ zynqmp_disp_enable(disp);
+
+ return 0;
+}
+
+/*
+ * Disable a live layer via the xlnx bridge interface. When no live layer
+ * remains, switch the pipeline back to the PS clock and internal timing
+ * (restarting it if other layers are still enabled).
+ */
+static void zynqmp_disp_bridge_disable(struct xlnx_bridge *bridge)
+{
+ struct zynqmp_disp_layer *layer = bridge_to_layer(bridge);
+ struct zynqmp_disp *disp = layer->disp;
+
+ zynqmp_disp_disable(disp, false);
+
+ zynqmp_disp_layer_disable(disp, layer, ZYNQMP_DISP_LAYER_LIVE);
+ if (layer->id == ZYNQMP_DISP_LAYER_VID && disp->tpg_on)
+ zynqmp_disp_layer_set_tpg(disp, layer, disp->tpg_on);
+
+ if (!zynqmp_disp_layer_is_live(disp)) {
+ dev_info(disp->dev,
+ "Disabling the pipeline to change the clk/timing src");
+ zynqmp_disp_disable(disp, true);
+ zynqmp_disp_av_buf_set_vid_clock_src(&disp->av_buf, true);
+ zynqmp_disp_av_buf_set_vid_timing_src(&disp->av_buf, true);
+ if (zynqmp_disp_layer_is_enabled(disp))
+ zynqmp_disp_enable(disp);
+ }
+}
+
+/*
+ * Configure the live input of a layer: validate the size against the peer
+ * layer, then program the live bus format.
+ */
+static int zynqmp_disp_bridge_set_input(struct xlnx_bridge *bridge,
+ u32 width, u32 height, u32 bus_fmt)
+{
+ struct zynqmp_disp_layer *layer = bridge_to_layer(bridge);
+ int ret;
+
+ ret = zynqmp_disp_layer_check_size(layer->disp, layer, width, height);
+ if (ret)
+ return ret;
+
+ ret = zynqmp_disp_layer_set_live_fmt(layer->disp, layer, bus_fmt);
+ if (ret)
+ dev_err(layer->disp->dev, "failed to set live fmt\n");
+
+ return ret;
+}
+
+/* Report the media bus formats this layer accepts on its live input */
+static int zynqmp_disp_bridge_get_input_fmts(struct xlnx_bridge *bridge,
+ const u32 **fmts, u32 *count)
+{
+ struct zynqmp_disp_layer *layer = bridge_to_layer(bridge);
+
+ *fmts = layer->bus_fmts;
+ *count = layer->num_bus_fmts;
+
+ return 0;
+}
+
+/*
+ * DRM plane functions
+ */
+
+/* Map a drm_plane back to its embedding zynqmp_disp_layer */
+static inline struct zynqmp_disp_layer *plane_to_layer(struct drm_plane *plane)
+{
+ return container_of(plane, struct zynqmp_disp_layer, plane);
+}
+
+/*
+ * Enable a plane's layer in non-live (memory) mode, re-applying the cached
+ * alpha settings and, for the gfx plane, the TPG state on the video layer.
+ */
+static int zynqmp_disp_plane_enable(struct drm_plane *plane)
+{
+ struct zynqmp_disp_layer *layer = plane_to_layer(plane);
+ struct zynqmp_disp *disp = layer->disp;
+ int ret;
+
+ zynqmp_disp_set_g_alpha(disp, disp->alpha_en);
+ zynqmp_disp_set_alpha(disp, disp->alpha);
+ ret = zynqmp_disp_layer_enable(layer->disp, layer,
+ ZYNQMP_DISP_LAYER_NONLIVE);
+ if (ret)
+ return ret;
+
+ if (layer->id == ZYNQMP_DISP_LAYER_GFX && disp->tpg_on) {
+ layer = &disp->layers[ZYNQMP_DISP_LAYER_VID];
+ zynqmp_disp_layer_set_tpg(disp, layer, disp->tpg_on);
+ }
+
+ return 0;
+}
+
+/* Disable a plane's layer (non-live mode), re-applying TPG if relevant */
+static int zynqmp_disp_plane_disable(struct drm_plane *plane)
+{
+ struct zynqmp_disp_layer *layer = plane_to_layer(plane);
+ struct zynqmp_disp *disp = layer->disp;
+
+ zynqmp_disp_layer_disable(disp, layer, ZYNQMP_DISP_LAYER_NONLIVE);
+ if (layer->id == ZYNQMP_DISP_LAYER_VID && disp->tpg_on)
+ zynqmp_disp_layer_set_tpg(disp, layer, disp->tpg_on);
+
+ return 0;
+}
+
+/*
+ * Program a plane for scanout: validate the size, build one interleaved
+ * DMA template per framebuffer sub-plane (Y/UV etc.), and set the layer
+ * format. The DMA descriptors are submitted later by
+ * zynqmp_disp_layer_enable().
+ */
+static int zynqmp_disp_plane_mode_set(struct drm_plane *plane,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ u32 src_x, u32 src_y,
+ u32 src_w, u32 src_h)
+{
+ struct zynqmp_disp_layer *layer = plane_to_layer(plane);
+ const struct drm_format_info *info = fb->format;
+ struct device *dev = layer->disp->dev;
+ dma_addr_t paddr;
+ unsigned int i;
+ int ret;
+
+ if (!info) {
+ dev_err(dev, "No format info found\n");
+ return -EINVAL;
+ }
+
+ ret = zynqmp_disp_layer_check_size(layer->disp, layer, src_w, src_h);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < info->num_planes; i++) {
+ /* Chroma sub-planes are subsampled by hsub/vsub */
+ unsigned int width = src_w / (i ? info->hsub : 1);
+ unsigned int height = src_h / (i ? info->vsub : 1);
+ int width_bytes;
+
+ paddr = drm_fb_cma_get_gem_addr(fb, plane->state, i);
+ if (!paddr) {
+ dev_err(dev, "failed to get a paddr\n");
+ return -EINVAL;
+ }
+
+ layer->dma[i].xt.numf = height;
+ width_bytes = drm_format_plane_width_bytes(info, i, width);
+ layer->dma[i].sgl[0].size = width_bytes;
+ /* Inter-chunk gap = pitch minus the visible line width */
+ layer->dma[i].sgl[0].icg = fb->pitches[i] -
+ layer->dma[i].sgl[0].size;
+ layer->dma[i].xt.src_start = paddr;
+ layer->dma[i].xt.frame_size = 1;
+ layer->dma[i].xt.dir = DMA_MEM_TO_DEV;
+ layer->dma[i].xt.src_sgl = true;
+ layer->dma[i].xt.dst_sgl = false;
+ layer->dma[i].is_active = true;
+ }
+
+ /* Deactivate any channels the new format doesn't use */
+ for (; i < ZYNQMP_DISP_MAX_NUM_SUB_PLANES; i++)
+ layer->dma[i].is_active = false;
+
+ ret = zynqmp_disp_layer_set_fmt(layer->disp, layer, info->format);
+ if (ret)
+ dev_err(dev, "failed to set dp_sub layer fmt\n");
+
+ return ret;
+}
+
+/* DRM plane .destroy: unregister the bridge, then clean up the plane */
+static void zynqmp_disp_plane_destroy(struct drm_plane *plane)
+{
+ struct zynqmp_disp_layer *layer = plane_to_layer(plane);
+
+ xlnx_bridge_unregister(&layer->bridge);
+ drm_plane_cleanup(plane);
+}
+
+/*
+ * DRM plane .atomic_set_property: dispatch the driver-private properties
+ * (global alpha value / enable, TPG) to their setters.
+ */
+static int
+zynqmp_disp_plane_atomic_set_property(struct drm_plane *plane,
+ struct drm_plane_state *state,
+ struct drm_property *property, u64 val)
+{
+ struct zynqmp_disp_layer *layer = plane_to_layer(plane);
+ struct zynqmp_disp *disp = layer->disp;
+ int ret = 0;
+
+ if (property == disp->g_alpha_prop)
+ zynqmp_disp_set_alpha(disp, val);
+ else if (property == disp->g_alpha_en_prop)
+ zynqmp_disp_set_g_alpha(disp, val);
+ else if (property == disp->tpg_prop)
+ ret = zynqmp_disp_layer_set_tpg(disp, layer, val);
+ else
+ return -EINVAL;
+
+ return ret;
+}
+
+/*
+ * DRM plane .atomic_get_property: read back the driver-private properties
+ * (global alpha value / enable, TPG).
+ */
+static int
+zynqmp_disp_plane_atomic_get_property(struct drm_plane *plane,
+ const struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct zynqmp_disp_layer *layer = plane_to_layer(plane);
+ struct zynqmp_disp *disp = layer->disp;
+ int ret = 0;
+
+ if (property == disp->g_alpha_prop)
+ *val = zynqmp_disp_get_alpha(disp);
+ else if (property == disp->g_alpha_en_prop)
+ *val = zynqmp_disp_get_g_alpha(disp);
+ else if (property == disp->tpg_prop)
+ *val = zynqmp_disp_layer_get_tpg(disp, layer);
+ else
+ return -EINVAL;
+
+ return ret;
+}
+
+/*
+ * DRM plane .update_plane: like drm_atomic_helper_update_plane(), but also
+ * opts into an async (non-vblank-synchronized) commit when the atomic
+ * helpers say it is possible.
+ */
+static int
+zynqmp_disp_plane_atomic_update_plane(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ u32 src_x, u32 src_y,
+ u32 src_w, u32 src_h,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_atomic_state *state;
+ struct drm_plane_state *plane_state;
+ int ret;
+
+ state = drm_atomic_state_alloc(plane->dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = ctx;
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto fail;
+ }
+
+ ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+ if (ret)
+ goto fail;
+ drm_atomic_set_fb_for_plane(plane_state, fb);
+ plane_state->crtc_x = crtc_x;
+ plane_state->crtc_y = crtc_y;
+ plane_state->crtc_w = crtc_w;
+ plane_state->crtc_h = crtc_h;
+ plane_state->src_x = src_x;
+ plane_state->src_y = src_y;
+ plane_state->src_w = src_w;
+ plane_state->src_h = src_h;
+
+ if (plane == crtc->cursor)
+ state->legacy_cursor_update = true;
+
+ /* Do async-update if possible */
+ state->async_update = !drm_atomic_helper_async_check(plane->dev, state);
+ ret = drm_atomic_commit(state);
+fail:
+ /* Drops our reference; state is freed once the commit completes */
+ drm_atomic_state_put(state);
+ return ret;
+}
+
+/*
+ * Plane functions. The legacy .update_plane is overridden by a local
+ * variant that additionally opts into the async commit path. The table
+ * is const: DRM only ever reads these ops.
+ */
+static const struct drm_plane_funcs zynqmp_disp_plane_funcs = {
+	.update_plane = zynqmp_disp_plane_atomic_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.atomic_set_property = zynqmp_disp_plane_atomic_set_property,
+	.atomic_get_property = zynqmp_disp_plane_atomic_get_property,
+	.destroy = zynqmp_disp_plane_destroy,
+	.reset = drm_atomic_helper_plane_reset,
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+/*
+ * zynqmp_disp_plane_atomic_update - Program a plane from its new state
+ *
+ * Skips the hardware update when nothing visible changed between the old
+ * and new state. When the pixel format changed, the plane is disabled
+ * before the new configuration is applied and re-enabled.
+ */
+static void
+zynqmp_disp_plane_atomic_update(struct drm_plane *plane,
+				struct drm_plane_state *old_state)
+{
+	int ret;
+
+	if (!plane->state->crtc || !plane->state->fb)
+		return;
+
+	/* No-op if the visible configuration is unchanged */
+	if (plane->state->fb == old_state->fb &&
+	    plane->state->crtc_x == old_state->crtc_x &&
+	    plane->state->crtc_y == old_state->crtc_y &&
+	    plane->state->crtc_w == old_state->crtc_w &&
+	    plane->state->crtc_h == old_state->crtc_h &&
+	    plane->state->src_x == old_state->src_x &&
+	    plane->state->src_y == old_state->src_y &&
+	    plane->state->src_w == old_state->src_w &&
+	    plane->state->src_h == old_state->src_h)
+		return;
+
+	if (old_state->fb &&
+	    old_state->fb->format->format != plane->state->fb->format->format)
+		zynqmp_disp_plane_disable(plane);
+
+	/* src_* are 16.16 fixed point, hence the >> 16 */
+	ret = zynqmp_disp_plane_mode_set(plane, plane->state->fb,
+					 plane->state->crtc_x,
+					 plane->state->crtc_y,
+					 plane->state->crtc_w,
+					 plane->state->crtc_h,
+					 plane->state->src_x >> 16,
+					 plane->state->src_y >> 16,
+					 plane->state->src_w >> 16,
+					 plane->state->src_h >> 16);
+	if (ret)
+		return;
+
+	zynqmp_disp_plane_enable(plane);
+}
+
+/* drm_plane helper .atomic_disable: turn the layer off */
+static void
+zynqmp_disp_plane_atomic_disable(struct drm_plane *plane,
+				 struct drm_plane_state *old_state)
+{
+	zynqmp_disp_plane_disable(plane);
+}
+
+/* Every update is accepted for the async (non-vblank-synchronized) path */
+static int zynqmp_disp_plane_atomic_async_check(struct drm_plane *plane,
+						struct drm_plane_state *state)
+{
+	return 0;
+}
+
+/*
+ * zynqmp_disp_plane_atomic_async_update - Async (cursor-style) update
+ *
+ * Copies the new framebuffer and coordinates straight into the current
+ * plane state and reprograms the hardware immediately, disabling the
+ * plane first only when the pixel format changed.
+ */
+static void
+zynqmp_disp_plane_atomic_async_update(struct drm_plane *plane,
+				      struct drm_plane_state *new_state)
+{
+	int ret;
+
+	if (plane->state->fb == new_state->fb)
+		return;
+
+	if (plane->state->fb &&
+	    plane->state->fb->format->format != new_state->fb->format->format)
+		zynqmp_disp_plane_disable(plane);
+
+	/* Update the current state with new configurations */
+	swap(plane->state->fb, new_state->fb);
+	plane->state->crtc = new_state->crtc;
+	plane->state->crtc_x = new_state->crtc_x;
+	plane->state->crtc_y = new_state->crtc_y;
+	plane->state->crtc_w = new_state->crtc_w;
+	plane->state->crtc_h = new_state->crtc_h;
+	plane->state->src_x = new_state->src_x;
+	plane->state->src_y = new_state->src_y;
+	plane->state->src_w = new_state->src_w;
+	plane->state->src_h = new_state->src_h;
+	plane->state->state = new_state->state;
+
+	/* src_* are 16.16 fixed point, hence the >> 16 */
+	ret = zynqmp_disp_plane_mode_set(plane, plane->state->fb,
+					 plane->state->crtc_x,
+					 plane->state->crtc_y,
+					 plane->state->crtc_w,
+					 plane->state->crtc_h,
+					 plane->state->src_x >> 16,
+					 plane->state->src_y >> 16,
+					 plane->state->src_w >> 16,
+					 plane->state->src_h >> 16);
+	if (ret)
+		return;
+
+	zynqmp_disp_plane_enable(plane);
+}
+
+/* Atomic helper callbacks for the planes */
+static const struct drm_plane_helper_funcs zynqmp_disp_plane_helper_funcs = {
+	.atomic_update = zynqmp_disp_plane_atomic_update,
+	.atomic_disable = zynqmp_disp_plane_atomic_disable,
+	.atomic_async_check = zynqmp_disp_plane_atomic_async_check,
+	.atomic_async_update = zynqmp_disp_plane_atomic_async_update,
+};
+
+/*
+ * zynqmp_disp_create_plane - Create DRM planes for all display layers
+ * @disp: display subsystem context
+ *
+ * Initializes one universal plane per layer (video layer as overlay,
+ * graphics layer as primary), registers the per-layer xlnx bridges, and
+ * attaches the alpha and test-pattern plane properties.
+ *
+ * Return: 0 on success, or the error from drm_universal_plane_init().
+ */
+static int zynqmp_disp_create_plane(struct zynqmp_disp *disp)
+{
+	struct zynqmp_disp_layer *layer;
+	unsigned int i;
+	u32 *fmts = NULL;
+	unsigned int num_fmts = 0;
+	enum drm_plane_type type;
+	int ret;
+
+	/* graphics layer is primary, and video layer is overlay */
+	type = DRM_PLANE_TYPE_OVERLAY;
+	for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++) {
+		layer = &disp->layers[i];
+		zynqmp_disp_layer_get_fmts(disp, layer, &fmts, &num_fmts);
+		ret = drm_universal_plane_init(disp->drm, &layer->plane, 0,
+					       &zynqmp_disp_plane_funcs, fmts,
+					       num_fmts, NULL, type, NULL);
+		if (ret)
+			goto err_plane;
+		drm_plane_helper_add(&layer->plane,
+				     &zynqmp_disp_plane_helper_funcs);
+		type = DRM_PLANE_TYPE_PRIMARY;
+	}
+
+	for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++) {
+		layer = &disp->layers[i];
+		layer->bridge.enable = &zynqmp_disp_bridge_enable;
+		layer->bridge.disable = &zynqmp_disp_bridge_disable;
+		layer->bridge.set_input = &zynqmp_disp_bridge_set_input;
+		layer->bridge.get_input_fmts =
+			&zynqmp_disp_bridge_get_input_fmts;
+		layer->bridge.of_node = layer->of_node;
+		xlnx_bridge_register(&layer->bridge);
+	}
+
+	/*
+	 * Attach the global alpha properties to the layer the loop above
+	 * ended on (the last, graphics, layer).
+	 */
+	drm_object_attach_property(&layer->plane.base, disp->g_alpha_prop,
+				   ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA_MAX);
+	disp->alpha = ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA_MAX;
+	/* Enable the global alpha as default */
+	drm_object_attach_property(&layer->plane.base, disp->g_alpha_en_prop,
+				   true);
+	disp->alpha_en = true;
+
+	/* The test pattern generator property lives on the video layer */
+	layer = &disp->layers[ZYNQMP_DISP_LAYER_VID];
+	drm_object_attach_property(&layer->plane.base, disp->tpg_prop, false);
+
+	return 0;
+
+err_plane:
+	/* Unwind every plane initialized before the failing one */
+	while (i--)
+		drm_plane_cleanup(&disp->layers[i].plane);
+	return ret;
+}
+
+/* Destroy all layer planes (counterpart of zynqmp_disp_create_plane()) */
+static void zynqmp_disp_destroy_plane(struct zynqmp_disp *disp)
+{
+	unsigned int i;
+
+	for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++)
+		zynqmp_disp_plane_destroy(&disp->layers[i].plane);
+}
+
+/*
+ * Xlnx crtc functions
+ *
+ * Capability callbacks exported to the xlnx DRM core through
+ * struct xlnx_crtc.
+ */
+
+static inline struct zynqmp_disp *xlnx_crtc_to_disp(struct xlnx_crtc *xlnx_crtc)
+{
+	return container_of(xlnx_crtc, struct zynqmp_disp, xlnx_crtc);
+}
+
+/* Maximum supported width, a fixed hardware limit */
+static int zynqmp_disp_get_max_width(struct xlnx_crtc *xlnx_crtc)
+{
+	return ZYNQMP_DISP_MAX_WIDTH;
+}
+
+/* Maximum supported height, a fixed hardware limit */
+static int zynqmp_disp_get_max_height(struct xlnx_crtc *xlnx_crtc)
+{
+	return ZYNQMP_DISP_MAX_HEIGHT;
+}
+
+/* Current DRM format of the graphics layer */
+static uint32_t zynqmp_disp_get_format(struct xlnx_crtc *xlnx_crtc)
+{
+	struct zynqmp_disp *disp = xlnx_crtc_to_disp(xlnx_crtc);
+
+	return disp->layers[ZYNQMP_DISP_LAYER_GFX].fmt->drm_fmt;
+}
+
+/*
+ * Buffer alignment required by the video layer's DMA engine;
+ * dmaengine's copy_align is the log2 of the alignment, hence the shift.
+ */
+static unsigned int zynqmp_disp_get_align(struct xlnx_crtc *xlnx_crtc)
+{
+	struct zynqmp_disp *disp = xlnx_crtc_to_disp(xlnx_crtc);
+	struct zynqmp_disp_layer *layer = &disp->layers[ZYNQMP_DISP_LAYER_VID];
+
+	return 1 << layer->dma->chan->device->copy_align;
+}
+
+/* DMA addressing capability of the display DMA */
+static u64 zynqmp_disp_get_dma_mask(struct xlnx_crtc *xlnx_crtc)
+{
+	return DMA_BIT_MASK(ZYNQMP_DISP_MAX_DMA_BIT);
+}
+
+/*
+ * DRM crtc functions
+ */
+
+/* Map a DRM CRTC back to its owning display context via the xlnx crtc */
+static inline struct zynqmp_disp *crtc_to_disp(struct drm_crtc *crtc)
+{
+	struct xlnx_crtc *xlnx_crtc = to_xlnx_crtc(crtc);
+
+	return xlnx_crtc_to_disp(xlnx_crtc);
+}
+
+/*
+ * zynqmp_disp_crtc_mode_set - Program the pixel clock and stream timing
+ *
+ * The pixel clock is disabled while its rate is changed. A deviation of
+ * more than 5% between the requested and achieved rate is reported at
+ * info level, smaller ones at debug level.
+ *
+ * Return: 0 on success, or the clk_set_rate() error.
+ */
+static int zynqmp_disp_crtc_mode_set(struct drm_crtc *crtc,
+				     struct drm_display_mode *mode,
+				     struct drm_display_mode *adjusted_mode,
+				     int x, int y,
+				     struct drm_framebuffer *old_fb)
+{
+	struct zynqmp_disp *disp = crtc_to_disp(crtc);
+	unsigned long rate;
+	long diff;
+	int ret;
+
+	/* mode->clock is in kHz, the clk API wants Hz */
+	zynqmp_disp_clk_disable(disp->pclk, &disp->pclk_en);
+	ret = clk_set_rate(disp->pclk, adjusted_mode->clock * 1000);
+	if (ret) {
+		dev_err(disp->dev, "failed to set a pixel clock\n");
+		return ret;
+	}
+
+	rate = clk_get_rate(disp->pclk);
+	diff = rate - adjusted_mode->clock * 1000;
+	if (abs(diff) > (adjusted_mode->clock * 1000) / 20) {
+		dev_info(disp->dev, "request pixel rate: %d actual rate: %lu\n",
+			 adjusted_mode->clock, rate);
+	} else {
+		dev_dbg(disp->dev, "request pixel rate: %d actual rate: %lu\n",
+			adjusted_mode->clock, rate);
+	}
+
+	/* The timing register should be programmed always */
+	zynqmp_dp_encoder_mode_set_stream(disp->dpsub->dp, adjusted_mode);
+
+	return 0;
+}
+
+/*
+ * zynqmp_disp_crtc_atomic_enable - Enable the CRTC
+ *
+ * Programs the mode, takes a runtime PM reference, enables the pixel
+ * clock and the display, then waits about three vblank periods so the
+ * timing generator is stable before planes are updated.
+ */
+static void
+zynqmp_disp_crtc_atomic_enable(struct drm_crtc *crtc,
+			       struct drm_crtc_state *old_crtc_state)
+{
+	struct zynqmp_disp *disp = crtc_to_disp(crtc);
+	struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
+	int ret, vrefresh;
+
+	zynqmp_disp_crtc_mode_set(crtc, &crtc->state->mode,
+				  adjusted_mode, crtc->x, crtc->y, NULL);
+
+	pm_runtime_get_sync(disp->dev);
+	ret = zynqmp_disp_clk_enable(disp->pclk, &disp->pclk_en);
+	if (ret) {
+		dev_err(disp->dev, "failed to enable a pixel clock\n");
+		/* Balance the runtime PM reference taken above */
+		pm_runtime_put_sync(disp->dev);
+		return;
+	}
+	zynqmp_disp_set_output_fmt(disp, disp->color);
+	zynqmp_disp_set_bg_color(disp, disp->bg_c0, disp->bg_c1, disp->bg_c2);
+	zynqmp_disp_enable(disp);
+	/* Delay of 3 vblank intervals for timing gen to be stable */
+	vrefresh = (adjusted_mode->clock * 1000) /
+		   (adjusted_mode->vtotal * adjusted_mode->htotal);
+	msleep(3 * 1000 / vrefresh);
+}
+
+/*
+ * zynqmp_disp_crtc_atomic_disable - Disable the CRTC
+ *
+ * Stops the pixel clock, primary plane and display, turns vblank off,
+ * and drops the runtime PM reference taken in the enable path.
+ */
+static void
+zynqmp_disp_crtc_atomic_disable(struct drm_crtc *crtc,
+				struct drm_crtc_state *old_crtc_state)
+{
+	struct zynqmp_disp *disp = crtc_to_disp(crtc);
+
+	zynqmp_disp_clk_disable(disp->pclk, &disp->pclk_en);
+	zynqmp_disp_plane_disable(crtc->primary);
+	zynqmp_disp_disable(disp, true);
+	drm_crtc_vblank_off(crtc);
+	pm_runtime_put_sync(disp->dev);
+}
+
+/* Ensure all planes on this CRTC are part of the commit */
+static int zynqmp_disp_crtc_atomic_check(struct drm_crtc *crtc,
+					 struct drm_crtc_state *state)
+{
+	return drm_atomic_add_affected_planes(state->state, crtc);
+}
+
+/*
+ * zynqmp_disp_crtc_atomic_begin - Begin a CRTC commit
+ *
+ * Turns vblank on and arms any pending page-flip event directly, so the
+ * event is still delivered when the CRTC is being disabled and no vblank
+ * will occur.
+ */
+static void
+zynqmp_disp_crtc_atomic_begin(struct drm_crtc *crtc,
+			      struct drm_crtc_state *old_crtc_state)
+{
+	drm_crtc_vblank_on(crtc);
+	/* Don't rely on vblank when disabling crtc */
+	spin_lock_irq(&crtc->dev->event_lock);
+	if (crtc->state->event) {
+		/* Consume the flip_done event from atomic helper */
+		crtc->state->event->pipe = drm_crtc_index(crtc);
+		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+		drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+		crtc->state->event = NULL;
+	}
+	spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+/* CRTC atomic helper callbacks; const as DRM treats the table as read-only */
+static const struct drm_crtc_helper_funcs zynqmp_disp_crtc_helper_funcs = {
+	.atomic_enable = zynqmp_disp_crtc_atomic_enable,
+	.atomic_disable = zynqmp_disp_crtc_atomic_disable,
+	.atomic_check = zynqmp_disp_crtc_atomic_check,
+	.atomic_begin = zynqmp_disp_crtc_atomic_begin,
+};
+
+/*
+ * zynqmp_disp_crtc_destroy - drm_crtc .destroy callback
+ *
+ * NOTE(review): this disables the CRTC unconditionally before cleanup;
+ * if the CRTC was never enabled, the disable path still calls
+ * pm_runtime_put_sync() without a matching get — confirm the
+ * enable/disable pairing is always balanced on this path.
+ */
+static void zynqmp_disp_crtc_destroy(struct drm_crtc *crtc)
+{
+	zynqmp_disp_crtc_atomic_disable(crtc, NULL);
+	drm_crtc_cleanup(crtc);
+}
+
+/* Delegate vblank interrupt enabling to the DisplayPort core */
+static int zynqmp_disp_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+	struct zynqmp_disp *disp = crtc_to_disp(crtc);
+
+	zynqmp_dp_enable_vblank(disp->dpsub->dp);
+
+	return 0;
+}
+
+/* Delegate vblank interrupt disabling to the DisplayPort core */
+static void zynqmp_disp_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+	struct zynqmp_disp *disp = crtc_to_disp(crtc);
+
+	zynqmp_dp_disable_vblank(disp->dpsub->dp);
+}
+
+/*
+ * zynqmp_disp_crtc_atomic_set_property - Cache a driver-private CRTC property
+ *
+ * Values are only stored here; they are applied to hardware when the
+ * CRTC is enabled (see zynqmp_disp_crtc_atomic_enable()).
+ */
+static int
+zynqmp_disp_crtc_atomic_set_property(struct drm_crtc *crtc,
+				     struct drm_crtc_state *state,
+				     struct drm_property *property,
+				     uint64_t val)
+{
+	struct zynqmp_disp *disp = crtc_to_disp(crtc);
+
+	/*
+	 * CRTC prop values are just stored here and applied when CRTC gets
+	 * enabled
+	 */
+	if (property == disp->color_prop)
+		disp->color = val;
+	else if (property == disp->bg_c0_prop)
+		disp->bg_c0 = val;
+	else if (property == disp->bg_c1_prop)
+		disp->bg_c1 = val;
+	else if (property == disp->bg_c2_prop)
+		disp->bg_c2 = val;
+	else
+		return -EINVAL;
+
+	return 0;
+}
+
+/* Read back the cached output color / background color CRTC properties */
+static int
+zynqmp_disp_crtc_atomic_get_property(struct drm_crtc *crtc,
+				     const struct drm_crtc_state *state,
+				     struct drm_property *property,
+				     uint64_t *val)
+{
+	struct zynqmp_disp *disp = crtc_to_disp(crtc);
+
+	if (property == disp->color_prop)
+		*val = disp->color;
+	else if (property == disp->bg_c0_prop)
+		*val = disp->bg_c0;
+	else if (property == disp->bg_c1_prop)
+		*val = disp->bg_c1;
+	else if (property == disp->bg_c2_prop)
+		*val = disp->bg_c2;
+	else
+		return -EINVAL;
+
+	return 0;
+}
+
+/* CRTC functions; const as DRM treats the table as read-only */
+static const struct drm_crtc_funcs zynqmp_disp_crtc_funcs = {
+	.destroy = zynqmp_disp_crtc_destroy,
+	.set_config = drm_atomic_helper_set_config,
+	.page_flip = drm_atomic_helper_page_flip,
+	.atomic_set_property = zynqmp_disp_crtc_atomic_set_property,
+	.atomic_get_property = zynqmp_disp_crtc_atomic_get_property,
+	.reset = drm_atomic_helper_crtc_reset,
+	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+	.enable_vblank = zynqmp_disp_crtc_enable_vblank,
+	.disable_vblank = zynqmp_disp_crtc_disable_vblank,
+};
+
+/*
+ * zynqmp_disp_create_crtc - Create the CRTC with the graphics plane as primary
+ * @disp: display subsystem context
+ *
+ * Initializes the DRM CRTC, attaches the output color and background
+ * color properties, fills in the xlnx crtc capability callbacks, and
+ * registers the CRTC with the xlnx DRM core.
+ */
+static void zynqmp_disp_create_crtc(struct zynqmp_disp *disp)
+{
+	struct drm_plane *plane = &disp->layers[ZYNQMP_DISP_LAYER_GFX].plane;
+	struct drm_mode_object *obj = &disp->xlnx_crtc.crtc.base;
+	int ret;
+
+	ret = drm_crtc_init_with_planes(disp->drm, &disp->xlnx_crtc.crtc, plane,
+					NULL, &zynqmp_disp_crtc_funcs, NULL);
+	if (ret) {
+		/* Don't register a CRTC that failed to initialize */
+		dev_err(disp->dev, "failed to init the crtc: %d\n", ret);
+		return;
+	}
+	drm_crtc_helper_add(&disp->xlnx_crtc.crtc,
+			    &zynqmp_disp_crtc_helper_funcs);
+	drm_object_attach_property(obj, disp->color_prop, 0);
+	zynqmp_dp_set_color(disp->dpsub->dp, zynqmp_disp_color_enum[0].name);
+	drm_object_attach_property(obj, disp->bg_c0_prop, 0);
+	drm_object_attach_property(obj, disp->bg_c1_prop, 0);
+	drm_object_attach_property(obj, disp->bg_c2_prop, 0);
+
+	disp->xlnx_crtc.get_max_width = &zynqmp_disp_get_max_width;
+	disp->xlnx_crtc.get_max_height = &zynqmp_disp_get_max_height;
+	disp->xlnx_crtc.get_format = &zynqmp_disp_get_format;
+	disp->xlnx_crtc.get_align = &zynqmp_disp_get_align;
+	disp->xlnx_crtc.get_dma_mask = &zynqmp_disp_get_dma_mask;
+	xlnx_crtc_register(disp->drm, &disp->xlnx_crtc);
+}
+
+/* Unregister and destroy the CRTC (counterpart of zynqmp_disp_create_crtc()) */
+static void zynqmp_disp_destroy_crtc(struct zynqmp_disp *disp)
+{
+	xlnx_crtc_unregister(disp->drm, &disp->xlnx_crtc);
+	zynqmp_disp_crtc_destroy(&disp->xlnx_crtc.crtc);
+}
+
+/* Restrict every layer plane to the one CRTC this subsystem provides */
+static void zynqmp_disp_map_crtc_to_plane(struct zynqmp_disp *disp)
+{
+	u32 possible_crtcs = drm_crtc_mask(&disp->xlnx_crtc.crtc);
+	unsigned int i;
+
+	for (i = 0; i < ZYNQMP_DISP_NUM_LAYERS; i++)
+		disp->layers[i].plane.possible_crtcs = possible_crtcs;
+}
+
+/*
+ * Component functions
+ */
+
+/*
+ * zynqmp_disp_bind - Component bind callback
+ *
+ * Creates the driver-private DRM properties, then the planes and the
+ * CRTC, and maps the planes onto the CRTC.
+ *
+ * Return: 0 on success, -ENOMEM on property allocation failure, or the
+ * error from zynqmp_disp_create_plane().
+ */
+int zynqmp_disp_bind(struct device *dev, struct device *master, void *data)
+{
+	struct zynqmp_dpsub *dpsub = dev_get_drvdata(dev);
+	struct zynqmp_disp *disp = dpsub->disp;
+	struct drm_device *drm = data;
+	int num;
+	u64 max;
+	int ret;
+
+	disp->drm = drm;
+
+	max = ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA_MAX;
+	disp->g_alpha_prop = drm_property_create_range(drm, 0, "alpha", 0, max);
+	disp->g_alpha_en_prop = drm_property_create_bool(drm, 0,
+							 "g_alpha_en");
+	num = ARRAY_SIZE(zynqmp_disp_color_enum);
+	disp->color_prop = drm_property_create_enum(drm, 0,
+						    "output_color",
+						    zynqmp_disp_color_enum,
+						    num);
+	max = ZYNQMP_DISP_V_BLEND_BG_MAX;
+	disp->bg_c0_prop = drm_property_create_range(drm, 0, "bg_c0", 0, max);
+	disp->bg_c1_prop = drm_property_create_range(drm, 0, "bg_c1", 0, max);
+	disp->bg_c2_prop = drm_property_create_range(drm, 0, "bg_c2", 0, max);
+	disp->tpg_prop = drm_property_create_bool(drm, 0, "tpg");
+
+	/* drm_property_create_*() returns NULL on allocation failure */
+	if (!disp->g_alpha_prop || !disp->g_alpha_en_prop ||
+	    !disp->color_prop || !disp->bg_c0_prop || !disp->bg_c1_prop ||
+	    !disp->bg_c2_prop || !disp->tpg_prop)
+		return -ENOMEM;
+
+	ret = zynqmp_disp_create_plane(disp);
+	if (ret)
+		return ret;
+	zynqmp_disp_create_crtc(disp);
+	zynqmp_disp_map_crtc_to_plane(disp);
+
+	return 0;
+}
+
+/*
+ * zynqmp_disp_unbind - Component unbind callback
+ *
+ * Tears down the CRTC, planes and DRM properties in the reverse order of
+ * their creation in zynqmp_disp_bind().
+ */
+void zynqmp_disp_unbind(struct device *dev, struct device *master, void *data)
+{
+	struct zynqmp_dpsub *dpsub = dev_get_drvdata(dev);
+	struct zynqmp_disp *disp = dpsub->disp;
+
+	zynqmp_disp_destroy_crtc(disp);
+	zynqmp_disp_destroy_plane(disp);
+	drm_property_destroy(disp->drm, disp->bg_c2_prop);
+	drm_property_destroy(disp->drm, disp->bg_c1_prop);
+	drm_property_destroy(disp->drm, disp->bg_c0_prop);
+	drm_property_destroy(disp->drm, disp->color_prop);
+	drm_property_destroy(disp->drm, disp->g_alpha_en_prop);
+	drm_property_destroy(disp->drm, disp->g_alpha_prop);
+}
+
+/*
+ * Platform initialization functions
+ */
+
+/*
+ * zynqmp_disp_enumerate_fmts - Build the per-layer format tables
+ * @disp: display subsystem context
+ *
+ * Populates the bus and DRM format lists for the video and graphics
+ * layers from the static av_buf_* tables and picks each layer's initial
+ * format. All allocations are device-managed.
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure.
+ */
+static int zynqmp_disp_enumerate_fmts(struct zynqmp_disp *disp)
+{
+	struct zynqmp_disp_layer *layer;
+	u32 *bus_fmts;
+	u32 i, size, num_bus_fmts;
+	u32 gfx_fmt = ZYNQMP_DISP_AV_BUF_GFX_FMT_RGB565;
+
+	num_bus_fmts = ARRAY_SIZE(av_buf_live_fmts);
+	/* devm_kcalloc() zeroes and checks the multiplication for overflow */
+	bus_fmts = devm_kcalloc(disp->dev, num_bus_fmts, sizeof(*bus_fmts),
+				GFP_KERNEL);
+	if (!bus_fmts)
+		return -ENOMEM;
+	for (i = 0; i < num_bus_fmts; i++)
+		bus_fmts[i] = av_buf_live_fmts[i].bus_fmt;
+
+	layer = &disp->layers[ZYNQMP_DISP_LAYER_VID];
+	layer->num_bus_fmts = num_bus_fmts;
+	layer->bus_fmts = bus_fmts;
+	size = ARRAY_SIZE(av_buf_vid_fmts);
+	layer->num_fmts = size;
+	layer->drm_fmts = devm_kcalloc(disp->dev, size,
+				       sizeof(*layer->drm_fmts), GFP_KERNEL);
+	if (!layer->drm_fmts)
+		return -ENOMEM;
+	for (i = 0; i < layer->num_fmts; i++)
+		layer->drm_fmts[i] = av_buf_vid_fmts[i].drm_fmt;
+	layer->fmt = &av_buf_vid_fmts[ZYNQMP_DISP_AV_BUF_VID_FMT_YUYV];
+
+	layer = &disp->layers[ZYNQMP_DISP_LAYER_GFX];
+	layer->num_bus_fmts = num_bus_fmts;
+	layer->bus_fmts = bus_fmts;
+	size = ARRAY_SIZE(av_buf_gfx_fmts);
+	layer->num_fmts = size;
+	layer->drm_fmts = devm_kcalloc(disp->dev, size,
+				       sizeof(*layer->drm_fmts), GFP_KERNEL);
+	if (!layer->drm_fmts)
+		return -ENOMEM;
+
+	for (i = 0; i < layer->num_fmts; i++)
+		layer->drm_fmts[i] = av_buf_gfx_fmts[i].drm_fmt;
+	/* Honor the module-selected initial graphics format when valid */
+	if (zynqmp_disp_gfx_init_fmt < ARRAY_SIZE(zynqmp_disp_gfx_init_fmts))
+		gfx_fmt = zynqmp_disp_gfx_init_fmts[zynqmp_disp_gfx_init_fmt];
+	layer->fmt = &av_buf_gfx_fmts[gfx_fmt];
+
+	return 0;
+}
+
+/*
+ * zynqmp_disp_probe - Probe the display subsystem
+ *
+ * Maps the blend/av_buf/aud register regions, enumerates formats,
+ * acquires clocks (preferring live PL clocks with PS fallbacks; audio is
+ * optional, video is mandatory), creates the layers and initializes the
+ * hardware.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int zynqmp_disp_probe(struct platform_device *pdev)
+{
+	struct zynqmp_dpsub *dpsub;
+	struct zynqmp_disp *disp;
+	struct resource *res;
+	int ret;
+
+	disp = devm_kzalloc(&pdev->dev, sizeof(*disp), GFP_KERNEL);
+	if (!disp)
+		return -ENOMEM;
+	disp->dev = &pdev->dev;
+
+	/* devm_ioremap_resource() validates res, so missing resources
+	 * are caught by the IS_ERR() checks below.
+	 */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "blend");
+	disp->blend.base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(disp->blend.base))
+		return PTR_ERR(disp->blend.base);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "av_buf");
+	disp->av_buf.base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(disp->av_buf.base))
+		return PTR_ERR(disp->av_buf.base);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aud");
+	disp->aud.base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(disp->aud.base))
+		return PTR_ERR(disp->aud.base);
+
+	dpsub = platform_get_drvdata(pdev);
+	dpsub->disp = disp;
+	disp->dpsub = dpsub;
+
+	ret = zynqmp_disp_enumerate_fmts(disp);
+	if (ret)
+		return ret;
+
+	/* Try the live PL video clock */
+	disp->_pl_pclk = devm_clk_get(disp->dev, "dp_live_video_in_clk");
+	if (!IS_ERR(disp->_pl_pclk)) {
+		disp->pclk = disp->_pl_pclk;
+		ret = zynqmp_disp_clk_enable_disable(disp->pclk,
+						     &disp->pclk_en);
+		if (ret)
+			disp->pclk = NULL;
+	} else if (PTR_ERR(disp->_pl_pclk) == -EPROBE_DEFER) {
+		return PTR_ERR(disp->_pl_pclk);
+	}
+
+	/* If the live PL video clock is not valid, fall back to PS clock */
+	if (!disp->pclk) {
+		disp->_ps_pclk = devm_clk_get(disp->dev, "dp_vtc_pixel_clk_in");
+		if (IS_ERR(disp->_ps_pclk)) {
+			dev_err(disp->dev, "failed to init any video clock\n");
+			return PTR_ERR(disp->_ps_pclk);
+		}
+		disp->pclk = disp->_ps_pclk;
+		ret = zynqmp_disp_clk_enable_disable(disp->pclk,
+						     &disp->pclk_en);
+		if (ret) {
+			dev_err(disp->dev, "failed to init any video clock\n");
+			return ret;
+		}
+	}
+
+	disp->aclk = devm_clk_get(disp->dev, "dp_apb_clk");
+	if (IS_ERR(disp->aclk))
+		return PTR_ERR(disp->aclk);
+	ret = zynqmp_disp_clk_enable(disp->aclk, &disp->aclk_en);
+	if (ret) {
+		dev_err(disp->dev, "failed to enable the APB clk\n");
+		return ret;
+	}
+
+	/* Try the live PL audio clock */
+	disp->_pl_audclk = devm_clk_get(disp->dev, "dp_live_audio_aclk");
+	if (!IS_ERR(disp->_pl_audclk)) {
+		disp->audclk = disp->_pl_audclk;
+		ret = zynqmp_disp_clk_enable_disable(disp->audclk,
+						     &disp->audclk_en);
+		if (ret)
+			disp->audclk = NULL;
+	}
+
+	/* If the live PL audio clock is not valid, fall back to PS clock */
+	if (!disp->audclk) {
+		disp->_ps_audclk = devm_clk_get(disp->dev, "dp_aud_clk");
+		if (!IS_ERR(disp->_ps_audclk)) {
+			disp->audclk = disp->_ps_audclk;
+			ret = zynqmp_disp_clk_enable_disable(disp->audclk,
+							     &disp->audclk_en);
+			if (ret)
+				disp->audclk = NULL;
+		}
+
+		/* Audio is best-effort: probe continues without it */
+		if (!disp->audclk) {
+			dev_err(disp->dev,
+				"audio is disabled due to clock failure\n");
+		}
+	}
+
+	ret = zynqmp_disp_layer_create(disp);
+	if (ret)
+		goto error_aclk;
+
+	zynqmp_disp_init(disp);
+
+	return 0;
+
+error_aclk:
+	zynqmp_disp_clk_disable(disp->aclk, &disp->aclk_en);
+	return ret;
+}
+
+/*
+ * zynqmp_disp_remove - Remove the display subsystem
+ *
+ * Destroys the layers and disables the audio (if enabled), APB and pixel
+ * clocks; device-managed resources are released by the core.
+ */
+int zynqmp_disp_remove(struct platform_device *pdev)
+{
+	struct zynqmp_dpsub *dpsub = platform_get_drvdata(pdev);
+	struct zynqmp_disp *disp = dpsub->disp;
+
+	zynqmp_disp_layer_destroy(disp);
+	if (disp->audclk)
+		zynqmp_disp_clk_disable(disp->audclk, &disp->audclk_en);
+	zynqmp_disp_clk_disable(disp->aclk, &disp->aclk_en);
+	zynqmp_disp_clk_disable(disp->pclk, &disp->pclk_en);
+	dpsub->disp = NULL;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.h b/drivers/gpu/drm/xlnx/zynqmp_disp.h
new file mode 100644
index 000000000000..28d8188f8f5e
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ZynqMP Display Driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ZYNQMP_DISP_H_
+#define _ZYNQMP_DISP_H_
+
+struct zynqmp_disp;
+
+void zynqmp_disp_handle_vblank(struct zynqmp_disp *disp);
+unsigned int zynqmp_disp_get_apb_clk_rate(struct zynqmp_disp *disp);
+bool zynqmp_disp_aud_enabled(struct zynqmp_disp *disp);
+unsigned int zynqmp_disp_get_aud_clk_rate(struct zynqmp_disp *disp);
+uint32_t zynqmp_disp_get_crtc_mask(struct zynqmp_disp *disp);
+
+int zynqmp_disp_bind(struct device *dev, struct device *master, void *data);
+void zynqmp_disp_unbind(struct device *dev, struct device *master, void *data);
+
+int zynqmp_disp_probe(struct platform_device *pdev);
+int zynqmp_disp_remove(struct platform_device *pdev);
+
+#endif /* _ZYNQMP_DISP_H_ */
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
new file mode 100644
index 000000000000..719214f19649
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -0,0 +1,1916 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ZynqMP DisplayPort Driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-zynqmp.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/uaccess.h>
+
+#include "zynqmp_disp.h"
+#include "zynqmp_dpsub.h"
+
+static uint zynqmp_dp_aux_timeout_ms = 50;
+module_param_named(aux_timeout_ms, zynqmp_dp_aux_timeout_ms, uint, 0444);
+MODULE_PARM_DESC(aux_timeout_ms, "DP aux timeout value in msec (default: 50)");
+
+/*
+ * Some sink requires a delay after power on request
+ */
+static uint zynqmp_dp_power_on_delay_ms = 4;
+module_param_named(power_on_delay_ms, zynqmp_dp_power_on_delay_ms, uint, 0444);
+MODULE_PARM_DESC(power_on_delay_ms, "DP power on delay in msec (default: 4)");
+
+/* Link configuration registers */
+#define ZYNQMP_DP_TX_LINK_BW_SET 0x0
+#define ZYNQMP_DP_TX_LANE_CNT_SET 0x4
+#define ZYNQMP_DP_TX_ENHANCED_FRAME_EN 0x8
+#define ZYNQMP_DP_TX_TRAINING_PATTERN_SET 0xc
+#define ZYNQMP_DP_TX_SCRAMBLING_DISABLE 0x14
+#define ZYNQMP_DP_TX_DOWNSPREAD_CTL 0x18
+#define ZYNQMP_DP_TX_SW_RESET 0x1c
+#define ZYNQMP_DP_TX_SW_RESET_STREAM1 BIT(0)
+#define ZYNQMP_DP_TX_SW_RESET_STREAM2 BIT(1)
+#define ZYNQMP_DP_TX_SW_RESET_STREAM3 BIT(2)
+#define ZYNQMP_DP_TX_SW_RESET_STREAM4 BIT(3)
+#define ZYNQMP_DP_TX_SW_RESET_AUX BIT(7)
+#define ZYNQMP_DP_TX_SW_RESET_ALL (ZYNQMP_DP_TX_SW_RESET_STREAM1 | \
+ ZYNQMP_DP_TX_SW_RESET_STREAM2 | \
+ ZYNQMP_DP_TX_SW_RESET_STREAM3 | \
+ ZYNQMP_DP_TX_SW_RESET_STREAM4 | \
+ ZYNQMP_DP_TX_SW_RESET_AUX)
+
+/* Core enable registers */
+#define ZYNQMP_DP_TX_ENABLE 0x80
+#define ZYNQMP_DP_TX_ENABLE_MAIN_STREAM 0x84
+#define ZYNQMP_DP_TX_FORCE_SCRAMBLER_RESET 0xc0
+#define ZYNQMP_DP_TX_VERSION 0xf8
+#define ZYNQMP_DP_TX_VERSION_MAJOR_MASK GENMASK(31, 24)
+#define ZYNQMP_DP_TX_VERSION_MAJOR_SHIFT 24
+#define ZYNQMP_DP_TX_VERSION_MINOR_MASK GENMASK(23, 16)
+#define ZYNQMP_DP_TX_VERSION_MINOR_SHIFT 16
+#define ZYNQMP_DP_TX_VERSION_REVISION_MASK GENMASK(15, 12)
+#define ZYNQMP_DP_TX_VERSION_REVISION_SHIFT 12
+#define ZYNQMP_DP_TX_VERSION_PATCH_MASK GENMASK(11, 8)
+#define ZYNQMP_DP_TX_VERSION_PATCH_SHIFT 8
+#define ZYNQMP_DP_TX_VERSION_INTERNAL_MASK GENMASK(7, 0)
+#define ZYNQMP_DP_TX_VERSION_INTERNAL_SHIFT 0
+
+/* Core ID registers */
+#define ZYNQMP_DP_TX_CORE_ID				0xfc
+#define ZYNQMP_DP_TX_CORE_ID_MAJOR_MASK			GENMASK(31, 24)
+#define ZYNQMP_DP_TX_CORE_ID_MAJOR_SHIFT		24
+#define ZYNQMP_DP_TX_CORE_ID_MINOR_MASK			GENMASK(23, 16)
+#define ZYNQMP_DP_TX_CORE_ID_MINOR_SHIFT		16
+#define ZYNQMP_DP_TX_CORE_ID_REVISION_MASK		GENMASK(15, 8)
+#define ZYNQMP_DP_TX_CORE_ID_REVISION_SHIFT		8
+#define ZYNQMP_DP_TX_CORE_ID_DIRECTION			BIT(0)
+
+/* AUX channel interface registers */
+#define ZYNQMP_DP_TX_AUX_COMMAND 0x100
+#define ZYNQMP_DP_TX_AUX_COMMAND_CMD_SHIFT 8
+#define ZYNQMP_DP_TX_AUX_COMMAND_ADDRESS_ONLY BIT(12)
+#define ZYNQMP_DP_TX_AUX_COMMAND_BYTES_SHIFT 0
+#define ZYNQMP_DP_TX_AUX_WRITE_FIFO 0x104
+#define ZYNQMP_DP_TX_AUX_ADDRESS 0x108
+#define ZYNQMP_DP_TX_CLK_DIVIDER 0x10c
+#define ZYNQMP_DP_TX_CLK_DIVIDER_MHZ 1000000
+#define ZYNQMP_DP_TX_CLK_DIVIDER_AUX_FILTER_SHIFT 8
+#define ZYNQMP_DP_TX_INTR_SIGNAL_STATE 0x130
+#define ZYNQMP_DP_TX_INTR_SIGNAL_STATE_HPD BIT(0)
+#define ZYNQMP_DP_TX_INTR_SIGNAL_STATE_REQUEST BIT(1)
+#define ZYNQMP_DP_TX_INTR_SIGNAL_STATE_REPLY BIT(2)
+#define ZYNQMP_DP_TX_INTR_SIGNAL_STATE_REPLY_TIMEOUT BIT(3)
+#define ZYNQMP_DP_TX_AUX_REPLY_DATA 0x134
+#define ZYNQMP_DP_TX_AUX_REPLY_CODE 0x138
+#define ZYNQMP_DP_TX_AUX_REPLY_CODE_AUX_ACK (0)
+#define ZYNQMP_DP_TX_AUX_REPLY_CODE_AUX_NACK BIT(0)
+#define ZYNQMP_DP_TX_AUX_REPLY_CODE_AUX_DEFER BIT(1)
+#define ZYNQMP_DP_TX_AUX_REPLY_CODE_I2C_ACK (0)
+#define ZYNQMP_DP_TX_AUX_REPLY_CODE_I2C_NACK BIT(2)
+#define ZYNQMP_DP_TX_AUX_REPLY_CODE_I2C_DEFER BIT(3)
+#define ZYNQMP_DP_TX_AUX_REPLY_CNT 0x13c
+#define ZYNQMP_DP_TX_AUX_REPLY_CNT_MASK 0xff
+#define ZYNQMP_DP_TX_INTR_STATUS 0x140
+#define ZYNQMP_DP_TX_INTR_MASK 0x144
+#define ZYNQMP_DP_TX_INTR_HPD_IRQ BIT(0)
+#define ZYNQMP_DP_TX_INTR_HPD_EVENT BIT(1)
+#define ZYNQMP_DP_TX_INTR_REPLY_RECV BIT(2)
+#define ZYNQMP_DP_TX_INTR_REPLY_TIMEOUT BIT(3)
+#define ZYNQMP_DP_TX_INTR_HPD_PULSE BIT(4)
+#define ZYNQMP_DP_TX_INTR_EXT_PKT_TXD BIT(5)
+#define ZYNQMP_DP_TX_INTR_LIV_ABUF_UNDRFLW BIT(12)
+#define ZYNQMP_DP_TX_INTR_VBLANK_START BIT(13)
+#define ZYNQMP_DP_TX_INTR_PIXEL0_MATCH BIT(14)
+#define ZYNQMP_DP_TX_INTR_PIXEL1_MATCH BIT(15)
+#define ZYNQMP_DP_TX_INTR_CHBUF_UNDERFLW_MASK 0x3f0000
+#define ZYNQMP_DP_TX_INTR_CHBUF_OVERFLW_MASK 0xfc00000
+#define ZYNQMP_DP_TX_INTR_CUST_TS_2 BIT(28)
+#define ZYNQMP_DP_TX_INTR_CUST_TS BIT(29)
+#define ZYNQMP_DP_TX_INTR_EXT_VSYNC_TS BIT(30)
+#define ZYNQMP_DP_TX_INTR_VSYNC_TS BIT(31)
+#define ZYNQMP_DP_TX_INTR_ALL (ZYNQMP_DP_TX_INTR_HPD_IRQ | \
+ ZYNQMP_DP_TX_INTR_HPD_EVENT | \
+ ZYNQMP_DP_TX_INTR_REPLY_RECV | \
+ ZYNQMP_DP_TX_INTR_REPLY_TIMEOUT | \
+ ZYNQMP_DP_TX_INTR_HPD_PULSE | \
+ ZYNQMP_DP_TX_INTR_EXT_PKT_TXD | \
+ ZYNQMP_DP_TX_INTR_LIV_ABUF_UNDRFLW | \
+ ZYNQMP_DP_TX_INTR_CHBUF_UNDERFLW_MASK | \
+ ZYNQMP_DP_TX_INTR_CHBUF_OVERFLW_MASK)
+#define ZYNQMP_DP_TX_NO_INTR_ALL (ZYNQMP_DP_TX_INTR_PIXEL0_MATCH | \
+ ZYNQMP_DP_TX_INTR_PIXEL1_MATCH | \
+ ZYNQMP_DP_TX_INTR_CUST_TS_2 | \
+ ZYNQMP_DP_TX_INTR_CUST_TS | \
+ ZYNQMP_DP_TX_INTR_EXT_VSYNC_TS | \
+ ZYNQMP_DP_TX_INTR_VSYNC_TS)
+#define ZYNQMP_DP_TX_REPLY_DATA_CNT 0x148
+#define ZYNQMP_DP_SUB_TX_INTR_STATUS 0x3a0
+#define ZYNQMP_DP_SUB_TX_INTR_MASK 0x3a4
+#define ZYNQMP_DP_SUB_TX_INTR_EN 0x3a8
+#define ZYNQMP_DP_SUB_TX_INTR_DS 0x3ac
+
+/* Main stream attribute registers */
+#define ZYNQMP_DP_TX_MAIN_STREAM_HTOTAL 0x180
+#define ZYNQMP_DP_TX_MAIN_STREAM_VTOTAL 0x184
+#define ZYNQMP_DP_TX_MAIN_STREAM_POLARITY 0x188
+#define ZYNQMP_DP_TX_MAIN_STREAM_POLARITY_HSYNC_SHIFT 0
+#define ZYNQMP_DP_TX_MAIN_STREAM_POLARITY_VSYNC_SHIFT 1
+#define ZYNQMP_DP_TX_MAIN_STREAM_HSWIDTH 0x18c
+#define ZYNQMP_DP_TX_MAIN_STREAM_VSWIDTH 0x190
+#define ZYNQMP_DP_TX_MAIN_STREAM_HRES 0x194
+#define ZYNQMP_DP_TX_MAIN_STREAM_VRES 0x198
+#define ZYNQMP_DP_TX_MAIN_STREAM_HSTART 0x19c
+#define ZYNQMP_DP_TX_MAIN_STREAM_VSTART 0x1a0
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0 0x1a4
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0_SYNC BIT(0)
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0_FORMAT_SHIFT 1
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0_DYNAMIC_RANGE BIT(3)
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0_YCBCR_COLRIMETRY BIT(4)
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0_BPC_SHIFT 5
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC1 0x1a8
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0_INTERLACED_VERT BIT(0)
+#define ZYNQMP_DP_TX_MAIN_STREAM_MISC0_STEREO_VID_SHIFT 1
+#define ZYNQMP_DP_TX_M_VID 0x1ac
+#define ZYNQMP_DP_TX_TRANSFER_UNIT_SIZE 0x1b0
+#define ZYNQMP_DP_TX_DEF_TRANSFER_UNIT_SIZE 64
+#define ZYNQMP_DP_TX_N_VID 0x1b4
+#define ZYNQMP_DP_TX_USER_PIXEL_WIDTH 0x1b8
+#define ZYNQMP_DP_TX_USER_DATA_CNT_PER_LANE 0x1bc
+#define ZYNQMP_DP_TX_MIN_BYTES_PER_TU 0x1c4
+#define ZYNQMP_DP_TX_FRAC_BYTES_PER_TU 0x1c8
+#define ZYNQMP_DP_TX_INIT_WAIT 0x1cc
+
+/* PHY configuration and status registers */
+#define ZYNQMP_DP_TX_PHY_CONFIG 0x200
+#define ZYNQMP_DP_TX_PHY_CONFIG_PHY_RESET BIT(0)
+#define ZYNQMP_DP_TX_PHY_CONFIG_GTTX_RESET BIT(1)
+#define ZYNQMP_DP_TX_PHY_CONFIG_PHY_PMA_RESET BIT(8)
+#define ZYNQMP_DP_TX_PHY_CONFIG_PHY_PCS_RESET BIT(9)
+#define ZYNQMP_DP_TX_PHY_CONFIG_ALL_RESET (ZYNQMP_DP_TX_PHY_CONFIG_PHY_RESET | \
+ ZYNQMP_DP_TX_PHY_CONFIG_GTTX_RESET | \
+ ZYNQMP_DP_TX_PHY_CONFIG_PHY_PMA_RESET | \
+ ZYNQMP_DP_TX_PHY_CONFIG_PHY_PCS_RESET)
+#define ZYNQMP_DP_TX_PHY_PREEMPHASIS_LANE_0 0x210
+#define ZYNQMP_DP_TX_PHY_PREEMPHASIS_LANE_1 0x214
+#define ZYNQMP_DP_TX_PHY_PREEMPHASIS_LANE_2 0x218
+#define ZYNQMP_DP_TX_PHY_PREEMPHASIS_LANE_3 0x21c
+#define ZYNQMP_DP_TX_PHY_VOLTAGE_DIFF_LANE_0 0x220
+#define ZYNQMP_DP_TX_PHY_VOLTAGE_DIFF_LANE_1 0x224
+#define ZYNQMP_DP_TX_PHY_VOLTAGE_DIFF_LANE_2 0x228
+#define ZYNQMP_DP_TX_PHY_VOLTAGE_DIFF_LANE_3 0x22c
+#define ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING 0x234
+#define ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_162 0x1
+#define ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_270 0x3
+#define ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_540 0x5
+#define ZYNQMP_DP_TX_PHY_POWER_DOWN 0x238
+#define ZYNQMP_DP_TX_PHY_POWER_DOWN_LANE_0 BIT(0)
+#define ZYNQMP_DP_TX_PHY_POWER_DOWN_LANE_1 BIT(1)
+#define ZYNQMP_DP_TX_PHY_POWER_DOWN_LANE_2 BIT(2)
+#define ZYNQMP_DP_TX_PHY_POWER_DOWN_LANE_3 BIT(3)
+#define ZYNQMP_DP_TX_PHY_POWER_DOWN_ALL 0xf
+#define ZYNQMP_DP_TX_PHY_PRECURSOR_LANE_0 0x23c
+#define ZYNQMP_DP_TX_PHY_PRECURSOR_LANE_1 0x240
+#define ZYNQMP_DP_TX_PHY_PRECURSOR_LANE_2 0x244
+#define ZYNQMP_DP_TX_PHY_PRECURSOR_LANE_3 0x248
+#define ZYNQMP_DP_TX_PHY_POSTCURSOR_LANE_0 0x24c
+#define ZYNQMP_DP_TX_PHY_POSTCURSOR_LANE_1 0x250
+#define ZYNQMP_DP_TX_PHY_POSTCURSOR_LANE_2 0x254
+#define ZYNQMP_DP_TX_PHY_POSTCURSOR_LANE_3 0x258
+#define ZYNQMP_DP_SUB_TX_PHY_PRECURSOR_LANE_0 0x24c
+#define ZYNQMP_DP_SUB_TX_PHY_PRECURSOR_LANE_1 0x250
+#define ZYNQMP_DP_TX_PHY_STATUS 0x280
+#define ZYNQMP_DP_TX_PHY_STATUS_PLL_LOCKED_SHIFT 4
+#define ZYNQMP_DP_TX_PHY_STATUS_FPGA_PLL_LOCKED BIT(6)
+
+/* Audio registers */
+#define ZYNQMP_DP_TX_AUDIO_CONTROL 0x300
+#define ZYNQMP_DP_TX_AUDIO_CHANNELS 0x304
+#define ZYNQMP_DP_TX_AUDIO_INFO_DATA 0x308
+#define ZYNQMP_DP_TX_AUDIO_M_AUD 0x328
+#define ZYNQMP_DP_TX_AUDIO_N_AUD 0x32c
+#define ZYNQMP_DP_TX_AUDIO_EXT_DATA 0x330
+
+#define ZYNQMP_DP_MISC0_RGB (0)
+#define ZYNQMP_DP_MISC0_YCRCB_422 (5 << 1)
+#define ZYNQMP_DP_MISC0_YCRCB_444 (6 << 1)
+#define ZYNQMP_DP_MISC0_FORMAT_MASK 0xe
+#define ZYNQMP_DP_MISC0_BPC_6 (0 << 5)
+#define ZYNQMP_DP_MISC0_BPC_8 (1 << 5)
+#define ZYNQMP_DP_MISC0_BPC_10 (2 << 5)
+#define ZYNQMP_DP_MISC0_BPC_12 (3 << 5)
+#define ZYNQMP_DP_MISC0_BPC_16 (4 << 5)
+#define ZYNQMP_DP_MISC0_BPC_MASK 0xe0
+#define ZYNQMP_DP_MISC1_Y_ONLY (1 << 7)
+
+#define ZYNQMP_DP_MAX_LANES 2
+#define ZYNQMP_MAX_FREQ 3000000
+
+#define DP_REDUCED_BIT_RATE 162000
+#define DP_HIGH_BIT_RATE 270000
+#define DP_HIGH_BIT_RATE2 540000
+#define DP_MAX_TRAINING_TRIES 5
+#define DP_V1_2 0x12
+
+/**
+ * struct zynqmp_dp_link_config - Common link config between source and sink
+ * @max_rate: maximum link rate
+ * @max_lanes: maximum number of lanes
+ */
+struct zynqmp_dp_link_config {
+ int max_rate;
+ u8 max_lanes;
+};
+
+/**
+ * struct zynqmp_dp_mode - Configured mode of DisplayPort
+ * @bw_code: code for bandwidth(link rate)
+ * @lane_cnt: number of lanes
+ * @pclock: pixel clock frequency of current mode
+ * @fmt: format identifier string
+ */
+struct zynqmp_dp_mode {
+ u8 bw_code;
+ u8 lane_cnt;
+ int pclock;
+ const char *fmt;
+};
+
+/**
+ * struct zynqmp_dp_config - Configuration of DisplayPort from DTS
+ * @misc0: misc0 configuration (per DP v1.2 spec)
+ * @misc1: misc1 configuration (per DP v1.2 spec)
+ * @bpp: bits per pixel
+ * @bpc: bits per component
+ * @num_colors: number of color components
+ */
+struct zynqmp_dp_config {
+ u8 misc0;
+ u8 misc1;
+ u8 bpp;
+ u8 bpc;
+ u8 num_colors;
+};
+
+/**
+ * struct zynqmp_dp - Xilinx DisplayPort core
+ * @encoder: the drm encoder structure
+ * @connector: the drm connector structure
+ * @sync_prop: synchronous mode property
+ * @bpc_prop: bpc mode property
+ * @dev: device structure
+ * @dpsub: Display subsystem
+ * @drm: DRM core
+ * @iomem: device I/O memory for register access
+ * @irq: irq
+ * @config: IP core configuration from DTS
+ * @aux: aux channel
+ * @phy: PHY handles for DP lanes
+ * @num_lanes: number of enabled phy lanes
+ * @hpd_work: hot plug detection worker
+ * @status: connection status
+ * @enabled: flag to indicate if the device is enabled
+ * @dpms: current dpms state
+ * @dpcd: DP configuration data from currently connected sink device
+ * @link_config: common link configuration between IP core and sink device
+ * @mode: current mode between IP core and sink device
+ * @train_set: set of training data
+ */
+struct zynqmp_dp {
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+ struct drm_property *sync_prop;
+ struct drm_property *bpc_prop;
+ struct device *dev;
+ struct zynqmp_dpsub *dpsub;
+ struct drm_device *drm;
+ void __iomem *iomem;
+ int irq;
+
+ struct zynqmp_dp_config config;
+ struct drm_dp_aux aux;
+ struct phy *phy[ZYNQMP_DP_MAX_LANES];
+ u8 num_lanes;
+ struct delayed_work hpd_work;
+ enum drm_connector_status status;
+ bool enabled;
+
+ int dpms;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ struct zynqmp_dp_link_config link_config;
+ struct zynqmp_dp_mode mode;
+ u8 train_set[ZYNQMP_DP_MAX_LANES];
+};
+
+static inline struct zynqmp_dp *encoder_to_dp(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct zynqmp_dp, encoder);
+}
+
+static inline struct zynqmp_dp *connector_to_dp(struct drm_connector *connector)
+{
+ return container_of(connector, struct zynqmp_dp, connector);
+}
+
+static void zynqmp_dp_write(void __iomem *base, int offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+static u32 zynqmp_dp_read(void __iomem *base, int offset)
+{
+ return readl(base + offset);
+}
+
+static void zynqmp_dp_clr(void __iomem *base, int offset, u32 clr)
+{
+ zynqmp_dp_write(base, offset, zynqmp_dp_read(base, offset) & ~clr);
+}
+
+static void zynqmp_dp_set(void __iomem *base, int offset, u32 set)
+{
+ zynqmp_dp_write(base, offset, zynqmp_dp_read(base, offset) | set);
+}
+
+/*
+ * Internal functions: used by zynqmp_disp.c
+ */
+
+/**
+ * zynqmp_dp_update_bpp - Update the current bpp config
+ * @dp: DisplayPort IP core structure
+ *
+ * Update the current bpp based on the color format: bpc & num_colors.
+ * Any function that changes bpc or num_colors should call this
+ * to keep the bpp value in sync.
+ */
+static void zynqmp_dp_update_bpp(struct zynqmp_dp *dp)
+{
+ struct zynqmp_dp_config *config = &dp->config;
+
+ config->bpp = dp->config.bpc * dp->config.num_colors;
+}
+
+/**
+ * zynqmp_dp_set_color - Set the color
+ * @dp: DisplayPort IP core structure
+ * @color: color string, from zynqmp_disp_color_enum
+ *
+ * Update misc register values based on @color string.
+ *
+ * Return: 0 on success, or -EINVAL.
+ */
+int zynqmp_dp_set_color(struct zynqmp_dp *dp, const char *color)
+{
+ struct zynqmp_dp_config *config = &dp->config;
+
+ config->misc0 &= ~ZYNQMP_DP_MISC0_FORMAT_MASK;
+ config->misc1 &= ~ZYNQMP_DP_MISC1_Y_ONLY;
+ if (strcmp(color, "rgb") == 0) {
+ config->misc0 |= ZYNQMP_DP_MISC0_RGB;
+ config->num_colors = 3;
+ } else if (strcmp(color, "ycrcb422") == 0) {
+ config->misc0 |= ZYNQMP_DP_MISC0_YCRCB_422;
+ config->num_colors = 2;
+ } else if (strcmp(color, "ycrcb444") == 0) {
+ config->misc0 |= ZYNQMP_DP_MISC0_YCRCB_444;
+ config->num_colors = 3;
+ } else if (strcmp(color, "yonly") == 0) {
+ config->misc1 |= ZYNQMP_DP_MISC1_Y_ONLY;
+ config->num_colors = 1;
+ } else {
+ dev_err(dp->dev, "Invalid colormetry in DT\n");
+ return -EINVAL;
+ }
+ zynqmp_dp_update_bpp(dp);
+
+ return 0;
+}
+
+/**
+ * zynqmp_dp_enable_vblank - Enable vblank
+ * @dp: DisplayPort IP core structure
+ *
+ * Enable vblank interrupt
+ */
+void zynqmp_dp_enable_vblank(struct zynqmp_dp *dp)
+{
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_EN,
+ ZYNQMP_DP_TX_INTR_VBLANK_START);
+}
+
+/**
+ * zynqmp_dp_disable_vblank - Disable vblank
+ * @dp: DisplayPort IP core structure
+ *
+ * Disable vblank interrupt
+ */
+void zynqmp_dp_disable_vblank(struct zynqmp_dp *dp)
+{
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_DS,
+ ZYNQMP_DP_TX_INTR_VBLANK_START);
+}
+
+/*
+ * DP PHY functions
+ */
+
+/**
+ * zynqmp_dp_init_phy - Initialize the phy
+ * @dp: DisplayPort IP core structure
+ *
+ * Initialize the phy.
+ *
+ * Return: 0 if the phy instances are initialized correctly, or the error code
+ * returned from the callee functions.
+ * Note: We can call this function without any phy lane assigned to DP.
+ */
+static int zynqmp_dp_init_phy(struct zynqmp_dp *dp)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < dp->num_lanes; i++) {
+ ret = phy_init(dp->phy[i]);
+ if (ret) {
+ dev_err(dp->dev, "failed to init phy lane %d\n", i);
+ return ret;
+ }
+ }
+ /* Wait for PLL to be locked for the primary (1st) lane */
+ if (dp->phy[0]) {
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_DS,
+ ZYNQMP_DP_TX_INTR_ALL);
+ zynqmp_dp_clr(dp->iomem, ZYNQMP_DP_TX_PHY_CONFIG,
+ ZYNQMP_DP_TX_PHY_CONFIG_ALL_RESET);
+ ret = xpsgtr_wait_pll_lock(dp->phy[0]);
+ if (ret) {
+ dev_err(dp->dev, "failed to lock pll\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * zynqmp_dp_exit_phy - Exit the phy
+ * @dp: DisplayPort IP core structure
+ *
+ * Exit the phy.
+ */
+static void zynqmp_dp_exit_phy(struct zynqmp_dp *dp)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < dp->num_lanes; i++) {
+ ret = phy_exit(dp->phy[i]);
+ if (ret)
+ dev_err(dp->dev, "failed to exit phy(%d) %d\n", i, ret);
+ }
+}
+
+/**
+ * zynqmp_dp_phy_ready - Check if PHY is ready
+ * @dp: DisplayPort IP core structure
+ *
+ * Check if PHY is ready. If PHY is not ready, wait 1ms to check for 100 times.
+ * This amount of delay was suggested by IP designer.
+ *
+ * Return: 0 if PHY is ready, or -ENODEV if PHY is not ready.
+ */
+static int zynqmp_dp_phy_ready(struct zynqmp_dp *dp)
+{
+ u32 i, reg, ready;
+
+ ready = (1 << dp->num_lanes) - 1;
+
+ /* Wait for 100 * 1ms. This should be enough time for PHY to be ready */
+ for (i = 0; ; i++) {
+ reg = zynqmp_dp_read(dp->iomem, ZYNQMP_DP_TX_PHY_STATUS);
+ if ((reg & ready) == ready)
+ return 0;
+
+ if (i == 100) {
+ dev_err(dp->dev, "PHY isn't ready\n");
+ return -ENODEV;
+ }
+
+ usleep_range(1000, 1100);
+ }
+
+ return 0;
+}
+
+/*
+ * Power Management functions
+ */
+/**
+ * zynqmp_dp_pm_resume - Resume DP IP
+ * @dp: DisplayPort IP core structure
+ *
+ * Resume the DP IP including PHY and pipeline.
+ */
+void zynqmp_dp_pm_resume(struct zynqmp_dp *dp)
+{
+ zynqmp_dp_init_phy(dp);
+}
+/**
+ * zynqmp_dp_pm_suspend - Suspend DP IP
+ * @dp: DisplayPort IP core structure
+ *
+ * Suspend the DP IP including PHY and pipeline.
+ */
+void zynqmp_dp_pm_suspend(struct zynqmp_dp *dp)
+{
+ zynqmp_dp_exit_phy(dp);
+}
+/*
+ * DP functions
+ */
+
+/**
+ * zynqmp_dp_max_rate - Calculate and return available max pixel clock
+ * @link_rate: link rate (Kilo-bytes / sec)
+ * @lane_num: number of lanes
+ * @bpp: bits per pixel
+ *
+ * Return: max pixel clock (KHz) supported by current link config.
+ */
+static inline int zynqmp_dp_max_rate(int link_rate, u8 lane_num, u8 bpp)
+{
+ return link_rate * lane_num * 8 / bpp;
+}
+
+/**
+ * zynqmp_dp_mode_configure - Configure the link values
+ * @dp: DisplayPort IP core structure
+ * @pclock: pixel clock for requested display mode
+ * @current_bw: current link rate
+ *
+ * Find the link configuration values, rate and lane count for requested pixel
+ * clock @pclock. The @pclock is stored in the mode to be used in other
+ * functions later. The returned rate is downshifted from the current rate
+ * @current_bw.
+ *
+ * Return: Current link rate code, or -EINVAL.
+ */
+static int zynqmp_dp_mode_configure(struct zynqmp_dp *dp, int pclock,
+ u8 current_bw)
+{
+ int max_rate = dp->link_config.max_rate;
+ u8 bws[3] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
+ u8 max_lanes = dp->link_config.max_lanes;
+ u8 max_link_rate_code = drm_dp_link_rate_to_bw_code(max_rate);
+ u8 bpp = dp->config.bpp;
+ u8 lane_cnt;
+ s8 i;
+
+ if (current_bw == DP_LINK_BW_1_62) {
+ dev_err(dp->dev, "can't downshift. already lowest link rate\n");
+ return -EINVAL;
+ }
+
+ for (i = ARRAY_SIZE(bws) - 1; i >= 0; i--) {
+ if (current_bw && bws[i] >= current_bw)
+ continue;
+
+ if (bws[i] <= max_link_rate_code)
+ break;
+ }
+
+ for (lane_cnt = 1; lane_cnt <= max_lanes; lane_cnt <<= 1) {
+ int bw;
+ u32 rate;
+
+ bw = drm_dp_bw_code_to_link_rate(bws[i]);
+ rate = zynqmp_dp_max_rate(bw, lane_cnt, bpp);
+ if (pclock <= rate) {
+ dp->mode.bw_code = bws[i];
+ dp->mode.lane_cnt = lane_cnt;
+ dp->mode.pclock = pclock;
+ return dp->mode.bw_code;
+ }
+ }
+
+ dev_err(dp->dev, "failed to configure link values\n");
+
+ return -EINVAL;
+}
+
+/**
+ * zynqmp_dp_adjust_train - Adjust train values
+ * @dp: DisplayPort IP core structure
+ * @link_status: link status from sink which contains requested training values
+ */
+static void zynqmp_dp_adjust_train(struct zynqmp_dp *dp,
+ u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ u8 *train_set = dp->train_set;
+ u8 voltage = 0, preemphasis = 0;
+ u8 i;
+
+ for (i = 0; i < dp->mode.lane_cnt; i++) {
+ u8 v = drm_dp_get_adjust_request_voltage(link_status, i);
+ u8 p = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
+
+ if (v > voltage)
+ voltage = v;
+
+ if (p > preemphasis)
+ preemphasis = p;
+ }
+
+ if (voltage >= DP_TRAIN_VOLTAGE_SWING_LEVEL_3)
+ voltage |= DP_TRAIN_MAX_SWING_REACHED;
+
+ if (preemphasis >= DP_TRAIN_PRE_EMPH_LEVEL_2)
+ preemphasis |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+ for (i = 0; i < dp->mode.lane_cnt; i++)
+ train_set[i] = voltage | preemphasis;
+}
+
+/**
+ * zynqmp_dp_update_vs_emph - Update the training values
+ * @dp: DisplayPort IP core structure
+ *
+ * Update the training values based on the request from sink. The mapped values
+ * are predefined, and values(vs, pe, pc) are from the device manual.
+ *
+ * Return: 0 if vs and emph are updated successfully, or the error code returned
+ * by drm_dp_dpcd_write().
+ */
+static int zynqmp_dp_update_vs_emph(struct zynqmp_dp *dp)
+{
+ u8 *train_set = dp->train_set;
+ u8 i, v_level, p_level;
+ int ret;
+
+ ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, train_set,
+ dp->mode.lane_cnt);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < dp->mode.lane_cnt; i++) {
+ u32 reg = ZYNQMP_DP_SUB_TX_PHY_PRECURSOR_LANE_0 + i * 4;
+
+ v_level = (train_set[i] & DP_TRAIN_VOLTAGE_SWING_MASK) >>
+ DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ p_level = (train_set[i] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ xpsgtr_margining_factor(dp->phy[i], p_level, v_level);
+ xpsgtr_override_deemph(dp->phy[i], p_level, v_level);
+ zynqmp_dp_write(dp->iomem, reg, 0x2);
+ }
+
+ return 0;
+}
+
+/**
+ * zynqmp_dp_link_train_cr - Train clock recovery
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: 0 if clock recovery train is done successfully, or corresponding
+ * error code.
+ */
+static int zynqmp_dp_link_train_cr(struct zynqmp_dp *dp)
+{
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ u8 lane_cnt = dp->mode.lane_cnt;
+ u8 vs = 0, tries = 0;
+ u16 max_tries, i;
+ bool cr_done;
+ int ret;
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_1);
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_1 |
+ DP_LINK_SCRAMBLING_DISABLE);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * 256 loops should be maximum iterations for 4 lanes and 4 values.
+ * So, This loop should exit before 512 iterations
+ */
+ for (max_tries = 0; max_tries < 512; max_tries++) {
+ ret = zynqmp_dp_update_vs_emph(dp);
+ if (ret)
+ return ret;
+
+ drm_dp_link_train_clock_recovery_delay(dp->dpcd);
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, link_status);
+ if (ret < 0)
+ return ret;
+
+ cr_done = drm_dp_clock_recovery_ok(link_status, lane_cnt);
+ if (cr_done)
+ break;
+
+ for (i = 0; i < lane_cnt; i++)
+ if (!(dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED))
+ break;
+ if (i == lane_cnt)
+ break;
+
+ if ((dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == vs)
+ tries++;
+ else
+ tries = 0;
+
+ if (tries == DP_MAX_TRAINING_TRIES)
+ break;
+
+ vs = dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+ zynqmp_dp_adjust_train(dp, link_status);
+ }
+
+ if (!cr_done)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * zynqmp_dp_link_train_ce - Train channel equalization
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: 0 if channel equalization train is done successfully, or
+ * corresponding error code.
+ */
+static int zynqmp_dp_link_train_ce(struct zynqmp_dp *dp)
+{
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ u8 lane_cnt = dp->mode.lane_cnt;
+ u32 pat, tries;
+ int ret;
+ bool ce_done;
+
+ if (dp->dpcd[DP_DPCD_REV] >= DP_V1_2 &&
+ dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED)
+ pat = DP_TRAINING_PATTERN_3;
+ else
+ pat = DP_TRAINING_PATTERN_2;
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_TRAINING_PATTERN_SET, pat);
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+ pat | DP_LINK_SCRAMBLING_DISABLE);
+ if (ret < 0)
+ return ret;
+
+ for (tries = 0; tries < DP_MAX_TRAINING_TRIES; tries++) {
+ ret = zynqmp_dp_update_vs_emph(dp);
+ if (ret)
+ return ret;
+
+ drm_dp_link_train_channel_eq_delay(dp->dpcd);
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, link_status);
+ if (ret < 0)
+ return ret;
+
+ ce_done = drm_dp_channel_eq_ok(link_status, lane_cnt);
+ if (ce_done)
+ break;
+
+ zynqmp_dp_adjust_train(dp, link_status);
+ }
+
+ if (!ce_done)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * zynqmp_dp_train - Train the link
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: 0 if all trains are done successfully, or corresponding error code.
+ */
+static int zynqmp_dp_train(struct zynqmp_dp *dp)
+{
+	u32 reg;
+	u8 bw_code = dp->mode.bw_code;
+	u8 lane_cnt = dp->mode.lane_cnt;
+	u8 aux_lane_cnt = lane_cnt;
+	bool enhanced;
+	int ret;
+
+	zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_LANE_CNT_SET, lane_cnt);
+	enhanced = drm_dp_enhanced_frame_cap(dp->dpcd);
+	if (enhanced) {
+		zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_ENHANCED_FRAME_EN, 1);
+		aux_lane_cnt |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+	}
+
+	if (dp->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5) {
+		zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_DOWNSPREAD_CTL, 1);
+		drm_dp_dpcd_writeb(&dp->aux, DP_DOWNSPREAD_CTRL,
+				   DP_SPREAD_AMP_0_5);
+	} else {
+		zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_DOWNSPREAD_CTL, 0);
+		drm_dp_dpcd_writeb(&dp->aux, DP_DOWNSPREAD_CTRL, 0);
+	}
+
+	ret = drm_dp_dpcd_writeb(&dp->aux, DP_LANE_COUNT_SET, aux_lane_cnt);
+	if (ret < 0) {
+		dev_err(dp->dev, "failed to set lane count\n");
+		return ret;
+	}
+
+	ret = drm_dp_dpcd_writeb(&dp->aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
+				 DP_SET_ANSI_8B10B);
+	if (ret < 0) {
+		dev_err(dp->dev, "failed to set ANSI 8B/10B encoding\n");
+		return ret;
+	}
+
+	ret = drm_dp_dpcd_writeb(&dp->aux, DP_LINK_BW_SET, bw_code);
+	if (ret < 0) {
+		dev_err(dp->dev, "failed to set DP bandwidth\n");
+		return ret;
+	}
+
+	zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_LINK_BW_SET, bw_code);
+	switch (bw_code) {
+	case DP_LINK_BW_1_62:
+		reg = ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_162;
+		break;
+	case DP_LINK_BW_2_7:
+		reg = ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_270;
+		break;
+	case DP_LINK_BW_5_4:
+	default:
+		reg = ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_540;
+		break;
+	}
+
+	zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_PHY_CLOCK_FEEDBACK_SETTING,
+			reg);
+	ret = zynqmp_dp_phy_ready(dp);
+	if (ret < 0)
+		return ret;
+
+	zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_SCRAMBLING_DISABLE, 1);
+	memset(dp->train_set, 0, sizeof(dp->train_set));
+	ret = zynqmp_dp_link_train_cr(dp);
+	if (ret)
+		return ret;
+
+	ret = zynqmp_dp_link_train_ce(dp);
+	if (ret)
+		return ret;
+
+	ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+				 DP_TRAINING_PATTERN_DISABLE);
+	if (ret < 0) {
+		dev_err(dp->dev, "failed to disable training pattern\n");
+		return ret;
+	}
+	zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_TRAINING_PATTERN_SET,
+			DP_TRAINING_PATTERN_DISABLE);
+
+	zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_SCRAMBLING_DISABLE, 0);
+
+	return 0;
+}
+
+/**
+ * zynqmp_dp_train_loop - Downshift the link rate during training
+ * @dp: DisplayPort IP core structure
+ *
+ * Train the link by downshifting the link rate if training is not successful.
+ */
+static void zynqmp_dp_train_loop(struct zynqmp_dp *dp)
+{
+ struct zynqmp_dp_mode *mode = &dp->mode;
+ u8 bw = mode->bw_code;
+ int ret;
+
+ do {
+ if (dp->status == connector_status_disconnected ||
+ !dp->enabled)
+ return;
+
+ ret = zynqmp_dp_train(dp);
+ if (!ret)
+ return;
+
+ ret = zynqmp_dp_mode_configure(dp, mode->pclock, bw);
+ if (ret < 0)
+ goto err_out;
+
+ bw = ret;
+ } while (bw >= DP_LINK_BW_1_62);
+
+err_out:
+ dev_err(dp->dev, "failed to train the DP link\n");
+}
+
+/*
+ * DP Aux functions
+ */
+
+#define AUX_READ_BIT 0x1
+
+/**
+ * zynqmp_dp_aux_cmd_submit - Submit aux command
+ * @dp: DisplayPort IP core structure
+ * @cmd: aux command
+ * @addr: aux address
+ * @buf: buffer for command data
+ * @bytes: number of bytes for @buf
+ * @reply: reply code to be returned
+ *
+ * Submit an aux command. All aux related commands, native or i2c aux
+ * read/write, are submitted through this function. The function is mapped to
+ * the transfer function of struct drm_dp_aux. This function involves in
+ * multiple register reads/writes, thus synchronization is needed, and it is
+ * done by drm_dp_helper using @hw_mutex. The calling thread goes into sleep
+ * if there's no immediate reply to the command submission. The reply code is
+ * returned at @reply if @reply != NULL.
+ *
+ * Return: 0 if the command is submitted properly, or corresponding error code:
+ * -EBUSY when there is any request already being processed
+ * -ETIMEDOUT when receiving reply is timed out
+ * -EIO when received bytes are less than requested
+ */
+static int zynqmp_dp_aux_cmd_submit(struct zynqmp_dp *dp, u32 cmd, u16 addr,
+ u8 *buf, u8 bytes, u8 *reply)
+{
+ bool is_read = (cmd & AUX_READ_BIT) ? true : false;
+ void __iomem *iomem = dp->iomem;
+ u32 reg, i;
+
+ reg = zynqmp_dp_read(iomem, ZYNQMP_DP_TX_INTR_SIGNAL_STATE);
+ if (reg & ZYNQMP_DP_TX_INTR_SIGNAL_STATE_REQUEST)
+ return -EBUSY;
+
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUX_ADDRESS, addr);
+ if (!is_read)
+ for (i = 0; i < bytes; i++)
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUX_WRITE_FIFO,
+ buf[i]);
+
+ reg = cmd << ZYNQMP_DP_TX_AUX_COMMAND_CMD_SHIFT;
+ if (!buf || !bytes)
+ reg |= ZYNQMP_DP_TX_AUX_COMMAND_ADDRESS_ONLY;
+ else
+ reg |= (bytes - 1) << ZYNQMP_DP_TX_AUX_COMMAND_BYTES_SHIFT;
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUX_COMMAND, reg);
+
+ /* Wait for reply to be delivered upto 2ms */
+ for (i = 0; ; i++) {
+ reg = zynqmp_dp_read(iomem, ZYNQMP_DP_TX_INTR_SIGNAL_STATE);
+ if (reg & ZYNQMP_DP_TX_INTR_SIGNAL_STATE_REPLY)
+ break;
+
+ if (reg & ZYNQMP_DP_TX_INTR_SIGNAL_STATE_REPLY_TIMEOUT ||
+ i == 2)
+ return -ETIMEDOUT;
+
+ usleep_range(1000, 1100);
+ }
+
+ reg = zynqmp_dp_read(iomem, ZYNQMP_DP_TX_AUX_REPLY_CODE);
+ if (reply)
+ *reply = reg;
+
+ if (is_read &&
+ (reg == ZYNQMP_DP_TX_AUX_REPLY_CODE_AUX_ACK ||
+ reg == ZYNQMP_DP_TX_AUX_REPLY_CODE_I2C_ACK)) {
+ reg = zynqmp_dp_read(iomem, ZYNQMP_DP_TX_REPLY_DATA_CNT);
+ if ((reg & ZYNQMP_DP_TX_AUX_REPLY_CNT_MASK) != bytes)
+ return -EIO;
+
+ for (i = 0; i < bytes; i++) {
+ buf[i] = zynqmp_dp_read(iomem,
+ ZYNQMP_DP_TX_AUX_REPLY_DATA);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * zynqmp_dp_aux_transfer - AUX transfer callback for the DRM DP AUX helper
+ * @aux: DRM DP AUX channel embedded in struct zynqmp_dp
+ * @msg: AUX message to transfer
+ *
+ * Retry the low-level AUX command until it succeeds or the configured
+ * timeout elapses. Each attempt is spaced ~400 usec apart.
+ *
+ * Return: @msg->size on success, -ENODEV if the sink disconnected, or the
+ * last error from zynqmp_dp_aux_cmd_submit() (e.g. -EBUSY, -ETIMEDOUT).
+ */
+static ssize_t
+zynqmp_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+ struct zynqmp_dp *dp = container_of(aux, struct zynqmp_dp, aux);
+ int ret;
+ unsigned int i, iter;
+
+ /* Number of loops = timeout in msec / aux delay (400 usec) */
+ iter = zynqmp_dp_aux_timeout_ms * 1000 / 400;
+ iter = iter ? iter : 1;
+
+ for (i = 0; i < iter; i++) {
+ ret = zynqmp_dp_aux_cmd_submit(dp, msg->request, msg->address,
+ msg->buffer, msg->size,
+ &msg->reply);
+ if (!ret) {
+ dev_dbg(dp->dev, "aux %d retries\n", i);
+ return msg->size;
+ }
+
+ /* Give up early if HPD reported a disconnect meanwhile */
+ if (dp->status == connector_status_disconnected) {
+ dev_dbg(dp->dev, "no connected aux device\n");
+ return -ENODEV;
+ }
+
+ usleep_range(400, 500);
+ }
+
+ dev_dbg(dp->dev, "failed to do aux transfer (%d)\n", ret);
+
+ return ret;
+}
+
+/**
+ * zynqmp_dp_init_aux - Initialize the DP aux
+ * @dp: DisplayPort IP core structure
+ *
+ * Initialize the DP aux. The aux clock is derived from the axi clock, so
+ * this function gets the axi clock frequency and calculates the filter
+ * value. Additionally, the interrupts and transmitter are enabled.
+ *
+ * Return: 0 on success, error value otherwise
+ */
+static int zynqmp_dp_init_aux(struct zynqmp_dp *dp)
+{
+ unsigned int rate;
+ u32 reg, w;
+
+ rate = zynqmp_disp_get_apb_clk_rate(dp->dpsub->disp);
+ if (rate < ZYNQMP_DP_TX_CLK_DIVIDER_MHZ) {
+ dev_err(dp->dev, "aclk should be higher than 1MHz\n");
+ return -EINVAL;
+ }
+
+ /* Allowable values for this register are: 8, 16, 24, 32, 40, 48 */
+ for (w = 8; w <= 48; w += 8) {
+ /* AUX pulse width should be between 0.4 to 0.6 usec */
+ if (w >= (4 * rate / 10000000) &&
+ w <= (6 * rate / 10000000))
+ break;
+ }
+
+ /* Loop fell through without a break: no legal width fits this aclk */
+ if (w > 48) {
+ dev_err(dp->dev, "aclk frequency too high\n");
+ return -EINVAL;
+ }
+ /* Pack filter width and the 1MHz divider into one register write */
+ reg = w << ZYNQMP_DP_TX_CLK_DIVIDER_AUX_FILTER_SHIFT;
+ reg |= rate / ZYNQMP_DP_TX_CLK_DIVIDER_MHZ;
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_CLK_DIVIDER, reg);
+ /* Enable all interrupts except the ones masked by NO_INTR_ALL */
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_EN,
+ ZYNQMP_DP_TX_INTR_ALL);
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_DS,
+ ZYNQMP_DP_TX_NO_INTR_ALL);
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_ENABLE, 1);
+
+ return 0;
+}
+
+/**
+ * zynqmp_dp_exit_aux - De-initialize the DP aux
+ * @dp: DisplayPort IP core structure
+ *
+ * De-initialize the DP aux. Disable all interrupts which are enabled
+ * through aux initialization, as well as the transmitter.
+ */
+static void zynqmp_dp_exit_aux(struct zynqmp_dp *dp)
+{
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_ENABLE, 0);
+ /* Disable every interrupt source, not just the ones init enabled */
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_DS, 0xffffffff);
+}
+
+/*
+ * Generic DP functions
+ */
+
+/**
+ * zynqmp_dp_update_misc - Write the misc registers
+ * @dp: DisplayPort IP core structure
+ *
+ * The misc register values are stored in the structure, and this
+ * function applies the values into the registers.
+ */
+static void zynqmp_dp_update_misc(struct zynqmp_dp *dp)
+{
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_MAIN_STREAM_MISC0,
+ dp->config.misc0);
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_MAIN_STREAM_MISC1,
+ dp->config.misc1);
+}
+
+/**
+ * zynqmp_dp_set_sync_mode - Set the sync mode bit in the software misc state
+ * @dp: DisplayPort IP core structure
+ * @mode: flag if the sync mode should be on or off
+ *
+ * Set the bit in software misc state. To apply to hardware,
+ * zynqmp_dp_update_misc() should be called.
+ */
+static void zynqmp_dp_set_sync_mode(struct zynqmp_dp *dp, bool mode)
+{
+ struct zynqmp_dp_config *config = &dp->config;
+
+ if (mode)
+ config->misc0 |= ZYNQMP_DP_TX_MAIN_STREAM_MISC0_SYNC;
+ else
+ config->misc0 &= ~ZYNQMP_DP_TX_MAIN_STREAM_MISC0_SYNC;
+}
+
+/**
+ * zynqmp_dp_get_sync_mode - Get the sync mode state
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: true if the sync mode is on, or false
+ */
+static bool zynqmp_dp_get_sync_mode(struct zynqmp_dp *dp)
+{
+ struct zynqmp_dp_config *config = &dp->config;
+
+ return !!(config->misc0 & ZYNQMP_DP_TX_MAIN_STREAM_MISC0_SYNC);
+}
+
+/**
+ * zynqmp_dp_set_bpc - Set bpc value in software misc state
+ * @dp: DisplayPort IP core structure
+ * @bpc: bits per component
+ *
+ * If the connector reports a bpc that differs from the request, the
+ * connector's value wins. Unsupported values fall back to 8 bpc.
+ *
+ * Return: 0 on success, or the fallback bpc value
+ */
+static u8 zynqmp_dp_set_bpc(struct zynqmp_dp *dp, u8 bpc)
+{
+ struct zynqmp_dp_config *config = &dp->config;
+ u8 ret = 0;
+
+ /* Sink-reported bpc overrides the caller's request */
+ if (dp->connector.display_info.bpc &&
+ dp->connector.display_info.bpc != bpc) {
+ dev_err(dp->dev, "requested bpc (%u) != display info (%u)\n",
+ bpc, dp->connector.display_info.bpc);
+ bpc = dp->connector.display_info.bpc;
+ }
+
+ config->misc0 &= ~ZYNQMP_DP_MISC0_BPC_MASK;
+ switch (bpc) {
+ case 6:
+ config->misc0 |= ZYNQMP_DP_MISC0_BPC_6;
+ break;
+ case 8:
+ config->misc0 |= ZYNQMP_DP_MISC0_BPC_8;
+ break;
+ case 10:
+ config->misc0 |= ZYNQMP_DP_MISC0_BPC_10;
+ break;
+ case 12:
+ config->misc0 |= ZYNQMP_DP_MISC0_BPC_12;
+ break;
+ case 16:
+ config->misc0 |= ZYNQMP_DP_MISC0_BPC_16;
+ break;
+ default:
+ dev_err(dp->dev, "Not supported bpc (%u). fall back to 8bpc\n",
+ bpc);
+ config->misc0 |= ZYNQMP_DP_MISC0_BPC_8;
+ ret = 8;
+ break;
+ }
+ /*
+ * NOTE(review): on fallback, config->bpc stores the unsupported
+ * requested value while misc0 encodes 8 bpc — confirm intended.
+ */
+ config->bpc = bpc;
+ zynqmp_dp_update_bpp(dp);
+
+ return ret;
+}
+
+/**
+ * zynqmp_dp_get_bpc - Get bpc value from software state
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: current bpc value
+ */
+static u8 zynqmp_dp_get_bpc(struct zynqmp_dp *dp)
+{
+ return dp->config.bpc;
+}
+
+/**
+ * zynqmp_dp_encoder_mode_set_transfer_unit - Set the transfer unit values
+ * @dp: DisplayPort IP core structure
+ * @mode: requested display mode
+ *
+ * Set the transfer unit, and calculate all transfer unit size related values.
+ * Calculation is based on DP and IP core specification.
+ */
+static void
+zynqmp_dp_encoder_mode_set_transfer_unit(struct zynqmp_dp *dp,
+ struct drm_display_mode *mode)
+{
+ u32 tu = ZYNQMP_DP_TX_DEF_TRANSFER_UNIT_SIZE;
+ u32 bw, vid_kbytes, avg_bytes_per_tu, init_wait;
+
+ /* Use the max transfer unit size (default) */
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_TRANSFER_UNIT_SIZE, tu);
+
+ /* mode->clock is in kHz, so vid_kbytes is kbytes/sec of video data */
+ vid_kbytes = mode->clock * (dp->config.bpp / 8);
+ bw = drm_dp_bw_code_to_link_rate(dp->mode.bw_code);
+ /* avg_bytes_per_tu is scaled by 1000: integer part and fraction below */
+ avg_bytes_per_tu = vid_kbytes * tu / (dp->mode.lane_cnt * bw / 1000);
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_MIN_BYTES_PER_TU,
+ avg_bytes_per_tu / 1000);
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_FRAC_BYTES_PER_TU,
+ avg_bytes_per_tu % 1000);
+
+ /* Configure the initial wait cycle based on transfer unit size */
+ if (tu < (avg_bytes_per_tu / 1000))
+ init_wait = 0;
+ else if ((avg_bytes_per_tu / 1000) <= 4)
+ init_wait = tu;
+ else
+ init_wait = tu - avg_bytes_per_tu / 1000;
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_INIT_WAIT, init_wait);
+}
+
+/**
+ * zynqmp_dp_encoder_mode_set_stream - Configure the main stream
+ * @dp: DisplayPort IP core structure
+ * @mode: requested display mode
+ *
+ * Configure the main stream based on the requested mode @mode. Calculation is
+ * based on IP core specification.
+ */
+void zynqmp_dp_encoder_mode_set_stream(struct zynqmp_dp *dp,
+ struct drm_display_mode *mode)
+{
+ void __iomem *iomem = dp->iomem;
+ u8 lane_cnt = dp->mode.lane_cnt;
+ u32 reg, wpl;
+ unsigned int rate;
+
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_HTOTAL, mode->htotal);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_VTOTAL, mode->vtotal);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_POLARITY,
+ (!!(mode->flags & DRM_MODE_FLAG_PVSYNC) <<
+ ZYNQMP_DP_TX_MAIN_STREAM_POLARITY_VSYNC_SHIFT) |
+ (!!(mode->flags & DRM_MODE_FLAG_PHSYNC) <<
+ ZYNQMP_DP_TX_MAIN_STREAM_POLARITY_HSYNC_SHIFT));
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_HSWIDTH,
+ mode->hsync_end - mode->hsync_start);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_VSWIDTH,
+ mode->vsync_end - mode->vsync_start);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_HRES, mode->hdisplay);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_VRES, mode->vdisplay);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_HSTART,
+ mode->htotal - mode->hsync_start);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_MAIN_STREAM_VSTART,
+ mode->vtotal - mode->vsync_start);
+
+ /* In synchronous mode, set the dividers */
+ if (dp->config.misc0 & ZYNQMP_DP_TX_MAIN_STREAM_MISC0_SYNC) {
+ reg = drm_dp_bw_code_to_link_rate(dp->mode.bw_code);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_N_VID, reg);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_M_VID, mode->clock);
+ rate = zynqmp_disp_get_aud_clk_rate(dp->dpsub->disp);
+ if (rate) {
+ dev_dbg(dp->dev, "Audio rate: %d\n", rate / 512);
+ /*
+ * NOTE(review): N_AUD is written with 'reg', which
+ * still holds the video link rate — confirm this is
+ * the intended N value for audio regeneration.
+ */
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUDIO_N_AUD, reg);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUDIO_M_AUD,
+ rate / 1000);
+ }
+ }
+
+ /* Only 2 channel audio is supported now */
+ /* NOTE(review): value 1 presumably means channel count minus one */
+ if (zynqmp_disp_aud_enabled(dp->dpsub->disp))
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUDIO_CHANNELS, 1);
+
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_USER_PIXEL_WIDTH, 1);
+
+ /* Translate to the native 16 bit datapath based on IP core spec */
+ wpl = (mode->hdisplay * dp->config.bpp + 15) / 16;
+ reg = wpl + wpl % lane_cnt - lane_cnt;
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_USER_DATA_CNT_PER_LANE, reg);
+}
+
+/*
+ * DRM connector functions
+ */
+
+/*
+ * Detect sink presence via the HPD signal, then read the DPCD to cache the
+ * link capabilities (max rate/lanes) used by mode validation and training.
+ */
+static enum drm_connector_status
+zynqmp_dp_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct zynqmp_dp *dp = connector_to_dp(connector);
+ struct zynqmp_dp_link_config *link_config = &dp->link_config;
+ u32 state, i;
+ int ret;
+
+ /*
+ * This is from heuristic. It takes some delay (ex, 100 ~ 500 msec) to
+ * get the HPD signal with some monitors.
+ */
+ for (i = 0; i < 10; i++) {
+ state = zynqmp_dp_read(dp->iomem,
+ ZYNQMP_DP_TX_INTR_SIGNAL_STATE);
+ if (state & ZYNQMP_DP_TX_INTR_SIGNAL_STATE_HPD)
+ break;
+ msleep(100);
+ }
+
+ /* 'state' holds the last read; loop above always runs at least once */
+ if (state & ZYNQMP_DP_TX_INTR_SIGNAL_STATE_HPD) {
+ dp->status = connector_status_connected;
+ /* The first DPCD read after wake-up may fail; retry once */
+ ret = drm_dp_dpcd_read(&dp->aux, 0x0, dp->dpcd,
+ sizeof(dp->dpcd));
+ if (ret < 0) {
+ dev_dbg(dp->dev, "DPCD read first try fails");
+ ret = drm_dp_dpcd_read(&dp->aux, 0x0, dp->dpcd,
+ sizeof(dp->dpcd));
+ if (ret < 0) {
+ dev_dbg(dp->dev, "DPCD read retry fails");
+ goto disconnected;
+ }
+ }
+
+ /* Clamp sink capabilities to what this Tx supports */
+ link_config->max_rate = min_t(int,
+ drm_dp_max_link_rate(dp->dpcd),
+ DP_HIGH_BIT_RATE2);
+ link_config->max_lanes = min_t(u8,
+ drm_dp_max_lane_count(dp->dpcd),
+ dp->num_lanes);
+
+ return connector_status_connected;
+ }
+
+disconnected:
+ dp->status = connector_status_disconnected;
+ return connector_status_disconnected;
+}
+
+/* Fetch the EDID over AUX and populate the connector's mode list. */
+static int zynqmp_dp_connector_get_modes(struct drm_connector *connector)
+{
+ struct zynqmp_dp *dp = connector_to_dp(connector);
+ struct edid *edid;
+ int ret;
+
+ edid = drm_get_edid(connector, &dp->aux.ddc);
+ if (!edid)
+ return 0;
+
+ drm_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+
+ return ret;
+}
+
+/* There is exactly one encoder per DP instance; always return it. */
+static struct drm_encoder *
+zynqmp_dp_connector_best_encoder(struct drm_connector *connector)
+{
+ struct zynqmp_dp *dp = connector_to_dp(connector);
+
+ return &dp->encoder;
+}
+
+/*
+ * Reject modes whose pixel clock exceeds either the IP core limit or the
+ * bandwidth available with the detected link rate / lane count / bpp.
+ */
+static int zynqmp_dp_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct zynqmp_dp *dp = connector_to_dp(connector);
+ u8 max_lanes = dp->link_config.max_lanes;
+ u8 bpp = dp->config.bpp;
+ int max_rate = dp->link_config.max_rate;
+ int rate;
+
+ if (mode->clock > ZYNQMP_MAX_FREQ) {
+ dev_dbg(dp->dev, "filtered the mode, %s,for high pixel rate\n",
+ mode->name);
+ drm_mode_debug_printmodeline(mode);
+ return MODE_CLOCK_HIGH;
+ }
+
+ /* Check with link rate and lane count */
+ rate = zynqmp_dp_max_rate(max_rate, max_lanes, bpp);
+ if (mode->clock > rate) {
+ dev_dbg(dp->dev, "filtered the mode, %s,for high pixel rate\n",
+ mode->name);
+ drm_mode_debug_printmodeline(mode);
+ return MODE_CLOCK_HIGH;
+ }
+
+ return MODE_OK;
+}
+
+/* Unregister and release the connector; used by .destroy and error paths. */
+static void zynqmp_dp_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+/*
+ * Apply the driver-private "sync" and "bpc" connector properties.
+ *
+ * Returns 0 on success, -EINVAL for an unknown property or when the
+ * requested bpc was rejected (the property is reset to the fallback value
+ * reported by zynqmp_dp_set_bpc()).
+ */
+static int
+zynqmp_dp_connector_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct zynqmp_dp *dp = connector_to_dp(connector);
+ int ret = 0;
+
+ if (property == dp->sync_prop) {
+ zynqmp_dp_set_sync_mode(dp, val);
+ } else if (property == dp->bpc_prop) {
+ u8 bpc;
+
+ bpc = zynqmp_dp_set_bpc(dp, val);
+ if (bpc) {
+ /* Reflect the fallback bpc back into the property */
+ drm_object_property_set_value(&connector->base,
+ property, bpc);
+ /*
+ * Fix: 'ret' was previously set here but the function
+ * unconditionally returned 0, silently swallowing the
+ * rejection. Propagate the error to the caller.
+ */
+ ret = -EINVAL;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+/* Read back the driver-private "sync" and "bpc" connector properties. */
+static int
+zynqmp_dp_connector_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct zynqmp_dp *dp = connector_to_dp(connector);
+
+ if (property == dp->sync_prop)
+ *val = zynqmp_dp_get_sync_mode(dp);
+ else if (property == dp->bpc_prop)
+ *val = zynqmp_dp_get_bpc(dp);
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct drm_connector_funcs zynqmp_dp_connector_funcs = {
+ .detect = zynqmp_dp_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = zynqmp_dp_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_set_property = zynqmp_dp_connector_atomic_set_property,
+ .atomic_get_property = zynqmp_dp_connector_atomic_get_property,
+};
+
+/*
+ * Constified: drm_connector_helper_add() takes a const pointer and the
+ * table is never modified at runtime, so it belongs in rodata.
+ */
+static const struct drm_connector_helper_funcs zynqmp_dp_connector_helper_funcs = {
+ .get_modes = zynqmp_dp_connector_get_modes,
+ .best_encoder = zynqmp_dp_connector_best_encoder,
+ .mode_valid = zynqmp_dp_connector_mode_valid,
+};
+
+/*
+ * DRM encoder functions
+ */
+
+/*
+ * Power up the PHY and sink, run link training, then enable the main
+ * stream. Called with the display runtime-PM active for the duration.
+ */
+static void zynqmp_dp_encoder_enable(struct drm_encoder *encoder)
+{
+ struct zynqmp_dp *dp = encoder_to_dp(encoder);
+ void __iomem *iomem = dp->iomem;
+ unsigned int i;
+ int ret = 0;
+
+ pm_runtime_get_sync(dp->dev);
+ dp->enabled = true;
+ zynqmp_dp_init_aux(dp);
+ zynqmp_dp_update_misc(dp);
+ if (zynqmp_disp_aud_enabled(dp->dpsub->disp))
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUDIO_CONTROL, 1);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_PHY_POWER_DOWN, 0);
+ if (dp->status == connector_status_connected) {
+ /* Sinks may NAK the first D0 write while waking; retry */
+ for (i = 0; i < 3; i++) {
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER,
+ DP_SET_POWER_D0);
+ if (ret == 1)
+ break;
+ usleep_range(300, 500);
+ }
+ /* Some monitors take time to wake up properly */
+ msleep(zynqmp_dp_power_on_delay_ms);
+ }
+ /*
+ * NOTE(review): when disconnected, ret stays 0, so the "DP aux
+ * failed" message is also printed (and training skipped) for the
+ * no-sink case — confirm that is intended.
+ */
+ if (ret != 1)
+ dev_dbg(dp->dev, "DP aux failed\n");
+ else
+ zynqmp_dp_train_loop(dp);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_SW_RESET,
+ ZYNQMP_DP_TX_SW_RESET_ALL);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_ENABLE_MAIN_STREAM, 1);
+}
+
+/* Reverse of enable: stop the stream, put the sink in D3, power down PHY. */
+static void zynqmp_dp_encoder_disable(struct drm_encoder *encoder)
+{
+ struct zynqmp_dp *dp = encoder_to_dp(encoder);
+ void __iomem *iomem = dp->iomem;
+
+ dp->enabled = false;
+ cancel_delayed_work(&dp->hpd_work);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_ENABLE_MAIN_STREAM, 0);
+ drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER, DP_SET_POWER_D3);
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_PHY_POWER_DOWN,
+ ZYNQMP_DP_TX_PHY_POWER_DOWN_ALL);
+ if (zynqmp_disp_aud_enabled(dp->dpsub->disp))
+ zynqmp_dp_write(iomem, ZYNQMP_DP_TX_AUDIO_CONTROL, 0);
+ pm_runtime_put_sync(dp->dev);
+}
+
+/*
+ * Re-validate bandwidth (bpp/format may have changed since mode_valid),
+ * then configure the link and the transfer unit for the adjusted mode.
+ */
+static void
+zynqmp_dp_encoder_atomic_mode_set(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *connector_state)
+{
+ struct zynqmp_dp *dp = encoder_to_dp(encoder);
+ struct drm_display_mode *mode = &crtc_state->mode;
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ u8 max_lanes = dp->link_config.max_lanes;
+ u8 bpp = dp->config.bpp;
+ int rate, max_rate = dp->link_config.max_rate;
+ int ret;
+
+ /* Check again as bpp or format might have been changed */
+ rate = zynqmp_dp_max_rate(max_rate, max_lanes, bpp);
+ if (mode->clock > rate) {
+ /* Warn only; the mode is still programmed below */
+ dev_err(dp->dev, "the mode, %s,has too high pixel rate\n",
+ mode->name);
+ drm_mode_debug_printmodeline(mode);
+ }
+
+ ret = zynqmp_dp_mode_configure(dp, adjusted_mode->clock, 0);
+ if (ret < 0)
+ return;
+
+ zynqmp_dp_encoder_mode_set_transfer_unit(dp, adjusted_mode);
+}
+
+/* Minimum horizontal backporch enforced by the driver (in pixels) */
+#define ZYNQMP_DP_MIN_H_BACKPORCH 20
+
+/*
+ * Widen too-small horizontal backporches by stretching htotal, scaling the
+ * pixel clock to keep the refresh rate constant.
+ */
+static int
+zynqmp_dp_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_display_mode *mode = &crtc_state->mode;
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ int diff = mode->htotal - mode->hsync_end;
+
+ /*
+ * ZynqMP DP requires horizontal backporch to be greater than 12.
+ * This limitation may not be compatible with the sink device.
+ * NOTE(review): the macro enforces 20, not 12 — presumably extra
+ * margin over the hardware minimum; confirm against the IP spec.
+ */
+ if (diff < ZYNQMP_DP_MIN_H_BACKPORCH) {
+ int vrefresh = (adjusted_mode->clock * 1000) /
+ (adjusted_mode->vtotal * adjusted_mode->htotal);
+
+ /* NOTE(review): message prints the delta, not the new value */
+ dev_dbg(encoder->dev->dev, "hbackporch adjusted: %d to %d",
+ diff, ZYNQMP_DP_MIN_H_BACKPORCH - diff);
+ diff = ZYNQMP_DP_MIN_H_BACKPORCH - diff;
+ adjusted_mode->htotal += diff;
+ adjusted_mode->clock = adjusted_mode->vtotal *
+ adjusted_mode->htotal * vrefresh / 1000;
+ }
+
+ return 0;
+}
+
+/* Encoder vtables: plain cleanup on destroy, atomic helpers for the rest */
+static const struct drm_encoder_funcs zynqmp_dp_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static const struct drm_encoder_helper_funcs zynqmp_dp_encoder_helper_funcs = {
+ .enable = zynqmp_dp_encoder_enable,
+ .disable = zynqmp_dp_encoder_disable,
+ .atomic_mode_set = zynqmp_dp_encoder_atomic_mode_set,
+ .atomic_check = zynqmp_dp_encoder_atomic_check,
+};
+
+/*
+ * Component functions
+ */
+
+/* Deferred HPD handler: notify DRM outside of the interrupt context. */
+static void zynqmp_dp_hpd_work_func(struct work_struct *work)
+{
+ struct zynqmp_dp *dp;
+
+ dp = container_of(work, struct zynqmp_dp, hpd_work.work);
+
+ /* dp->drm is only set once bound to a DRM device */
+ if (dp->drm)
+ drm_helper_hpd_irq_event(dp->drm);
+}
+
+/*
+ * Values for the "bpc" connector property. Constified: the table is
+ * read-only and drm_property_create_enum() takes a const pointer.
+ */
+static const struct drm_prop_enum_list zynqmp_dp_bpc_enum[] = {
+ { 6, "6BPC" },
+ { 8, "8BPC" },
+ { 10, "10BPC" },
+ { 12, "12BPC" },
+};
+
+/*
+ * Component bind: create the encoder/connector pair, register the driver
+ * properties, and arm the AUX/interrupt machinery. A zero num_lanes means
+ * no DP Tx PHY is wired up, in which case the DP side is skipped entirely.
+ */
+int zynqmp_dp_bind(struct device *dev, struct device *master, void *data)
+{
+ struct zynqmp_dpsub *dpsub = dev_get_drvdata(dev);
+ struct zynqmp_dp *dp = dpsub->dp;
+ struct drm_encoder *encoder = &dp->encoder;
+ struct drm_connector *connector = &dp->connector;
+ struct drm_device *drm = data;
+ struct device_node *port;
+ int ret;
+
+ if (!dp->num_lanes)
+ return 0;
+
+ encoder->possible_crtcs |= zynqmp_disp_get_crtc_mask(dpsub->disp);
+ for_each_child_of_node(dev->of_node, port) {
+ if (!port->name || of_node_cmp(port->name, "port"))
+ continue;
+ encoder->possible_crtcs |= drm_of_find_possible_crtcs(drm,
+ port);
+ }
+ /* NOTE(review): drm_encoder_init() return value is not checked */
+ drm_encoder_init(drm, encoder, &zynqmp_dp_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+ drm_encoder_helper_add(encoder, &zynqmp_dp_encoder_helper_funcs);
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ ret = drm_connector_init(encoder->dev, connector,
+ &zynqmp_dp_connector_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort);
+ if (ret) {
+ dev_err(dp->dev, "failed to initialize the drm connector");
+ goto error_encoder;
+ }
+
+ drm_connector_helper_add(connector, &zynqmp_dp_connector_helper_funcs);
+ drm_connector_register(connector);
+ drm_connector_attach_encoder(connector, encoder);
+ connector->dpms = DRM_MODE_DPMS_OFF;
+
+ dp->drm = drm;
+ /* NOTE(review): property creation can return NULL; not checked here */
+ dp->sync_prop = drm_property_create_bool(drm, 0, "sync");
+ dp->bpc_prop = drm_property_create_enum(drm, 0, "bpc",
+ zynqmp_dp_bpc_enum,
+ ARRAY_SIZE(zynqmp_dp_bpc_enum));
+
+ /* Default to asynchronous clock mode and 8 bpc */
+ dp->config.misc0 &= ~ZYNQMP_DP_TX_MAIN_STREAM_MISC0_SYNC;
+ drm_object_attach_property(&connector->base, dp->sync_prop, false);
+ ret = zynqmp_dp_set_bpc(dp, 8);
+ drm_object_attach_property(&connector->base, dp->bpc_prop,
+ ret ? ret : 8);
+ zynqmp_dp_update_bpp(dp);
+
+ INIT_DELAYED_WORK(&dp->hpd_work, zynqmp_dp_hpd_work_func);
+
+ /* This enables interrupts, so should be called after DRM init */
+ ret = zynqmp_dp_init_aux(dp);
+ if (ret) {
+ dev_err(dp->dev, "failed to initialize DP aux");
+ goto error_prop;
+ }
+
+ return 0;
+
+error_prop:
+ drm_property_destroy(dp->drm, dp->bpc_prop);
+ drm_property_destroy(dp->drm, dp->sync_prop);
+ zynqmp_dp_connector_destroy(&dp->connector);
+error_encoder:
+ drm_encoder_cleanup(&dp->encoder);
+ return ret;
+}
+
+/* Component unbind: tear down in reverse order of zynqmp_dp_bind(). */
+void zynqmp_dp_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct zynqmp_dpsub *dpsub = dev_get_drvdata(dev);
+ struct zynqmp_dp *dp = dpsub->dp;
+
+ /* IRQ is requested in probe even without lanes, so disable it first */
+ disable_irq(dp->irq);
+ if (!dp->num_lanes)
+ return;
+
+ cancel_delayed_work_sync(&dp->hpd_work);
+ zynqmp_dp_exit_aux(dp);
+ drm_property_destroy(dp->drm, dp->bpc_prop);
+ drm_property_destroy(dp->drm, dp->sync_prop);
+ zynqmp_dp_connector_destroy(&dp->connector);
+ drm_encoder_cleanup(&dp->encoder);
+}
+
+/*
+ * Platform functions
+ */
+
+/*
+ * Threaded IRQ handler: acknowledge pending DP Tx interrupts, forward
+ * vblank/HPD events, and retrain the link on an HPD_IRQ when the sink
+ * reports a link status change or a failed CR/EQ state.
+ */
+static irqreturn_t zynqmp_dp_irq_handler(int irq, void *data)
+{
+ struct zynqmp_dp *dp = (struct zynqmp_dp *)data;
+ u32 status, mask;
+
+ status = zynqmp_dp_read(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_STATUS);
+ mask = zynqmp_dp_read(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_MASK);
+ if (!(status & ~mask))
+ return IRQ_NONE;
+
+ /* dbg for diagnostic, but not much that the driver can do */
+ if (status & ZYNQMP_DP_TX_INTR_CHBUF_UNDERFLW_MASK)
+ dev_dbg_ratelimited(dp->dev, "underflow interrupt\n");
+ if (status & ZYNQMP_DP_TX_INTR_CHBUF_OVERFLW_MASK)
+ dev_dbg_ratelimited(dp->dev, "overflow interrupt\n");
+
+ /* Write-1-to-clear the handled interrupt bits */
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_SUB_TX_INTR_STATUS, status);
+
+ /* The DP vblank will not be enabled with remote crtc device */
+ if (status & ZYNQMP_DP_TX_INTR_VBLANK_START)
+ zynqmp_disp_handle_vblank(dp->dpsub->disp);
+
+ if (status & ZYNQMP_DP_TX_INTR_HPD_EVENT)
+ schedule_delayed_work(&dp->hpd_work, 0);
+
+ if (status & ZYNQMP_DP_TX_INTR_HPD_IRQ) {
+ int ret;
+ /*
+ * Fix: renamed from 'status', which shadowed the u32 interrupt
+ * status above — an accident waiting to happen on future edits.
+ * Buffer covers DP_SINK_COUNT through the link status block, so
+ * index [4] is DP_LANE_ALIGN_STATUS_UPDATED and [2] starts the
+ * per-lane status used by the CR/EQ helpers.
+ */
+ u8 link_status[DP_LINK_STATUS_SIZE + 2];
+
+ ret = drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT, link_status,
+ DP_LINK_STATUS_SIZE + 2);
+ if (ret < 0)
+ goto handled;
+
+ if (link_status[4] & DP_LINK_STATUS_UPDATED ||
+ !drm_dp_clock_recovery_ok(&link_status[2],
+ dp->mode.lane_cnt) ||
+ !drm_dp_channel_eq_ok(&link_status[2],
+ dp->mode.lane_cnt)) {
+ zynqmp_dp_train_loop(dp);
+ }
+ }
+
+handled:
+ return IRQ_HANDLED;
+}
+
+/*
+ * Probe the DP Tx: map registers, quiesce the hardware, acquire the PHY
+ * lanes (the second lane, or even both, may legitimately be absent),
+ * register the AUX channel and request the IRQ.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int zynqmp_dp_probe(struct platform_device *pdev)
+{
+ struct zynqmp_dpsub *dpsub;
+ struct zynqmp_dp *dp;
+ struct resource *res;
+ unsigned int i;
+ int irq, ret;
+
+ dp = devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL);
+ if (!dp)
+ return -ENOMEM;
+
+ dp->dpms = DRM_MODE_DPMS_OFF;
+ dp->status = connector_status_disconnected;
+ dp->dev = &pdev->dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dp");
+ dp->iomem = devm_ioremap_resource(dp->dev, res);
+ if (IS_ERR(dp->iomem))
+ return PTR_ERR(dp->iomem);
+
+ /* Put the PHY and the Tx into a known quiescent state */
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_PHY_POWER_DOWN,
+ ZYNQMP_DP_TX_PHY_POWER_DOWN_ALL);
+ zynqmp_dp_set(dp->iomem, ZYNQMP_DP_TX_PHY_CONFIG,
+ ZYNQMP_DP_TX_PHY_CONFIG_ALL_RESET);
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_FORCE_SCRAMBLER_RESET, 1);
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_ENABLE, 0);
+
+ dp->num_lanes = 2;
+ for (i = 0; i < ZYNQMP_DP_MAX_LANES; i++) {
+ char phy_name[16];
+
+ snprintf(phy_name, sizeof(phy_name), "dp-phy%d", i);
+ dp->phy[i] = devm_phy_get(dp->dev, phy_name);
+ if (IS_ERR(dp->phy[i])) {
+ ret = PTR_ERR(dp->phy[i]);
+ dp->phy[i] = NULL;
+
+ /* 2nd lane is optional */
+ if (i == 1 && ret == -ENODEV) {
+ dp->num_lanes = 1;
+ break;
+ }
+
+ /*
+ * If no phy lane is assigned, the DP Tx gets disabled.
+ * The display part of the DP subsystem can still be
+ * used to drive the output to FPGA, thus let the DP
+ * subsystem driver to proceed without this DP Tx.
+ */
+ if (i == 0 && ret == -ENODEV) {
+ dp->num_lanes = 0;
+ goto out;
+ }
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(dp->dev, "failed to get phy lane\n");
+
+ return ret;
+ }
+ }
+
+ ret = zynqmp_dp_init_phy(dp);
+ if (ret)
+ goto error_phy;
+
+ dp->aux.name = "ZynqMP DP AUX";
+ dp->aux.dev = dp->dev;
+ dp->aux.transfer = zynqmp_dp_aux_transfer;
+ ret = drm_dp_aux_register(&dp->aux);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to initialize DP aux\n");
+ /*
+ * Fix: previously jumped to 'error', which unregistered the
+ * aux channel that just failed to register.
+ */
+ goto error_phy;
+ }
+
+out:
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto error;
+ }
+
+ ret = devm_request_threaded_irq(dp->dev, irq, NULL,
+ zynqmp_dp_irq_handler, IRQF_ONESHOT,
+ dev_name(dp->dev), dp);
+ if (ret < 0)
+ goto error;
+ dp->irq = irq;
+
+ dpsub = platform_get_drvdata(pdev);
+ dpsub->dp = dp;
+ dp->dpsub = dpsub;
+
+ dev_dbg(dp->dev,
+ "ZynqMP DisplayPort Tx driver probed with %u phy lanes\n",
+ dp->num_lanes);
+
+ return 0;
+
+error:
+ /*
+ * Fix: the aux channel is only registered when at least one PHY lane
+ * exists; the num_lanes == 0 path (goto out) must not unregister it.
+ */
+ if (dp->num_lanes)
+ drm_dp_aux_unregister(&dp->aux);
+error_phy:
+ zynqmp_dp_exit_phy(dp);
+ return ret;
+}
+
+/* Remove the DP Tx: disable the transmitter and release AUX and PHY. */
+int zynqmp_dp_remove(struct platform_device *pdev)
+{
+ struct zynqmp_dpsub *dpsub = platform_get_drvdata(pdev);
+ struct zynqmp_dp *dp = dpsub->dp;
+
+ zynqmp_dp_write(dp->iomem, ZYNQMP_DP_TX_ENABLE, 0);
+ drm_dp_aux_unregister(&dp->aux);
+ zynqmp_dp_exit_phy(dp);
+ dpsub->dp = NULL;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.h b/drivers/gpu/drm/xlnx/zynqmp_dp.h
new file mode 100644
index 000000000000..2f6ce3f3e8cf
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ZynqMP DisplayPort Driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ZYNQMP_DP_H_
+#define _ZYNQMP_DP_H_
+
+struct zynqmp_dp;
+struct drm_display_mode;
+
+/*
+ * NOTE(review): the 'const' qualifier on this return type is meaningless
+ * for a by-value int; consider dropping it (requires matching definition).
+ */
+const int zynqmp_dp_set_color(struct zynqmp_dp *dp, const char *color);
+void zynqmp_dp_enable_vblank(struct zynqmp_dp *dp);
+void zynqmp_dp_disable_vblank(struct zynqmp_dp *dp);
+void zynqmp_dp_encoder_mode_set_stream(struct zynqmp_dp *dp,
+ struct drm_display_mode *mode);
+void __maybe_unused zynqmp_dp_pm_suspend(struct zynqmp_dp *dp);
+void __maybe_unused zynqmp_dp_pm_resume(struct zynqmp_dp *dp);
+int zynqmp_dp_bind(struct device *dev, struct device *master, void *data);
+void zynqmp_dp_unbind(struct device *dev, struct device *master, void *data);
+
+int zynqmp_dp_probe(struct platform_device *pdev);
+int zynqmp_dp_remove(struct platform_device *pdev);
+
+#endif /* _ZYNQMP_DP_H_ */
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
new file mode 100644
index 000000000000..9b3545348f7b
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ZynqMP DP Subsystem Driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "xlnx_drv.h"
+
+#include "zynqmp_disp.h"
+#include "zynqmp_dp.h"
+#include "zynqmp_dpsub.h"
+
+/*
+ * Bind both sub-components to the DRM device. The display side binds
+ * first so that the DP encoder can find its CRTC.
+ */
+static int
+zynqmp_dpsub_bind(struct device *dev, struct device *master, void *data)
+{
+ int ret = zynqmp_disp_bind(dev, master, data);
+
+ if (ret)
+ return ret;
+
+ /* zynqmp_disp should bind first, so zynqmp_dp encoder can find crtc */
+ return zynqmp_dp_bind(dev, master, data);
+}
+
+/* Unbind in reverse order of zynqmp_dpsub_bind(). */
+static void
+zynqmp_dpsub_unbind(struct device *dev, struct device *master, void *data)
+{
+ zynqmp_dp_unbind(dev, master, data);
+ zynqmp_disp_unbind(dev, master, data);
+}
+
+/* Component ops registered with the xlnx DRM master via component_add() */
+static const struct component_ops zynqmp_dpsub_component_ops = {
+ .bind = zynqmp_dpsub_bind,
+ .unbind = zynqmp_dpsub_unbind,
+};
+
+/*
+ * Probe the DP subsystem: probe the DP Tx first (so the display can pick
+ * its output format), then the display, register the component, and bring
+ * up the sound children and the DRM pipeline.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int zynqmp_dpsub_probe(struct platform_device *pdev)
+{
+ struct zynqmp_dpsub *dpsub;
+ int ret;
+
+ dpsub = devm_kzalloc(&pdev->dev, sizeof(*dpsub), GFP_KERNEL);
+ if (!dpsub)
+ return -ENOMEM;
+
+ /* Sub-driver will access dpsub from drvdata */
+ platform_set_drvdata(pdev, dpsub);
+ pm_runtime_enable(&pdev->dev);
+
+ /*
+ * DP should be probed first so that the zynqmp_disp can set the output
+ * format accordingly.
+ */
+ ret = zynqmp_dp_probe(pdev);
+ if (ret)
+ goto err_pm;
+
+ ret = zynqmp_disp_probe(pdev);
+ if (ret)
+ goto err_dp;
+
+ ret = component_add(&pdev->dev, &zynqmp_dpsub_component_ops);
+ if (ret)
+ goto err_disp;
+
+ /* Try the reserved memory. Proceed if there's none */
+ of_reserved_mem_device_init(&pdev->dev);
+
+ /* Populate the sound child nodes */
+ ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to populate child nodes\n");
+ goto err_rmem;
+ }
+
+ dpsub->master = xlnx_drm_pipeline_init(pdev);
+ if (IS_ERR(dpsub->master)) {
+ /*
+ * Fix: 'ret' previously still held 0 from the populate call,
+ * so probe reported success while unwinding. Propagate the
+ * pipeline error instead.
+ */
+ ret = PTR_ERR(dpsub->master);
+ dev_err(&pdev->dev, "failed to initialize the drm pipeline\n");
+ goto err_populate;
+ }
+
+ dev_info(&pdev->dev, "ZynqMP DisplayPort Subsystem driver probed");
+
+ return 0;
+
+err_populate:
+ of_platform_depopulate(&pdev->dev);
+err_rmem:
+ of_reserved_mem_device_release(&pdev->dev);
+ component_del(&pdev->dev, &zynqmp_dpsub_component_ops);
+err_disp:
+ zynqmp_disp_remove(pdev);
+err_dp:
+ zynqmp_dp_remove(pdev);
+err_pm:
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+/*
+ * Remove the DP subsystem, tearing down both sub-drivers. A failure from
+ * either sub-remove is reported as -EIO after both have been attempted.
+ */
+static int zynqmp_dpsub_remove(struct platform_device *pdev)
+{
+ struct zynqmp_dpsub *dpsub = platform_get_drvdata(pdev);
+ int err, ret = 0;
+
+ xlnx_drm_pipeline_exit(dpsub->master);
+ of_platform_depopulate(&pdev->dev);
+ of_reserved_mem_device_release(&pdev->dev);
+ component_del(&pdev->dev, &zynqmp_dpsub_component_ops);
+
+ err = zynqmp_disp_remove(pdev);
+ if (err)
+ ret = -EIO;
+
+ err = zynqmp_dp_remove(pdev);
+ if (err)
+ ret = -EIO;
+
+ pm_runtime_disable(&pdev->dev);
+
+ /*
+ * Fix: previously returned 'err' (the last sub-remove result), which
+ * silently dropped a failure from zynqmp_disp_remove(). Return the
+ * accumulated status instead.
+ */
+ return ret;
+}
+
+/* System sleep: forward the suspend request to the DP sub-driver. */
+static int __maybe_unused zynqmp_dpsub_pm_suspend(struct device *dev)
+{
+ struct zynqmp_dpsub *dpsub =
+ platform_get_drvdata(to_platform_device(dev));
+
+ zynqmp_dp_pm_suspend(dpsub->dp);
+
+ return 0;
+}
+
+/* System wake: forward the resume request to the DP sub-driver. */
+static int __maybe_unused zynqmp_dpsub_pm_resume(struct device *dev)
+{
+ struct zynqmp_dpsub *dpsub =
+ platform_get_drvdata(to_platform_device(dev));
+
+ zynqmp_dp_pm_resume(dpsub->dp);
+
+ return 0;
+}
+
+/* System sleep hooks; runtime PM is handled by the sub-drivers */
+static const struct dev_pm_ops zynqmp_dpsub_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(zynqmp_dpsub_pm_suspend,
+ zynqmp_dpsub_pm_resume)
+};
+
+static const struct of_device_id zynqmp_dpsub_of_match[] = {
+ { .compatible = "xlnx,zynqmp-dpsub-1.7", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, zynqmp_dpsub_of_match);
+
+static struct platform_driver zynqmp_dpsub_driver = {
+ .probe = zynqmp_dpsub_probe,
+ .remove = zynqmp_dpsub_remove,
+ .driver = {
+ .name = "zynqmp-display",
+ .of_match_table = zynqmp_dpsub_of_match,
+ .pm = &zynqmp_dpsub_pm_ops,
+ },
+};
+
+module_platform_driver(zynqmp_dpsub_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("ZynqMP DP Subsystem Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.h b/drivers/gpu/drm/xlnx/zynqmp_dpsub.h
new file mode 100644
index 000000000000..6606beffee15
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ZynqMP DPSUB Subsystem Driver
+ *
+ * Copyright (C) 2017 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ZYNQMP_DPSUB_H_
+#define _ZYNQMP_DPSUB_H_
+
+/*
+ * struct zynqmp_dpsub - DP subsystem instance, shared via drvdata
+ * @dp: DisplayPort Tx sub-driver state
+ * @disp: display (blending/buffer) sub-driver state
+ * @master: xlnx DRM pipeline device returned by xlnx_drm_pipeline_init()
+ */
+struct zynqmp_dpsub {
+ struct zynqmp_dp *dp;
+ struct zynqmp_disp *disp;
+ struct platform_device *master;
+};
+
+#endif /* _ZYNQMP_DPSUB_H_ */
diff --git a/drivers/gpu/drm/zocl/Kconfig b/drivers/gpu/drm/zocl/Kconfig
new file mode 100644
index 000000000000..6a54d01cccd1
--- /dev/null
+++ b/drivers/gpu/drm/zocl/Kconfig
@@ -0,0 +1,8 @@
+config DRM_ZOCL
+ tristate "Xilinx Zynq OpenCL"
+ depends on DRM
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
+ help
+ Xilinx Zynq OpenCL Manager
diff --git a/drivers/gpu/drm/zocl/Makefile b/drivers/gpu/drm/zocl/Makefile
new file mode 100644
index 000000000000..da58e5084f9d
--- /dev/null
+++ b/drivers/gpu/drm/zocl/Makefile
@@ -0,0 +1,4 @@
+ccflags-y := -Iinclude/drm
+zocl-y := zocl_drv.o zocl_bo.o
+
+obj-$(CONFIG_DRM_ZOCL) += zocl.o
diff --git a/drivers/gpu/drm/zocl/zocl_bo.c b/drivers/gpu/drm/zocl/zocl_bo.c
new file mode 100644
index 000000000000..bd4d80223fa8
--- /dev/null
+++ b/drivers/gpu/drm/zocl/zocl_bo.c
@@ -0,0 +1,271 @@
+/*
+ * A GEM style CMA backed memory manager for ZynQ based OpenCL accelerators.
+ *
+ * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
+ *
+ * Authors:
+ * Sonal Santan <sonal.santan@xilinx.com>
+ * Umang Parekh <umang.parekh@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/module.h>
+#include <linux/ramfs.h>
+#include <linux/shmem_fs.h>
+#include "zocl_drv.h"
+#include <drm/drmP.h>
+#include <drm/drm_gem.h>
+#include <linux/zocl_ioctl.h>
+
+/* Convert a 64-bit ioctl-supplied address into a user-space pointer. */
+static inline void __user *to_user_ptr(u64 address)
+{
+	return (void __user *)(uintptr_t)address;
+}
+
+/* Log a BO's host size (KB) and device (CMA physical) address. */
+void zocl_describe(const struct drm_zocl_bo *obj)
+{
+	size_t size_in_kb = obj->base.base.size / 1024;
+
+	/* Print paddr at full width; assigning it to size_t truncated the
+	 * address on 32-bit kernels with a 64-bit dma_addr_t (e.g. LPAE).
+	 */
+	DRM_INFO("%p: H[0x%zxKB] D[0x%llx]\n",
+		 obj,
+		 size_in_kb,
+		 (unsigned long long)obj->base.paddr);
+}
+
+/* Allocate a page-aligned CMA-backed BO; returns ERR_PTR on failure. */
+static struct drm_zocl_bo *zocl_create_bo(struct drm_device *dev,
+					  uint64_t unaligned_size)
+{
+	size_t size = PAGE_ALIGN(unaligned_size);
+	struct drm_gem_cma_object *cma_obj;
+
+	DRM_DEBUG("%s:%s:%d: %zd\n", __FILE__, __func__, __LINE__, size);
+
+	if (!size)
+		return ERR_PTR(-EINVAL);
+
+	cma_obj = drm_gem_cma_create(dev, size);
+	if (IS_ERR(cma_obj))
+		/* Propagate the real error instead of unconditionally
+		 * reporting -ENOMEM.
+		 */
+		return ERR_CAST(cma_obj);
+
+	return to_zocl_bo(&cma_obj->base);
+}
+
+/* DRM_IOCTL_ZOCL_CREATE_BO: allocate a coherent CMA BO and return a
+ * GEM handle for it in args->handle.
+ */
+int zocl_create_bo_ioctl(struct drm_device *dev,
+		void *data,
+		struct drm_file *filp)
+{
+	int ret;
+	struct drm_zocl_create_bo *args = data;
+	struct drm_zocl_bo *bo;
+
+	/* Only coherent, CMA-backed allocations are supported */
+	if (((args->flags & DRM_ZOCL_BO_FLAGS_COHERENT) == 0) ||
+	    ((args->flags & DRM_ZOCL_BO_FLAGS_CMA) == 0))
+		return -EINVAL;
+
+	bo = zocl_create_bo(dev, args->size);
+	/* Check for failure BEFORE touching the object: the original code
+	 * wrote bo->flags first, dereferencing an ERR_PTR on allocation
+	 * failure.
+	 */
+	if (IS_ERR(bo)) {
+		DRM_DEBUG("object creation failed\n");
+		return PTR_ERR(bo);
+	}
+	bo->flags |= DRM_ZOCL_BO_FLAGS_COHERENT;
+	bo->flags |= DRM_ZOCL_BO_FLAGS_CMA;
+
+	DRM_DEBUG("%s:%s:%d: %p\n", __FILE__, __func__, __LINE__, bo);
+
+	ret = drm_gem_handle_create(filp, &bo->base.base, &args->handle);
+	if (ret) {
+		drm_gem_cma_free_object(&bo->base.base);
+		DRM_DEBUG("handle creation failed\n");
+		return ret;
+	}
+
+	zocl_describe(bo);
+	/* Drop the lookup reference; the handle keeps the BO alive */
+	drm_gem_object_unreference_unlocked(&bo->base.base);
+
+	return ret;
+}
+
+/* DRM_IOCTL_ZOCL_MAP_BO: return the fake mmap offset for a BO so user
+ * space can mmap() it through the DRM file descriptor.
+ */
+int zocl_map_bo_ioctl(struct drm_device *dev,
+		void *data,
+		struct drm_file *filp)
+{
+	struct drm_zocl_map_bo *args = data;
+	struct drm_gem_object *gem_obj;
+
+	DRM_DEBUG("%s:%s:%d: %p\n", __FILE__, __func__, __LINE__, data);
+	gem_obj = drm_gem_object_lookup(dev, filp, args->handle);
+	if (!gem_obj) {
+		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+		return -EINVAL;
+	}
+
+	/* The mmap offset was set up at BO allocation time. */
+	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+	zocl_describe(to_zocl_bo(gem_obj));
+	drm_gem_object_unreference_unlocked(gem_obj);
+
+	return 0;
+}
+
+/* DRM_IOCTL_ZOCL_SYNC_BO: flush (TO_DEVICE) or invalidate (FROM_DEVICE)
+ * a user-specified sub-range of a BO's kernel mapping.
+ */
+int zocl_sync_bo_ioctl(struct drm_device *dev,
+		void *data,
+		struct drm_file *filp)
+{
+	const struct drm_zocl_sync_bo *args = data;
+	struct drm_gem_object *gem_obj = drm_gem_object_lookup(dev, filp,
+							       args->handle);
+	void *kaddr;
+	int ret = 0;
+
+	DRM_DEBUG("%s:%s:%d: %p\n", __FILE__, __func__, __LINE__, data);
+
+	if (!gem_obj) {
+		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+		return -EINVAL;
+	}
+
+	/* Bounds-check the untrusted range.  NOTE(review): the third test
+	 * assumes offset + size does not wrap; consider an explicit
+	 * overflow check.
+	 */
+	if ((args->offset > gem_obj->size) || (args->size > gem_obj->size) ||
+	    ((args->offset + args->size) > gem_obj->size)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	kaddr = drm_gem_cma_prime_vmap(gem_obj);
+
+	/* only invalidate the range of addresses requested by the user */
+	kaddr += args->offset;
+
+	if (args->dir == DRM_ZOCL_SYNC_BO_TO_DEVICE)
+		flush_kernel_vmap_range(kaddr, args->size);
+	else if (args->dir == DRM_ZOCL_SYNC_BO_FROM_DEVICE)
+		invalidate_kernel_vmap_range(kaddr, args->size);
+	else
+		ret = -EINVAL;
+
+out:
+	drm_gem_object_unreference_unlocked(gem_obj);
+
+	return ret;
+}
+
+/* DRM_IOCTL_ZOCL_INFO_BO: report a BO's size and CMA physical address. */
+int zocl_info_bo_ioctl(struct drm_device *dev,
+		void *data,
+		struct drm_file *filp)
+{
+	const struct drm_zocl_bo *bo;
+	struct drm_zocl_info_bo *args = data;
+	struct drm_gem_object *gem_obj = drm_gem_object_lookup(dev, filp,
+							       args->handle);
+
+	DRM_DEBUG("%s:%s:%d: %p\n", __FILE__, __func__, __LINE__, data);
+
+	if (!gem_obj) {
+		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+		return -EINVAL;
+	}
+
+	bo = to_zocl_bo(gem_obj);
+
+	args->size = bo->base.base.size;
+	args->paddr = bo->base.paddr;
+	drm_gem_object_unreference_unlocked(gem_obj);
+
+	return 0;
+}
+
+/* DRM_IOCTL_ZOCL_PWRITE_BO: copy user data into a sub-range of a BO. */
+int zocl_pwrite_bo_ioctl(struct drm_device *dev, void *data,
+		struct drm_file *filp)
+{
+	const struct drm_zocl_pwrite_bo *args = data;
+	struct drm_gem_object *gem_obj = drm_gem_object_lookup(dev, filp,
+							       args->handle);
+	char __user *user_data = to_user_ptr(args->data_ptr);
+	int ret = 0;
+	void *kaddr;
+
+	DRM_DEBUG("%s:%s:%d: %p\n", __FILE__, __func__, __LINE__, data);
+
+	if (!gem_obj) {
+		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+		return -EINVAL;
+	}
+
+	/* Reject ranges that fall outside the BO */
+	if ((args->offset > gem_obj->size) || (args->size > gem_obj->size)
+		|| ((args->offset + args->size) > gem_obj->size)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (args->size == 0) {
+		ret = 0;
+		goto out;
+	}
+
+	if (!access_ok(VERIFY_READ, user_data, args->size)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	kaddr = drm_gem_cma_prime_vmap(gem_obj);
+	kaddr += args->offset;
+
+	/* copy_from_user() returns the number of bytes NOT copied; map any
+	 * short copy to -EFAULT instead of leaking a positive byte count
+	 * to user space as the ioctl return value.
+	 */
+	if (copy_from_user(kaddr, user_data, args->size))
+		ret = -EFAULT;
+out:
+	drm_gem_object_unreference_unlocked(gem_obj);
+
+	return ret;
+}
+
+/* DRM_IOCTL_ZOCL_PREAD_BO: copy a sub-range of a BO out to user space. */
+int zocl_pread_bo_ioctl(struct drm_device *dev, void *data,
+		struct drm_file *filp)
+{
+	const struct drm_zocl_pread_bo *args = data;
+	struct drm_gem_object *gem_obj = drm_gem_object_lookup(dev, filp,
+							       args->handle);
+	char __user *user_data = to_user_ptr(args->data_ptr);
+	int ret = 0;
+	void *kaddr;
+
+	DRM_DEBUG("%s:%s:%d: %p\n", __FILE__, __func__, __LINE__, data);
+
+	if (!gem_obj) {
+		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+		return -EINVAL;
+	}
+
+	/* Reject ranges that fall outside the BO */
+	if ((args->offset > gem_obj->size) || (args->size > gem_obj->size)
+		|| ((args->offset + args->size) > gem_obj->size)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (args->size == 0) {
+		ret = 0;
+		goto out;
+	}
+
+	if (!access_ok(VERIFY_WRITE, user_data, args->size)) {
+		/* Was "ret = EFAULT;" — a positive value is not an error
+		 * code; errnos must be returned negated.
+		 */
+		ret = -EFAULT;
+		goto out;
+	}
+
+	kaddr = drm_gem_cma_prime_vmap(gem_obj);
+	kaddr += args->offset;
+
+	/* copy_to_user() returns bytes NOT copied; convert any short copy
+	 * to -EFAULT rather than returning a positive count.
+	 */
+	if (copy_to_user(user_data, kaddr, args->size))
+		ret = -EFAULT;
+
+out:
+	drm_gem_object_unreference_unlocked(gem_obj);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/zocl/zocl_drv.c b/drivers/gpu/drm/zocl/zocl_drv.c
new file mode 100644
index 000000000000..9b05de3343cf
--- /dev/null
+++ b/drivers/gpu/drm/zocl/zocl_drv.c
@@ -0,0 +1,217 @@
+/*
+ * A GEM style CMA backed memory manager for ZynQ based OpenCL accelerators.
+ *
+ * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
+ *
+ * Authors:
+ * Sonal Santan <sonal.santan@xilinx.com>
+ * Umang Parekh <umang.parekh@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/module.h>
+#include <linux/ramfs.h>
+#include <linux/shmem_fs.h>
+#include "zocl_drv.h"
+#include <drm/drmP.h>
+#include <drm/drm_gem.h>
+#include <linux/zocl_ioctl.h>
+
+#define ZOCL_DRIVER_NAME "zocl"
+#define ZOCL_DRIVER_DESC "Zynq BO manager"
+#define ZOCL_DRIVER_DATE "20161024"
+#define ZOCL_DRIVER_MAJOR 2016
+#define ZOCL_DRIVER_MINOR 3
+#define ZOCL_DRIVER_PATCHLEVEL 1
+#define ZOCL_FILE_PAGE_OFFSET 0x00100000
+
+#ifndef VM_RESERVED
+#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
+#endif
+
+static const struct vm_operations_struct reg_physical_vm_ops = {
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+ .access = generic_access_phys,
+#endif
+};
+
+/* DRM load hook: map the device's register window and set up the
+ * per-device private data (struct drm_zocl_dev).
+ */
+static int zocl_drm_load(struct drm_device *drm, unsigned long flags)
+{
+	struct platform_device *pdev;
+	struct resource *res;
+	struct drm_zocl_dev *zdev;
+	void __iomem *map;
+
+	pdev = to_platform_device(drm->dev);
+	/* devm_ioremap_resource() validates res (including NULL) and
+	 * returns an ERR_PTR on failure, so res is not checked here.
+	 */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	map = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(map)) {
+		DRM_ERROR("Failed to map registers: %ld\n", PTR_ERR(map));
+		return PTR_ERR(map);
+	}
+
+	zdev = devm_kzalloc(drm->dev, sizeof(*zdev), GFP_KERNEL);
+	if (!zdev)
+		return -ENOMEM;
+
+	zdev->ddev = drm;
+	drm->dev_private = zdev;
+	zdev->regs = map;
+	zdev->res_start = res->start;
+	zdev->res_len = resource_size(res);
+	platform_set_drvdata(pdev, zdev);
+
+	return 0;
+}
+
+/* DRM unload hook: everything was devm-allocated, nothing to undo. */
+static int zocl_drm_unload(struct drm_device *drm)
+{
+	return 0;
+}
+
+/* GEM free callback: log the BO being released, then delegate to the
+ * CMA helper which unmaps and frees the backing memory.
+ */
+static void zocl_free_object(struct drm_gem_object *obj)
+{
+	struct drm_zocl_bo *zocl_obj = to_zocl_bo(obj);
+
+	DRM_INFO("Freeing BO\n");
+	zocl_describe(zocl_obj);
+	drm_gem_cma_free_object(obj);
+}
+
+/* mmap handler: offsets at or above ZOCL_FILE_PAGE_OFFSET are GEM BO
+ * mappings and are delegated to drm_gem_cma_mmap(); offset 0 maps the
+ * device register window uncached via io_remap_pfn_range().
+ */
+static int zocl_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_zocl_dev *zdev = dev->dev_private;
+	unsigned long vsize;
+	int rc;
+
+	/* If the page offset is > than 4G, then let GEM handle that and do what
+	 * it thinks is best,we will only handle page offsets less than 4G.
+	 */
+	if (likely(vma->vm_pgoff >= ZOCL_FILE_PAGE_OFFSET))
+		return drm_gem_cma_mmap(filp, vma);
+
+	/* Register-window mappings must start at offset 0 */
+	if (vma->vm_pgoff != 0)
+		return -EINVAL;
+
+	/* Cap the mapping to the actual register window length */
+	vsize = vma->vm_end - vma->vm_start;
+	if (vsize > zdev->res_len)
+		return -EINVAL;
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	vma->vm_flags |= VM_IO;
+	vma->vm_flags |= VM_RESERVED;
+
+	vma->vm_ops = &reg_physical_vm_ops;
+	rc = io_remap_pfn_range(vma, vma->vm_start,
+				zdev->res_start >> PAGE_SHIFT,
+				vsize, vma->vm_page_prot);
+
+	return rc;
+}
+
+static const struct drm_ioctl_desc zocl_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(ZOCL_CREATE_BO, zocl_create_bo_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(ZOCL_MAP_BO, zocl_map_bo_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(ZOCL_SYNC_BO, zocl_sync_bo_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(ZOCL_INFO_BO, zocl_info_bo_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(ZOCL_PWRITE_BO, zocl_pwrite_bo_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(ZOCL_PREAD_BO, zocl_pread_bo_ioctl,
+ DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+};
+
+static const struct file_operations zocl_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .mmap = zocl_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .unlocked_ioctl = drm_ioctl,
+ .release = drm_release,
+};
+
+static struct drm_driver zocl_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_PRIME |
+ DRIVER_RENDER,
+ .load = zocl_drm_load,
+ .unload = zocl_drm_unload,
+ .gem_free_object = zocl_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .ioctls = zocl_ioctls,
+ .num_ioctls = ARRAY_SIZE(zocl_ioctls),
+ .fops = &zocl_driver_fops,
+ .name = ZOCL_DRIVER_NAME,
+ .desc = ZOCL_DRIVER_DESC,
+ .date = ZOCL_DRIVER_DATE,
+ .major = ZOCL_DRIVER_MAJOR,
+ .minor = ZOCL_DRIVER_MINOR,
+ .patchlevel = ZOCL_DRIVER_PATCHLEVEL,
+};
+
+/* init xilinx opencl drm platform */
+static int zocl_drm_platform_probe(struct platform_device *pdev)
+{
+	/* Registers the DRM device and invokes the .load hook */
+	return drm_platform_init(&zocl_driver, pdev);
+}
+
+/* exit xilinx opencl drm platform */
+static int zocl_drm_platform_remove(struct platform_device *pdev)
+{
+	struct drm_zocl_dev *zdev = platform_get_drvdata(pdev);
+
+	/* Unregister and drop the DRM device set up in probe/load */
+	if (zdev->ddev) {
+		drm_dev_unregister(zdev->ddev);
+		drm_dev_unref(zdev->ddev);
+	}
+
+	return 0;
+}
+
+static const struct of_device_id zocl_drm_of_match[] = {
+ { .compatible = "xlnx,zocl", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, zocl_drm_of_match);
+
+static struct platform_driver zocl_drm_private_driver = {
+ .probe = zocl_drm_platform_probe,
+ .remove = zocl_drm_platform_remove,
+ .driver = {
+ .name = "zocl-drm",
+ .of_match_table = zocl_drm_of_match,
+ },
+};
+
+module_platform_driver(zocl_drm_private_driver);
+
+MODULE_VERSION(__stringify(ZOCL_DRIVER_MAJOR) "."
+ __stringify(ZOCL_DRIVER_MINOR) "."
+ __stringify(ZOCL_DRIVER_PATCHLEVEL));
+
+MODULE_DESCRIPTION(ZOCL_DRIVER_DESC);
+MODULE_AUTHOR("Sonal Santan <sonal.santan@xilinx.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/zocl/zocl_drv.h b/drivers/gpu/drm/zocl/zocl_drv.h
new file mode 100644
index 000000000000..ef6a9acadfc1
--- /dev/null
+++ b/drivers/gpu/drm/zocl/zocl_drv.h
@@ -0,0 +1,59 @@
+/*
+ * A GEM style CMA backed memory manager for ZynQ based OpenCL accelerators.
+ *
+ * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
+ *
+ * Authors:
+ * Sonal Santan <sonal.santan@xilinx.com>
+ * Umang Parekh <umang.parekh@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ZOCL_DRV_H_
+#define _ZOCL_DRV_H_
+#include <drm/drmP.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_mm.h>
+#include <drm/drm_gem_cma_helper.h>
+
+/**
+ * struct drm_zocl_bo - CMA-backed GEM buffer object with zocl flags
+ * @base: embedded CMA GEM object (must be first; see to_zocl_bo())
+ * @flags: DRM_ZOCL_BO_FLAGS_* set at creation time
+ */
+struct drm_zocl_bo {
+	struct drm_gem_cma_object base;
+	uint32_t flags;
+};
+
+/**
+ * struct drm_zocl_dev - per-device private data
+ * @ddev: owning DRM device
+ * @regs: mapped register window
+ * @res_start: physical start of the register resource
+ * @res_len: length of the register resource
+ * @irq: device interrupt number (not used in the visible code)
+ */
+struct drm_zocl_dev {
+	struct drm_device *ddev;
+	void __iomem *regs;
+	phys_addr_t res_start;
+	resource_size_t res_len;
+	unsigned int irq;
+};
+
+/* Downcast a GEM object to its containing zocl BO; valid because
+ * struct drm_gem_cma_object embeds the GEM object at offset 0.
+ */
+static inline struct drm_zocl_bo *to_zocl_bo(struct drm_gem_object *bo)
+{
+	return (struct drm_zocl_bo *) bo;
+}
+
+/* ioctl handlers implemented in zocl_bo.c */
+int zocl_create_bo_ioctl(struct drm_device *dev, void *data,
+		struct drm_file *filp);
+int zocl_sync_bo_ioctl(struct drm_device *dev, void *data,
+		struct drm_file *filp);
+int zocl_map_bo_ioctl(struct drm_device *dev, void *data,
+		struct drm_file *filp);
+int zocl_info_bo_ioctl(struct drm_device *dev, void *data,
+		struct drm_file *filp);
+int zocl_pwrite_bo_ioctl(struct drm_device *dev, void *data,
+		struct drm_file *filp);
+int zocl_pread_bo_ioctl(struct drm_device *dev, void *data,
+		struct drm_file *filp);
+void zocl_describe(const struct drm_zocl_bo *obj);
+
+#endif
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index de12a565006d..b1f95e192c2a 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -218,6 +218,26 @@ config SENSORS_TPS53679
This driver can also be built as a module. If so, the module will
be called tps53679.
+config SENSORS_TPS544
+ tristate "TI TPS544"
+ help
+ If you say yes here you get hardware monitoring support for Texas
+ Instruments TPS544.
+
+ This driver can also be built as a module. If so, the module will
+ be called tps544.
+
+config SENSORS_TPS544_REGULATOR
+ bool "Regulator support for TPS544"
+ depends on SENSORS_TPS544
+ select REGULATOR
+ help
+ If you say yes here you get regulator support for Texas Instruments
+ TPS544.
+
+ This driver is dependent on TPS544 sensor and it can also be built
+ as a module.
+
config SENSORS_UCD9000
tristate "TI UCD90120, UCD90124, UCD90160, UCD90320, UCD9090, UCD90910"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index 5feb45806123..4f41c81a2729 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
obj-$(CONFIG_SENSORS_PXE1610) += pxe1610.o
obj-$(CONFIG_SENSORS_TPS40422) += tps40422.o
obj-$(CONFIG_SENSORS_TPS53679) += tps53679.o
+obj-$(CONFIG_SENSORS_TPS544) += tps544.o
obj-$(CONFIG_SENSORS_UCD9000) += ucd9000.o
obj-$(CONFIG_SENSORS_UCD9200) += ucd9200.o
obj-$(CONFIG_SENSORS_XDPE122) += xdpe12284.o
diff --git a/drivers/hwmon/pmbus/tps544.c b/drivers/hwmon/pmbus/tps544.c
new file mode 100644
index 000000000000..ad5082efcccd
--- /dev/null
+++ b/drivers/hwmon/pmbus/tps544.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TPS544B25 power regulator driver
+ *
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/regulator/driver.h>
+#include "pmbus.h"
+
+#define TPS544_NUM_PAGES 1
+
+struct tps544_data {
+	struct device *dev;
+	/* Requested VOUT window per page, in microvolts.  These were u16,
+	 * which silently truncated the microvolt values (up to 5300000)
+	 * stored by tps544_regulator_set_voltage(); int holds the range.
+	 */
+	int vout_min[TPS544_NUM_PAGES], vout_max[TPS544_NUM_PAGES];
+	struct pmbus_driver_info info;
+};
+
+/*
+ * struct vlut - per-bucket VOUT configuration (see tps544_vout[])
+ * @vol is the bucket's lower bound in microvolts; all other fields are
+ * raw PMBus word encodings written verbatim to the corresponding
+ * registers in tps544_regulator_set_voltage():
+ * @vloop -> VOUT_SCALE_LOOP, @v_ovfault -> VOUT_OV_FAULT_LIMIT,
+ * @v_ovwarn -> VOUT_OV_WARN_LIMIT, @vmax -> VOUT_MAX,
+ * @mfr_vmin -> TPS544_MFR_VOUT_MIN, @v_uvwarn -> VOUT_UV_WARN_LIMIT,
+ * @v_uvfault -> VOUT_UV_FAULT_LIMIT.
+ */
+struct vlut {
+	int vol;
+	u16 vloop;
+	u16 v_ovfault;
+	u16 v_ovwarn;
+	u16 vmax;
+	u16 mfr_vmin;
+	u16 v_uvwarn;
+	u16 v_uvfault;
+};
+
+#if IS_ENABLED(CONFIG_SENSORS_TPS544_REGULATOR)
+#define TPS544_MFR_VOUT_MIN 0xA4
+#define TPS544_MFR_RESTORE_DEF_ALL 0x12
+#define TPS544_MFR_IOUT_CAL_OFFSET 0x39
+
+#define TPS544_VOUTREAD_MULTIPLIER 1950
+#define TPS544_IOUTREAD_MULTIPLIER 62500
+#define TPS544_IOUTREAD_MASK GENMASK(9, 0)
+
+#define TPS544_VOUT_LIMIT 5300000
+
+#define to_tps544_data(x) container_of(x, struct tps544_data, info)
+
+/*
+ * This currently supports 3 voltage out buckets:
+ * 0.5V to 1.3V
+ * 1.3V to 2.6V
+ * 2.6V to 5.3V
+ * Any requested voltage will be mapped to one of these buckets and
+ * VOUT will be programmed with 0.1V granularity.
+ */
+static const struct vlut tps544_vout[3] = {
+ {500000, 0xF004, 0x0290, 0x0285, 0x0300, 0x0100, 0x00CD, 0x009A},
+ {1300000, 0xF002, 0x059A, 0x0566, 0x0600, 0x0100, 0x0143, 0x0130},
+ {2600000, 0xF001, 0x0B00, 0x0A9A, 0x0A00, 0x0100, 0x0143, 0x0130}
+};
+#endif
+
+/* Thin pass-through to the generic pmbus word read. */
+static int tps544_read_word_data(struct i2c_client *client, int page, int reg)
+{
+	return pmbus_read_word_data(client, page, reg);
+}
+
+/* Thin pass-through to the generic pmbus byte read. */
+static int tps544_read_byte_data(struct i2c_client *client, int page, int reg)
+{
+	return pmbus_read_byte_data(client, page, reg);
+}
+
+/* Thin pass-through to the generic pmbus byte write. */
+static int tps544_write_byte(struct i2c_client *client, int page, u8 byte)
+{
+	return pmbus_write_byte(client, page, byte);
+}
+
+/* Word write hook; currently identical to the generic helper and kept
+ * only as a placeholder for manufacturer-specific registers.
+ */
+static int tps544_write_word_data(struct i2c_client *client, int page,
+		int reg, u16 word)
+{
+	int ret;
+
+	ret = pmbus_write_word_data(client, page, reg, word);
+	/* TODO - Define new PMBUS virtual register entries for these */
+
+	return ret;
+}
+
+#if IS_ENABLED(CONFIG_SENSORS_TPS544_REGULATOR)
+/* Return the raw PMBUS_READ_VOUT word (NOT microvolts; callers such as
+ * tps544_setv_show() scale it by TPS544_VOUTREAD_MULTIPLIER), or a
+ * negative error from the pmbus read.
+ */
+static int tps544_regulator_get_voltage(struct regulator_dev *rdev)
+{
+	struct device *dev = rdev_get_dev(rdev);
+	struct i2c_client *client = to_i2c_client(dev->parent);
+	int page = 0;
+
+	return pmbus_read_word_data(client, page, PMBUS_READ_VOUT);
+}
+
+/* Program the output voltage: pick the bucket covering min_uV, write the
+ * whole limit/scale register set for that bucket, then VOUT_COMMAND.
+ * NOTE(review): the return values of the individual pmbus writes are
+ * ignored; a failed write leaves the device partially configured.
+ */
+static int tps544_regulator_set_voltage(struct regulator_dev *rdev, int min_uV,
+		int max_uV, unsigned int *selector)
+{
+	struct device *dev = rdev_get_dev(rdev);
+	struct i2c_client *client = to_i2c_client(dev->parent);
+	const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+	struct tps544_data *data = to_tps544_data(info);
+	int index, page = 0;
+	u16 vout;
+
+	/* voltage will be set close to min value requested */
+	/* (min_uV * 512) / 1e6: presumably the device's VOUT encoding with
+	 * a 2^-9 exponent — TODO confirm against the TPS544B25 datasheet.
+	 */
+	vout = (u16)((min_uV * 512) / 1000000);
+
+	/* Check voltage bucket */
+	if (min_uV >= tps544_vout[2].vol)
+		index = 2;
+	else if (min_uV >= tps544_vout[1].vol)
+		index = 1;
+	else if (min_uV >= tps544_vout[0].vol)
+		index = 0;
+	else
+		return -EINVAL;
+
+	pmbus_write_word_data(client, page, PMBUS_VOUT_SCALE_LOOP,
+			tps544_vout[index].vloop);
+	/* Use delay after setting scale loop; this is derived from testing */
+	msleep(2000);
+	pmbus_write_word_data(client, page, PMBUS_VOUT_OV_FAULT_LIMIT,
+			tps544_vout[index].v_ovfault);
+	pmbus_write_word_data(client, page, PMBUS_VOUT_OV_WARN_LIMIT,
+			tps544_vout[index].v_ovwarn);
+	pmbus_write_word_data(client, page, PMBUS_VOUT_MAX,
+			tps544_vout[index].vmax);
+	pmbus_write_word_data(client, page, PMBUS_VOUT_COMMAND, vout);
+	tps544_write_word_data(client, page, TPS544_MFR_VOUT_MIN,
+			tps544_vout[index].mfr_vmin);
+	pmbus_write_word_data(client, page, PMBUS_VOUT_UV_WARN_LIMIT,
+			tps544_vout[index].v_uvwarn);
+	pmbus_write_word_data(client, page, PMBUS_VOUT_UV_FAULT_LIMIT,
+			tps544_vout[index].v_uvfault);
+
+	/* Cache the requested window in microvolts.  NOTE(review): make
+	 * sure the vout_min/vout_max field type is wide enough for these
+	 * microvolt values.
+	 */
+	data->vout_min[page] = min_uV;
+	data->vout_max[page] = max_uV;
+
+	return 0;
+}
+
+/* sysfs "tps544_setv" read: current VOUT scaled to microvolts.
+ * NOTE(review): a negative error from the pmbus read would be
+ * multiplied and printed as a bogus value — consider checking first.
+ */
+static ssize_t tps544_setv_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct regulator_dev *rdev = dev_get_drvdata(dev);
+	int vout;
+
+	vout = tps544_regulator_get_voltage(rdev) * TPS544_VOUTREAD_MULTIPLIER;
+	return sprintf(buf, "%d\n", vout);
+}
+
+/* sysfs "tps544_setv" write: program a new VOUT in microvolts. */
+static ssize_t tps544_setv_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct regulator_dev *rdev = dev_get_drvdata(dev);
+	int val;
+	int err;
+
+	err = kstrtoint(buf, 0, &val);
+	if (err)
+		return err;
+	/* Hard upper bound: 5.3 V in microvolts */
+	if (val > TPS544_VOUT_LIMIT)
+		return -EINVAL;
+
+	err = tps544_regulator_set_voltage(rdev, val, val, NULL);
+	if (err)
+		return err;
+
+	return count;
+}
+
+static DEVICE_ATTR_RW(tps544_setv);
+
+/* sysfs "tps544_restorev" write: restore factory defaults via the
+ * MFR RESTORE_DEF_ALL command (any written value triggers it).
+ */
+static ssize_t tps544_restorev_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct i2c_client *client = to_i2c_client(dev->parent);
+	int err;
+
+	err = pmbus_write_byte(client, 0, TPS544_MFR_RESTORE_DEF_ALL);
+	if (err)
+		return err;
+
+	return count;
+}
+
+static DEVICE_ATTR_WO(tps544_restorev);
+
+/* sysfs "tps544_geti" read: output current scaled from READ_IOUT.
+ * NOTE(review): a negative pmbus error would be masked into the
+ * 10-bit field and reported as a valid reading — consider checking.
+ */
+static ssize_t tps544_geti_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct i2c_client *client = to_i2c_client(dev->parent);
+	u16 reg_iout;
+
+	reg_iout = pmbus_read_word_data(client, 0, PMBUS_READ_IOUT) &
+		TPS544_IOUTREAD_MASK;
+
+	return sprintf(buf, "%d\n", reg_iout * TPS544_IOUTREAD_MULTIPLIER);
+}
+
+static DEVICE_ATTR_RO(tps544_geti);
+
+/* sysfs "tps544_setcali" read: show the current IOUT_CAL_OFFSET word. */
+static ssize_t tps544_setcali_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct i2c_client *client = to_i2c_client(dev->parent);
+	int reg_cali;
+
+	reg_cali = pmbus_read_word_data(client, 0, TPS544_MFR_IOUT_CAL_OFFSET);
+
+	return sprintf(buf, "Current: 0x%x; Set value in hex to calibrate\n",
+			reg_cali);
+}
+
+/* sysfs "tps544_setcali" write: program a new IOUT_CAL_OFFSET word. */
+static ssize_t tps544_setcali_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct i2c_client *client = to_i2c_client(dev->parent);
+	u16 val;
+	int err;
+
+	err = kstrtou16(buf, 0x0, &val);
+	if (err)
+		return err;
+
+	err = pmbus_write_word_data(client, 0, TPS544_MFR_IOUT_CAL_OFFSET, val);
+	if (err)
+		return err;
+
+	return (ssize_t)count;
+}
+
+static DEVICE_ATTR_RW(tps544_setcali);
+
+/* Attribute set attached to the regulator device in tps544_probe() */
+static struct attribute *reg_attrs[] = {
+	&dev_attr_tps544_setv.attr,
+	&dev_attr_tps544_restorev.attr,
+	&dev_attr_tps544_geti.attr,
+	&dev_attr_tps544_setcali.attr,
+	NULL,
+};
+
+ATTRIBUTE_GROUPS(reg);
+
+/* Single "vout" regulator on page 0 */
+static const struct regulator_desc tps544_reg_desc[] = {
+	PMBUS_REGULATOR("vout", 0),
+};
+#endif /* CONFIG_SENSORS_TPS544_REGULATOR */
+
+/* Probe: set up pmbus_driver_info hooks, optionally register the vout
+ * regulator plus its sysfs attributes, then hand off to pmbus_do_probe().
+ * NOTE(review): if pmbus_do_probe() fails, the sysfs groups created
+ * below are not removed here — verify devm teardown of rdev covers it.
+ */
+static int tps544_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	unsigned int i;
+	struct device *dev = &client->dev;
+	struct tps544_data *data;
+	struct pmbus_driver_info *info;
+#if IS_ENABLED(CONFIG_SENSORS_TPS544_REGULATOR)
+	int ret;
+	struct regulator_dev *rdev;
+	struct regulator_config rconfig = { };
+#endif
+
+	if (!i2c_check_functionality(client->adapter,
+			I2C_FUNC_SMBUS_READ_WORD_DATA))
+		return -ENODEV;
+
+	data = devm_kzalloc(dev, sizeof(struct tps544_data),
+			GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->dev = dev;
+
+	info = &data->info;
+	info->write_word_data = tps544_write_word_data;
+	/* TODO - remove these 3 hooks maybe unnecessary */
+	info->write_byte = tps544_write_byte;
+	info->read_word_data = tps544_read_word_data;
+	info->read_byte_data = tps544_read_byte_data;
+
+	/* Sentinel: "no voltage requested yet" */
+	for (i = 0; i < ARRAY_SIZE(data->vout_min); i++)
+		data->vout_min[i] = 0xffff;
+
+	info->pages = TPS544_NUM_PAGES;
+	info->func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
+
+#if IS_ENABLED(CONFIG_SENSORS_TPS544_REGULATOR)
+	rconfig.dev = dev;
+	rconfig.driver_data = data;
+	info->num_regulators = info->pages;
+	info->reg_desc = tps544_reg_desc;
+	/* Clamp to the descriptors actually defined */
+	if (info->num_regulators > (int)ARRAY_SIZE(tps544_reg_desc)) {
+		dev_err(&client->dev, "num_regulators too large!");
+		info->num_regulators = ARRAY_SIZE(tps544_reg_desc);
+	}
+
+	rdev = devm_regulator_register(dev, tps544_reg_desc, &rconfig);
+	if (IS_ERR(rdev)) {
+		dev_err(dev, "Failed to register %s regulator\n",
+				info->reg_desc[0].name);
+		return (int)PTR_ERR(rdev);
+	}
+
+	/* Expose the tps544_* attributes on the regulator device */
+	ret = sysfs_create_groups(&rdev->dev.kobj, reg_groups);
+	if (ret)
+		return ret;
+
+	/* Stashed for tps544_remove() */
+	dev_set_drvdata(dev, rdev);
+#endif
+
+	return pmbus_do_probe(client, id, info);
+}
+
+/* Remove: tear down the sysfs groups added in probe (if the regulator
+ * support is built in), then undo the pmbus core registration.
+ */
+static int tps544_remove(struct i2c_client *client)
+{
+#if IS_ENABLED(CONFIG_SENSORS_TPS544_REGULATOR)
+	struct device *dev = &client->dev;
+	struct regulator_dev *rdev = dev_get_drvdata(dev);
+
+	sysfs_remove_groups(&rdev->dev.kobj, reg_groups);
+#endif
+	pmbus_do_remove(client);
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id tps544_of_match[] = {
+ { .compatible = "ti,tps544" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tps544_of_match);
+#endif
+
+static const struct i2c_device_id tps544_id[] = {
+ {"tps544", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, tps544_id);
+
+static struct i2c_driver tps544_driver = {
+ .driver = {
+ .name = "tps544",
+ .of_match_table = of_match_ptr(tps544_of_match),
+ },
+ .probe = tps544_probe,
+ .remove = tps544_remove,
+ .id_table = tps544_id,
+};
+
+module_i2c_driver(tps544_driver);
+
+MODULE_AUTHOR("Harini Katakam");
+MODULE_DESCRIPTION("PMBus regulator driver for TPS544");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 1efdabb5adca..730bf81575e7 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -7,13 +7,16 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
+#include <linux/pinctrl/consumer.h>
/* Register offsets for the I2C device. */
#define CDNS_I2C_CR_OFFSET 0x00 /* Control Register, RW */
@@ -22,11 +25,14 @@
#define CDNS_I2C_DATA_OFFSET 0x0C /* I2C Data Register, RW */
#define CDNS_I2C_ISR_OFFSET 0x10 /* IRQ Status Register, RW */
#define CDNS_I2C_XFER_SIZE_OFFSET 0x14 /* Transfer Size Register, RW */
+#define CDNS_I2C_SLV_PAUSE_OFFSET 0x18 /* Transfer Size Register, RW */
#define CDNS_I2C_TIME_OUT_OFFSET 0x1C /* Time Out Register, RW */
+#define CDNS_I2C_IMR_OFFSET 0x20 /* IRQ Mask Register, RO */
#define CDNS_I2C_IER_OFFSET 0x24 /* IRQ Enable Register, WO */
#define CDNS_I2C_IDR_OFFSET 0x28 /* IRQ Disable Register, WO */
/* Control Register Bit mask definitions */
+#define CDNS_I2C_CR_SLVMON BIT(5) /* Slave monitor mode bit */
#define CDNS_I2C_CR_HOLD BIT(4) /* Hold Bus bit */
#define CDNS_I2C_CR_ACK_EN BIT(3)
#define CDNS_I2C_CR_NEA BIT(2)
@@ -42,7 +48,9 @@
/* Status Register Bit mask definitions */
#define CDNS_I2C_SR_BA BIT(8)
+#define CDNS_I2C_SR_TXDV BIT(6)
#define CDNS_I2C_SR_RXDV BIT(5)
+#define CDNS_I2C_SR_RXRW BIT(3)
/*
* I2C Address Register Bit mask definitions
@@ -91,6 +99,14 @@
CDNS_I2C_IXR_DATA | \
CDNS_I2C_IXR_COMP)
+#define CDNS_I2C_IXR_SLAVE_INTR_MASK (CDNS_I2C_IXR_RX_UNF | \
+ CDNS_I2C_IXR_TX_OVF | \
+ CDNS_I2C_IXR_RX_OVF | \
+ CDNS_I2C_IXR_TO | \
+ CDNS_I2C_IXR_NACK | \
+ CDNS_I2C_IXR_DATA | \
+ CDNS_I2C_IXR_COMP)
+
#define CDNS_I2C_TIMEOUT msecs_to_jiffies(1000)
/* timeout for pm runtime autosuspend */
#define CNDS_I2C_PM_TIMEOUT 1000 /* ms */
@@ -114,6 +130,32 @@
#define cdns_i2c_readreg(offset) readl_relaxed(id->membase + offset)
#define cdns_i2c_writereg(val, offset) writel_relaxed(val, id->membase + offset)
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+/**
+ * enum cdns_i2c_mode - I2C Controller current operating mode
+ *
+ * @CDNS_I2C_MODE_MASTER: I2C Controller operating in master mode
+ * @CDNS_I2C_MODE_SLAVE: I2C controller operating in slave mode
+ */
+enum cdns_i2c_mode {
+ CDNS_I2C_MODE_MASTER,
+ CDNS_I2C_MODE_SLAVE,
+};
+
+/**
+ * enum cdns_i2c_slave_mode - Slave state when I2C is operating in slave mode
+ *
+ * @CDNS_I2C_SLAVE_STATE_IDLE: I2C slave idle
+ * @CDNS_I2C_SLAVE_STATE_SEND: I2C slave sending data to master
+ * @CDNS_I2C_SLAVE_STATE_RECV: I2C slave receiving data from master
+ */
+enum cdns_i2c_slave_state {
+ CDNS_I2C_SLAVE_STATE_IDLE,
+ CDNS_I2C_SLAVE_STATE_SEND,
+ CDNS_I2C_SLAVE_STATE_RECV,
+};
+#endif
+
/**
* struct cdns_i2c - I2C device private data structure
*
@@ -135,6 +177,13 @@
* @clk: Pointer to struct clk
* @clk_rate_change_nb: Notifier block for clock rate changes
* @quirks: flag for broken hold bit usage in r1p10
+ * @ctrl_reg: Cached value of the control register.
+ * @rinfo: Structure holding recovery information.
+ * @pinctrl: Pin control state holder.
+ * @pinctrl_pins_default: Default pin control state.
+ * @pinctrl_pins_gpio: GPIO pin control state.
+ * @slave: Registered slave instance.
+ * @slave_state: I2C Slave state(idle/read/write).
*/
struct cdns_i2c {
struct device *dev;
@@ -155,6 +204,15 @@ struct cdns_i2c {
struct clk *clk;
struct notifier_block clk_rate_change_nb;
u32 quirks;
+ u32 ctrl_reg;
+ struct i2c_bus_recovery_info rinfo;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pinctrl_pins_default;
+ struct pinctrl_state *pinctrl_pins_gpio;
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ struct i2c_client *slave;
+ enum cdns_i2c_slave_state slave_state;
+#endif
};
struct cdns_platform_data {
@@ -183,17 +241,138 @@ static inline bool cdns_is_holdquirk(struct cdns_i2c *id, bool hold_wrkaround)
(id->curr_recv_count == CDNS_I2C_FIFO_DEPTH + 1));
}
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+static void cdns_i2c_set_mode(enum cdns_i2c_mode mode, struct cdns_i2c *id)
+{
+ /* Disable all interrupts */
+ cdns_i2c_writereg(CDNS_I2C_IXR_ALL_INTR_MASK, CDNS_I2C_IDR_OFFSET);
+
+ /* Update device state */
+ id->slave_state = CDNS_I2C_SLAVE_STATE_IDLE;
+
+ if (mode == CDNS_I2C_MODE_MASTER) {
+ /* Enable i2c master */
+ id->ctrl_reg = CDNS_I2C_CR_ACK_EN | CDNS_I2C_CR_NEA
+ | CDNS_I2C_CR_MS | CDNS_I2C_CR_CLR_FIFO;
+ return cdns_i2c_writereg(id->ctrl_reg, CDNS_I2C_CR_OFFSET);
+ }
+ /* Enable i2c slave */
+ id->ctrl_reg = CDNS_I2C_CR_ACK_EN | CDNS_I2C_CR_CLR_FIFO;
+ cdns_i2c_writereg(id->ctrl_reg, CDNS_I2C_CR_OFFSET);
+
+ /* Setting slave address */
+ cdns_i2c_writereg(id->slave->addr & CDNS_I2C_ADDR_MASK,
+ CDNS_I2C_ADDR_OFFSET);
+
+ /* Enable slave send/receive interrupts */
+ cdns_i2c_writereg(CDNS_I2C_IXR_SLAVE_INTR_MASK, CDNS_I2C_IER_OFFSET);
+}
+
+static void cdns_i2c_slave_rcv_data(struct cdns_i2c *id)
+{
+ u8 bytes;
+ unsigned char data;
+
+ /* Prepare backend for data reception */
+ if (id->slave_state == CDNS_I2C_SLAVE_STATE_IDLE) {
+ id->slave_state = CDNS_I2C_SLAVE_STATE_RECV;
+ i2c_slave_event(id->slave, I2C_SLAVE_WRITE_REQUESTED, NULL);
+ }
+
+ /* Fetch number of bytes to receive */
+ bytes = cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET);
+
+ /* Read data and send to backend */
+ while (bytes--) {
+ data = cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
+ i2c_slave_event(id->slave, I2C_SLAVE_WRITE_RECEIVED, &data);
+ }
+}
+
+static void cdns_i2c_slave_send_data(struct cdns_i2c *id)
+{
+ u8 data;
+
+ /* Prepare backend for data transmission */
+ if (id->slave_state == CDNS_I2C_SLAVE_STATE_IDLE) {
+ id->slave_state = CDNS_I2C_SLAVE_STATE_SEND;
+ i2c_slave_event(id->slave, I2C_SLAVE_READ_REQUESTED, &data);
+ } else {
+ i2c_slave_event(id->slave, I2C_SLAVE_READ_PROCESSED, &data);
+ }
+
+ /* Send data over bus */
+ cdns_i2c_writereg(data, CDNS_I2C_DATA_OFFSET);
+}
+
/**
- * cdns_i2c_isr - Interrupt handler for the I2C device
- * @irq: irq number for the I2C device
- * @ptr: void pointer to cdns_i2c structure
+ * cdns_i2c_slave_isr - Interrupt handler for the I2C device in slave role
+ * @ptr: Pointer to I2C device private data
+ *
+ * This function handles the data interrupt and transfer complete interrupt of
+ * the I2C device in slave role.
+ *
+ * Return: IRQ_HANDLED always
+ */
+static irqreturn_t cdns_i2c_slave_isr(void *ptr)
+{
+ struct cdns_i2c *id = ptr;
+ unsigned int isr_status, i2c_status;
+
+ /* Fetch the interrupt status */
+ isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET);
+ cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET);
+
+ /* Ignore masked interrupts */
+ isr_status &= ~cdns_i2c_readreg(CDNS_I2C_IMR_OFFSET);
+
+ /* Fetch transfer mode (send/receive) */
+ i2c_status = cdns_i2c_readreg(CDNS_I2C_SR_OFFSET);
+
+ /* Handle data send/receive */
+ if (i2c_status & CDNS_I2C_SR_RXRW) {
+ /* Send data to master */
+ if (isr_status & CDNS_I2C_IXR_DATA)
+ cdns_i2c_slave_send_data(id);
+
+ if (isr_status & CDNS_I2C_IXR_COMP) {
+ id->slave_state = CDNS_I2C_SLAVE_STATE_IDLE;
+ i2c_slave_event(id->slave, I2C_SLAVE_STOP, NULL);
+ }
+ } else {
+ /* Receive data from master */
+ if (isr_status & CDNS_I2C_IXR_DATA)
+ cdns_i2c_slave_rcv_data(id);
+
+ if (isr_status & CDNS_I2C_IXR_COMP) {
+ cdns_i2c_slave_rcv_data(id);
+ id->slave_state = CDNS_I2C_SLAVE_STATE_IDLE;
+ i2c_slave_event(id->slave, I2C_SLAVE_STOP, NULL);
+ }
+ }
+
+ /* Master indicated xfer stop or fifo underflow/overflow */
+ if (isr_status & (CDNS_I2C_IXR_NACK | CDNS_I2C_IXR_RX_OVF |
+ CDNS_I2C_IXR_RX_UNF | CDNS_I2C_IXR_TX_OVF)) {
+ id->slave_state = CDNS_I2C_SLAVE_STATE_IDLE;
+ i2c_slave_event(id->slave, I2C_SLAVE_STOP, NULL);
+ cdns_i2c_writereg(CDNS_I2C_CR_CLR_FIFO, CDNS_I2C_CR_OFFSET);
+ }
+
+ return IRQ_HANDLED;
+}
+#endif
+
+/**
+ * cdns_i2c_master_isr - Interrupt handler for the I2C device in master role
+ * @ptr: Pointer to I2C device private data
*
* This function handles the data interrupt, transfer complete interrupt and
- * the error interrupts of the I2C device.
+ * the error interrupts of the I2C device in master role.
*
* Return: IRQ_HANDLED always
*/
-static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
+static irqreturn_t cdns_i2c_master_isr(void *ptr)
{
unsigned int isr_status, avail_bytes, updatetx;
unsigned int bytes_to_send;
@@ -347,6 +526,23 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
status = IRQ_HANDLED;
}
+ /* Handling Slave monitor mode interrupt */
+ if (isr_status & CDNS_I2C_IXR_SLV_RDY) {
+ unsigned int ctrl_reg;
+ /* Read control register */
+ ctrl_reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
+
+ /* Disable slave monitor mode */
+ ctrl_reg &= ~CDNS_I2C_CR_SLVMON;
+ cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
+
+ /* Clear interrupt flag for slvmon mode */
+ cdns_i2c_writereg(CDNS_I2C_IXR_SLV_RDY, CDNS_I2C_IDR_OFFSET);
+
+ done_flag = 1;
+ status = IRQ_HANDLED;
+ }
+
/* Update the status for errors */
id->err_status |= isr_status & CDNS_I2C_IXR_ERR_INTR_MASK;
if (id->err_status)
@@ -359,6 +555,29 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
}
/**
+ * cdns_i2c_isr - Interrupt handler for the I2C device
+ * @irq: irq number for the I2C device
+ * @ptr: void pointer to cdns_i2c structure
+ *
+ * This function passes the control to slave/master based on current role of
+ * i2c controller.
+ *
+ * Return: IRQ_HANDLED always
+ */
+static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
+{
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ struct cdns_i2c *id = ptr;
+
+ if (!(id->ctrl_reg & CDNS_I2C_CR_MS)) {
+ dev_dbg(&id->adap.dev, "slave interrupt\n");
+ return cdns_i2c_slave_isr(ptr);
+ }
+#endif
+ return cdns_i2c_master_isr(ptr);
+}
+
+/**
* cdns_i2c_mrecv - Prepare and start a master receive operation
* @id: pointer to the i2c device structure
*/
@@ -366,6 +585,7 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
{
unsigned int ctrl_reg;
unsigned int isr_status;
+ unsigned long flags;
id->p_recv_buf = id->p_msg->buf;
id->recv_count = id->p_msg->len;
@@ -383,8 +603,12 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
* Check for the message size against FIFO depth and set the
* 'hold bus' bit if it is greater than FIFO depth.
*/
- if (id->recv_count > CDNS_I2C_FIFO_DEPTH)
+ if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) {
ctrl_reg |= CDNS_I2C_CR_HOLD;
+ } else {
+ if (id->p_msg->flags & I2C_M_NOSTART)
+ ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
+ }
cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
@@ -407,6 +631,7 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
}
/* Set the slave address in address register - triggers operation */
+ local_irq_save(flags);
cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK,
CDNS_I2C_ADDR_OFFSET);
/* Clear the bus hold flag if bytes to receive is less than FIFO size */
@@ -414,6 +639,7 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
((id->p_msg->flags & I2C_M_RECV_LEN) != I2C_M_RECV_LEN) &&
(id->recv_count <= CDNS_I2C_FIFO_DEPTH))
cdns_i2c_clear_bus_hold(id);
+ local_irq_restore(flags);
cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET);
}
@@ -481,6 +707,40 @@ static void cdns_i2c_msend(struct cdns_i2c *id)
}
/**
+ * cdns_i2c_slvmon - Handling Slave monitor mode feature
+ * @id: pointer to the i2c device
+ */
+static void cdns_i2c_slvmon(struct cdns_i2c *id)
+{
+ unsigned int ctrl_reg;
+ unsigned int isr_status;
+
+ id->p_recv_buf = NULL;
+ id->p_send_buf = id->p_msg->buf;
+ id->send_count = id->p_msg->len;
+
+ /* Clear the interrupts in interrupt status register. */
+ isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET);
+ cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET);
+
+ /* Enable slvmon control reg */
+ ctrl_reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
+ ctrl_reg |= CDNS_I2C_CR_MS | CDNS_I2C_CR_NEA | CDNS_I2C_CR_SLVMON
+ | CDNS_I2C_CR_CLR_FIFO;
+ ctrl_reg &= ~(CDNS_I2C_CR_RW);
+ cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
+
+ /* Initialize slvmon reg */
+ cdns_i2c_writereg(0xF, CDNS_I2C_SLV_PAUSE_OFFSET);
+
+ /* Set the slave address to start the slave address transmission */
+ cdns_i2c_writereg(id->p_msg->addr, CDNS_I2C_ADDR_OFFSET);
+
+ /* Setup slvmon interrupt flag */
+ cdns_i2c_writereg(CDNS_I2C_IXR_SLV_RDY, CDNS_I2C_IER_OFFSET);
+}
+
+/**
* cdns_i2c_master_reset - Reset the interface
* @adap: pointer to the i2c adapter driver instance
*
@@ -496,7 +756,7 @@ static void cdns_i2c_master_reset(struct i2c_adapter *adap)
cdns_i2c_writereg(CDNS_I2C_IXR_ALL_INTR_MASK, CDNS_I2C_IDR_OFFSET);
/* Clear the hold bit and fifos */
regval = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
- regval &= ~CDNS_I2C_CR_HOLD;
+ regval &= ~(CDNS_I2C_CR_HOLD | CDNS_I2C_CR_SLVMON);
regval |= CDNS_I2C_CR_CLR_FIFO;
cdns_i2c_writereg(regval, CDNS_I2C_CR_OFFSET);
/* Update the transfercount register to zero */
@@ -530,9 +790,11 @@ static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg,
cdns_i2c_writereg(reg | CDNS_I2C_CR_NEA,
CDNS_I2C_CR_OFFSET);
}
-
- /* Check for the R/W flag on each msg */
- if (msg->flags & I2C_M_RD)
+ /* Check for zero length - Slave monitor mode */
+ if (msg->len == 0)
+ cdns_i2c_slvmon(id);
+ /* Check for the R/W flag on each msg */
+ else if (msg->flags & I2C_M_RD)
cdns_i2c_mrecv(id);
else
cdns_i2c_msend(id);
@@ -540,6 +802,7 @@ static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg,
/* Wait for the signal of completion */
time_left = wait_for_completion_timeout(&id->xfer_done, adap->timeout);
if (time_left == 0) {
+ i2c_recover_bus(adap);
cdns_i2c_master_reset(adap);
dev_err(id->adap.dev.parent,
"timeout waiting on completion\n");
@@ -577,11 +840,13 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
ret = pm_runtime_get_sync(id->dev);
if (ret < 0)
return ret;
+
/* Check if the bus is free */
- if (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) & CDNS_I2C_SR_BA) {
- ret = -EAGAIN;
- goto out;
- }
+ if (msgs->len)
+ if (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) & CDNS_I2C_SR_BA) {
+ ret = -EAGAIN;
+ goto out;
+ }
hold_quirk = !!(id->quirks & CDNS_I2C_BROKEN_HOLD_BIT);
/*
@@ -635,6 +900,7 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
}
ret = num;
+
out:
pm_runtime_mark_last_busy(id->dev);
pm_runtime_put_autosuspend(id->dev);
@@ -649,14 +915,66 @@ out:
*/
static u32 cdns_i2c_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR |
- (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK) |
- I2C_FUNC_SMBUS_BLOCK_DATA;
+ u32 func = I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR |
+ (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK) |
+ I2C_FUNC_SMBUS_BLOCK_DATA;
+
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ func |= I2C_FUNC_SLAVE;
+#endif
+
+ return func;
+}
+
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+static int cdns_reg_slave(struct i2c_client *slave)
+{
+ int ret;
+ struct cdns_i2c *id = container_of(slave->adapter, struct cdns_i2c,
+ adap);
+
+ if (id->slave)
+ return -EBUSY;
+
+ if (slave->flags & I2C_CLIENT_TEN)
+ return -EAFNOSUPPORT;
+
+ ret = pm_runtime_get_sync(id->dev);
+ if (ret < 0)
+ return ret;
+
+ /* Store slave information */
+ id->slave = slave;
+
+ /* Enable I2C slave */
+ cdns_i2c_set_mode(CDNS_I2C_MODE_SLAVE, id);
+
+ return 0;
}
+static int cdns_unreg_slave(struct i2c_client *slave)
+{
+ struct cdns_i2c *id = container_of(slave->adapter, struct cdns_i2c,
+ adap);
+
+ /* Remove slave information */
+ id->slave = NULL;
+
+ /* Enable I2C master */
+ cdns_i2c_set_mode(CDNS_I2C_MODE_MASTER, id);
+ pm_runtime_put(id->dev);
+
+ return 0;
+}
+#endif
+
static const struct i2c_algorithm cdns_i2c_algo = {
.master_xfer = cdns_i2c_master_xfer,
.functionality = cdns_i2c_func,
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ .reg_slave = cdns_reg_slave,
+ .unreg_slave = cdns_unreg_slave,
+#endif
};
/**
@@ -746,12 +1064,11 @@ static int cdns_i2c_setclk(unsigned long clk_in, struct cdns_i2c *id)
if (ret)
return ret;
- ctrl_reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
+ ctrl_reg = id->ctrl_reg;
ctrl_reg &= ~(CDNS_I2C_CR_DIVA_MASK | CDNS_I2C_CR_DIVB_MASK);
ctrl_reg |= ((div_a << CDNS_I2C_CR_DIVA_SHIFT) |
(div_b << CDNS_I2C_CR_DIVB_SHIFT));
- cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
-
+ id->ctrl_reg = ctrl_reg;
return 0;
}
@@ -835,6 +1152,26 @@ static int __maybe_unused cdns_i2c_runtime_suspend(struct device *dev)
}
/**
+ * cdns_i2c_init - Controller initialisation
+ * @id: Device private data structure
+ *
+ * Initialise the i2c controller.
+ *
+ */
+static void cdns_i2c_init(struct cdns_i2c *id)
+{
+ cdns_i2c_writereg(id->ctrl_reg, CDNS_I2C_CR_OFFSET);
+ /*
+ * Cadence I2C controller has a bug wherein it generates
+ * invalid read transaction after HW timeout in master receiver mode.
+ * HW timeout is not used by this driver and the interrupt is disabled.
+ * But the feature itself cannot be disabled. Hence maximum value
+ * is written to this register to reduce the chances of error.
+ */
+ cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);
+}
+
+/**
* cdns_i2c_runtime_resume - Runtime resume
* @dev: Address of the platform_device structure
*
@@ -852,6 +1189,89 @@ static int __maybe_unused cdns_i2c_runtime_resume(struct device *dev)
dev_err(dev, "Cannot enable clock.\n");
return ret;
}
+ cdns_i2c_init(xi2c);
+
+ return 0;
+}
+
+/**
+ * cdns_i2c_prepare_recovery - Withhold recovery state
+ * @adapter: Pointer to i2c adapter
+ *
+ * This function is called to prepare for recovery.
+ * It changes the state of pins from SCL/SDA to GPIO.
+ */
+static void cdns_i2c_prepare_recovery(struct i2c_adapter *adapter)
+{
+ struct cdns_i2c *p_cdns_i2c;
+
+ p_cdns_i2c = container_of(adapter, struct cdns_i2c, adap);
+
+ /* Setting pin state as gpio */
+ pinctrl_select_state(p_cdns_i2c->pinctrl,
+ p_cdns_i2c->pinctrl_pins_gpio);
+}
+
+/**
+ * cdns_i2c_unprepare_recovery - Release recovery state
+ * @adapter: Pointer to i2c adapter
+ *
+ * This function is called on exiting recovery. It reverts
+ * the state of pins from GPIO to SCL/SDA.
+ */
+static void cdns_i2c_unprepare_recovery(struct i2c_adapter *adapter)
+{
+ struct cdns_i2c *p_cdns_i2c;
+
+ p_cdns_i2c = container_of(adapter, struct cdns_i2c, adap);
+
+ /* Setting pin state to default(i2c) */
+ pinctrl_select_state(p_cdns_i2c->pinctrl,
+ p_cdns_i2c->pinctrl_pins_default);
+}
+
+/**
+ * cdns_i2c_init_recovery_info - Initialize I2C bus recovery
+ * @pid: Pointer to cdns i2c structure
+ * @pdev: Handle to the platform device structure
+ *
+ * This function does required initialization for i2c bus
+ * recovery. It registers three functions for prepare,
+ * recover and unprepare
+ *
+ * Return: 0 on Success, negative error otherwise.
+ */
+static int cdns_i2c_init_recovery_info(struct cdns_i2c *pid,
+ struct platform_device *pdev)
+{
+ struct i2c_bus_recovery_info *rinfo = &pid->rinfo;
+
+ pid->pinctrl_pins_default = pinctrl_lookup_state(pid->pinctrl,
+ PINCTRL_STATE_DEFAULT);
+ pid->pinctrl_pins_gpio = pinctrl_lookup_state(pid->pinctrl, "gpio");
+
+ /* Fetches GPIO pins */
+ rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda-gpios", 0);
+ rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl-gpios", 0);
+
+ /* if GPIO driver isn't ready yet, defer probe */
+ if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER ||
+ PTR_ERR(rinfo->scl_gpiod) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ /* Validates fetched information */
+ if (IS_ERR(rinfo->sda_gpiod) ||
+ IS_ERR(rinfo->scl_gpiod) ||
+ IS_ERR(pid->pinctrl_pins_default) ||
+ IS_ERR(pid->pinctrl_pins_gpio)) {
+ dev_dbg(&pdev->dev, "recovery information incomplete\n");
+ return 0;
+ }
+
+ rinfo->prepare_recovery = cdns_i2c_prepare_recovery;
+ rinfo->unprepare_recovery = cdns_i2c_unprepare_recovery;
+ rinfo->recover_bus = i2c_generic_scl_recovery;
+ pid->adap.bus_recovery_info = rinfo;
return 0;
}
@@ -902,6 +1322,13 @@ static int cdns_i2c_probe(struct platform_device *pdev)
id->quirks = data->quirks;
}
+ id->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (!IS_ERR(id->pinctrl)) {
+ ret = cdns_i2c_init_recovery_info(id, pdev);
+ if (ret)
+ return ret;
+ }
+
r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
id->membase = devm_ioremap_resource(&pdev->dev, r_mem);
if (IS_ERR(id->membase))
@@ -945,8 +1372,7 @@ static int cdns_i2c_probe(struct platform_device *pdev)
if (ret || (id->i2c_clk > I2C_MAX_FAST_MODE_FREQ))
id->i2c_clk = I2C_MAX_STANDARD_MODE_FREQ;
- cdns_i2c_writereg(CDNS_I2C_CR_ACK_EN | CDNS_I2C_CR_NEA | CDNS_I2C_CR_MS,
- CDNS_I2C_CR_OFFSET);
+ id->ctrl_reg = CDNS_I2C_CR_ACK_EN | CDNS_I2C_CR_NEA | CDNS_I2C_CR_MS;
ret = cdns_i2c_setclk(id->input_clk, id);
if (ret) {
@@ -961,15 +1387,7 @@ static int cdns_i2c_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "cannot get irq %d\n", id->irq);
goto err_clk_dis;
}
-
- /*
- * Cadence I2C controller has a bug wherein it generates
- * invalid read transaction after HW timeout in master receiver mode.
- * HW timeout is not used by this driver and the interrupt is disabled.
- * But the feature itself cannot be disabled. Hence maximum value
- * is written to this register to reduce the chances of error.
- */
- cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);
+ cdns_i2c_init(id);
ret = i2c_add_adapter(&id->adap);
if (ret < 0)
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 90c1c362394d..7336b13d0e09 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -59,6 +59,9 @@ enum xiic_endian {
* @rx_pos: Position within current RX message
* @endianness: big/little-endian byte order
* @clk: Pointer to AXI4-lite input clock
+ * @dynamic: Mode of controller
+ * @repeated_start: Repeated start operation
+ * @prev_msg_tx: Previous message is Tx
*/
struct xiic_i2c {
struct device *dev;
@@ -74,26 +77,28 @@ struct xiic_i2c {
int rx_pos;
enum xiic_endian endianness;
struct clk *clk;
+ bool dynamic;
+ bool repeated_start;
+ bool prev_msg_tx;
};
-
#define XIIC_MSB_OFFSET 0
-#define XIIC_REG_OFFSET (0x100+XIIC_MSB_OFFSET)
+#define XIIC_REG_OFFSET (0x100 + XIIC_MSB_OFFSET)
/*
* Register offsets in bytes from RegisterBase. Three is added to the
* base offset to access LSB (IBM style) of the word
*/
-#define XIIC_CR_REG_OFFSET (0x00+XIIC_REG_OFFSET) /* Control Register */
-#define XIIC_SR_REG_OFFSET (0x04+XIIC_REG_OFFSET) /* Status Register */
-#define XIIC_DTR_REG_OFFSET (0x08+XIIC_REG_OFFSET) /* Data Tx Register */
-#define XIIC_DRR_REG_OFFSET (0x0C+XIIC_REG_OFFSET) /* Data Rx Register */
-#define XIIC_ADR_REG_OFFSET (0x10+XIIC_REG_OFFSET) /* Address Register */
-#define XIIC_TFO_REG_OFFSET (0x14+XIIC_REG_OFFSET) /* Tx FIFO Occupancy */
-#define XIIC_RFO_REG_OFFSET (0x18+XIIC_REG_OFFSET) /* Rx FIFO Occupancy */
-#define XIIC_TBA_REG_OFFSET (0x1C+XIIC_REG_OFFSET) /* 10 Bit Address reg */
-#define XIIC_RFD_REG_OFFSET (0x20+XIIC_REG_OFFSET) /* Rx FIFO Depth reg */
-#define XIIC_GPO_REG_OFFSET (0x24+XIIC_REG_OFFSET) /* Output Register */
+#define XIIC_CR_REG_OFFSET (0x00 + XIIC_REG_OFFSET) /* Control Register */
+#define XIIC_SR_REG_OFFSET (0x04 + XIIC_REG_OFFSET) /* Status Register */
+#define XIIC_DTR_REG_OFFSET (0x08 + XIIC_REG_OFFSET) /* Data Tx Register */
+#define XIIC_DRR_REG_OFFSET (0x0C + XIIC_REG_OFFSET) /* Data Rx Register */
+#define XIIC_ADR_REG_OFFSET (0x10 + XIIC_REG_OFFSET) /* Address Register */
+#define XIIC_TFO_REG_OFFSET (0x14 + XIIC_REG_OFFSET) /* Tx FIFO Occupancy */
+#define XIIC_RFO_REG_OFFSET (0x18 + XIIC_REG_OFFSET) /* Rx FIFO Occupancy */
+#define XIIC_TBA_REG_OFFSET (0x1C + XIIC_REG_OFFSET) /* 10 Bit Address reg */
+#define XIIC_RFD_REG_OFFSET (0x20 + XIIC_REG_OFFSET) /* Rx FIFO Depth reg */
+#define XIIC_GPO_REG_OFFSET (0x24 + XIIC_REG_OFFSET) /* Output Register */
/* Control Register masks */
#define XIIC_CR_ENABLE_DEVICE_MASK 0x01 /* Device enable = 1 */
@@ -142,6 +147,9 @@ struct xiic_i2c {
#define XIIC_TX_DYN_START_MASK 0x0100 /* 1 = Set dynamic start */
#define XIIC_TX_DYN_STOP_MASK 0x0200 /* 1 = Set dynamic stop */
+/* Dynamic mode constants */
+#define MAX_READ_LENGTH_DYNAMIC 255 /* Max length for dynamic read */
+
/*
* The following constants define the register offsets for the Interrupt
* registers. There are some holes in the memory map for reserved addresses
@@ -228,18 +236,21 @@ static inline int xiic_getreg32(struct xiic_i2c *i2c, int reg)
static inline void xiic_irq_dis(struct xiic_i2c *i2c, u32 mask)
{
u32 ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
+
xiic_setreg32(i2c, XIIC_IIER_OFFSET, ier & ~mask);
}
static inline void xiic_irq_en(struct xiic_i2c *i2c, u32 mask)
{
u32 ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
+
xiic_setreg32(i2c, XIIC_IIER_OFFSET, ier | mask);
}
static inline void xiic_irq_clr(struct xiic_i2c *i2c, u32 mask)
{
u32 isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET);
+
xiic_setreg32(i2c, XIIC_IISR_OFFSET, isr & mask);
}
@@ -268,6 +279,24 @@ static int xiic_clear_rx_fifo(struct xiic_i2c *i2c)
return 0;
}
+static int xiic_wait_tx_empty(struct xiic_i2c *i2c)
+{
+ u8 isr;
+ unsigned long timeout;
+
+ timeout = jiffies + XIIC_I2C_TIMEOUT;
+ for (isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET);
+ !(isr & XIIC_INTR_TX_EMPTY_MASK);
+ isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET)) {
+ if (time_after(jiffies, timeout)) {
+ dev_err(i2c->dev, "Timeout waiting at Tx empty\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
static int xiic_reinit(struct xiic_i2c *i2c)
{
int ret;
@@ -309,13 +338,14 @@ static void xiic_deinit(struct xiic_i2c *i2c)
static void xiic_read_rx(struct xiic_i2c *i2c)
{
- u8 bytes_in_fifo;
+ u8 bytes_in_fifo, cr = 0, bytes_to_read = 0;
+ u32 bytes_rem = 0;
int i;
bytes_in_fifo = xiic_getreg8(i2c, XIIC_RFO_REG_OFFSET) + 1;
dev_dbg(i2c->adap.dev.parent,
- "%s entry, bytes in fifo: %d, msg: %d, SR: 0x%x, CR: 0x%x\n",
+ "%s entry, bytes in fifo: %d, rem: %d, SR: 0x%x, CR: 0x%x\n",
__func__, bytes_in_fifo, xiic_rx_space(i2c),
xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
@@ -323,13 +353,53 @@ static void xiic_read_rx(struct xiic_i2c *i2c)
if (bytes_in_fifo > xiic_rx_space(i2c))
bytes_in_fifo = xiic_rx_space(i2c);
- for (i = 0; i < bytes_in_fifo; i++)
+ bytes_to_read = bytes_in_fifo;
+
+ if (!i2c->dynamic) {
+ bytes_rem = xiic_rx_space(i2c) - bytes_in_fifo;
+
+ if (bytes_rem > IIC_RX_FIFO_DEPTH) {
+ bytes_to_read = bytes_in_fifo;
+ } else if (bytes_rem > 1) {
+ bytes_to_read = bytes_rem - 1;
+ } else if (bytes_rem == 1) {
+ bytes_to_read = 1;
+ /* Set NACK in CR to indicate slave transmitter */
+ cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET);
+ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr |
+ XIIC_CR_NO_ACK_MASK);
+ } else if (bytes_rem == 0) {
+ bytes_to_read = bytes_in_fifo;
+
+ /* Generate stop on the bus if it is last message */
+ if (i2c->nmsgs == 1) {
+ cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET);
+ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr &
+ ~XIIC_CR_MSMS_MASK);
+ }
+
+ /* Make TXACK=0, clean up for next transaction */
+ cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET);
+ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr &
+ ~XIIC_CR_NO_ACK_MASK);
+ }
+ }
+
+ /* Read the fifo */
+ for (i = 0; i < bytes_to_read; i++) {
i2c->rx_msg->buf[i2c->rx_pos++] =
xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET);
+ }
+
+ if (i2c->dynamic) {
+ u8 bytes;
+
+ /* Receive remaining bytes if less than fifo depth */
+ bytes = min_t(u8, xiic_rx_space(i2c), IIC_RX_FIFO_DEPTH);
+ bytes--;
- xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET,
- (xiic_rx_space(i2c) > IIC_RX_FIFO_DEPTH) ?
- IIC_RX_FIFO_DEPTH - 1 : xiic_rx_space(i2c) - 1);
+ xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, bytes);
+ }
}
static int xiic_tx_fifo_space(struct xiic_i2c *i2c)
@@ -350,7 +420,8 @@ static void xiic_fill_tx_fifo(struct xiic_i2c *i2c)
while (len--) {
u16 data = i2c->tx_msg->buf[i2c->tx_pos++];
- if ((xiic_tx_space(i2c) == 0) && (i2c->nmsgs == 1)) {
+
+ if (!xiic_tx_space(i2c) && i2c->nmsgs == 1) {
/* last message in transfer -> STOP */
data |= XIIC_TX_DYN_STOP_MASK;
dev_dbg(i2c->adap.dev.parent, "%s TX STOP\n", __func__);
@@ -359,6 +430,56 @@ static void xiic_fill_tx_fifo(struct xiic_i2c *i2c)
}
}
+static void xiic_std_fill_tx_fifo(struct xiic_i2c *i2c)
+{
+ u8 fifo_space = xiic_tx_fifo_space(i2c);
+ u16 data = 0;
+ int len = xiic_tx_space(i2c);
+
+ dev_dbg(i2c->adap.dev.parent, "%s entry, len: %d, fifo space: %d\n",
+ __func__, len, fifo_space);
+
+ if (len > fifo_space)
+ len = fifo_space;
+ else if (len && !(i2c->repeated_start))
+ len--;
+
+ while (len--) {
+ data = i2c->tx_msg->buf[i2c->tx_pos++];
+ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
+ }
+}
+
+static void xiic_send_tx(struct xiic_i2c *i2c)
+{
+ dev_dbg(i2c->adap.dev.parent,
+ "%s entry, rem: %d, SR: 0x%x, CR: 0x%x\n",
+ __func__, xiic_tx_space(i2c),
+ xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
+ xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
+
+ if (xiic_tx_space(i2c) > 1) {
+ xiic_std_fill_tx_fifo(i2c);
+ return;
+ }
+
+ if ((xiic_tx_space(i2c) == 1)) {
+ u16 data;
+
+ if (i2c->nmsgs == 1) {
+ u8 cr;
+
+ /* Write to CR to stop */
+ cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET);
+ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr &
+ ~XIIC_CR_MSMS_MASK);
+ }
+ /* Send last byte */
+ data = i2c->tx_msg->buf[i2c->tx_pos++];
+ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
+ }
+}
+
static void xiic_wakeup(struct xiic_i2c *i2c, int code)
{
i2c->tx_msg = NULL;
@@ -389,12 +510,14 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
dev_dbg(i2c->adap.dev.parent, "%s: SR: 0x%x, msg: %p, nmsgs: %d\n",
__func__, xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
i2c->tx_msg, i2c->nmsgs);
-
+ dev_dbg(i2c->adap.dev.parent, "%s, ISR: 0x%x, CR: 0x%x\n",
+ __func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET),
+ xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
/* Service requesting interrupt */
if ((pend & XIIC_INTR_ARB_LOST_MASK) ||
- ((pend & XIIC_INTR_TX_ERROR_MASK) &&
- !(pend & XIIC_INTR_RX_FULL_MASK))) {
+ ((pend & XIIC_INTR_TX_ERROR_MASK) &&
+ !(pend & XIIC_INTR_RX_FULL_MASK))) {
/* bus arbritration lost, or...
* Transmit error _OR_ RX completed
* if this happens when RX_FULL is not set
@@ -451,22 +574,6 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
}
}
}
- if (pend & XIIC_INTR_BNB_MASK) {
- /* IIC bus has transitioned to not busy */
- clr |= XIIC_INTR_BNB_MASK;
-
- /* The bus is not busy, disable BusNotBusy interrupt */
- xiic_irq_dis(i2c, XIIC_INTR_BNB_MASK);
-
- if (!i2c->tx_msg)
- goto out;
-
- if ((i2c->nmsgs == 1) && !i2c->rx_msg &&
- xiic_tx_space(i2c) == 0)
- xiic_wakeup(i2c, STATE_DONE);
- else
- xiic_wakeup(i2c, STATE_ERROR);
- }
if (pend & (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)) {
/* Transmit register/FIFO is empty or ½ empty */
@@ -479,7 +586,10 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
goto out;
}
- xiic_fill_tx_fifo(i2c);
+ if (i2c->dynamic)
+ xiic_fill_tx_fifo(i2c);
+ else
+ xiic_send_tx(i2c);
/* current message sent and there is space in the fifo */
if (!xiic_tx_space(i2c) && xiic_tx_fifo_space(i2c) >= 2) {
@@ -503,6 +613,24 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
*/
xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK);
}
+
+ if (pend & XIIC_INTR_BNB_MASK) {
+ /* IIC bus has transitioned to not busy */
+ clr |= XIIC_INTR_BNB_MASK;
+
+ /* The bus is not busy, disable BusNotBusy interrupt */
+ xiic_irq_dis(i2c, XIIC_INTR_BNB_MASK);
+
+ if (!i2c->tx_msg)
+ goto out;
+
+ if (i2c->nmsgs == 1 && !i2c->rx_msg &&
+ xiic_tx_space(i2c) == 0)
+ xiic_wakeup(i2c, STATE_DONE);
+ else
+ xiic_wakeup(i2c, STATE_ERROR);
+ }
+
out:
dev_dbg(i2c->adap.dev.parent, "%s clr: 0x%x\n", __func__, clr);
@@ -541,35 +669,115 @@ static int xiic_busy(struct xiic_i2c *i2c)
static void xiic_start_recv(struct xiic_i2c *i2c)
{
- u8 rx_watermark;
+ u16 rx_watermark;
+ u8 cr = 0, rfd_set = 0;
struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
unsigned long flags;
- /* Clear and enable Rx full interrupt. */
- xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK);
+ dev_dbg(i2c->adap.dev.parent, "%s entry, ISR: 0x%x, CR: 0x%x\n",
+ __func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET),
+ xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
- /* we want to get all but last byte, because the TX_ERROR IRQ is used
- * to inidicate error ACK on the address, and negative ack on the last
- * received byte, so to not mix them receive all but last.
- * In the case where there is only one byte to receive
- * we can check if ERROR and RX full is set at the same time
- */
- rx_watermark = msg->len;
- if (rx_watermark > IIC_RX_FIFO_DEPTH)
- rx_watermark = IIC_RX_FIFO_DEPTH;
- xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1);
-
- local_irq_save(flags);
- if (!(msg->flags & I2C_M_NOSTART))
- /* write the address */
- xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
- i2c_8bit_addr_from_msg(msg) | XIIC_TX_DYN_START_MASK);
+ /* Disable Tx interrupts */
+ xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK | XIIC_INTR_TX_EMPTY_MASK);
+
+ if (i2c->dynamic) {
+ u8 bytes;
+ u16 val;
+
+ /* Clear and enable Rx full interrupt. */
+ xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK |
+ XIIC_INTR_TX_ERROR_MASK);
+
+ /*
+ * We want to get all but last byte, because the TX_ERROR IRQ
+ * is used to indicate error ACK on the address, and
+ * negative ack on the last received byte, so to not mix
+ * them receive all but last.
+ * In the case where there is only one byte to receive
+ * we can check if ERROR and RX full is set at the same time
+ */
+ rx_watermark = msg->len;
+ bytes = min_t(u8, rx_watermark, IIC_RX_FIFO_DEPTH);
+ bytes--;
+
+ xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, bytes);
- xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);
+ local_irq_save(flags);
+ if (!(msg->flags & I2C_M_NOSTART))
+ /* write the address */
+ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
+ i2c_8bit_addr_from_msg(msg) |
+ XIIC_TX_DYN_START_MASK);
- xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
- msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0));
- local_irq_restore(flags);
+ xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);
+
+ /* If last message, include dynamic stop bit with length */
+ val = (i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0;
+ val |= msg->len;
+
+ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, val);
+ local_irq_restore(flags);
+ } else {
+ /*
+ * If previous message is Tx, make sure that Tx FIFO is empty
+ * before starting a new transfer as the repeated start in
+ * standard mode can corrupt the transaction if there are
+ * still bytes to be transmitted in FIFO
+ */
+ if (i2c->prev_msg_tx) {
+ int status;
+
+ status = xiic_wait_tx_empty(i2c);
+ if (status)
+ return;
+ }
+
+ cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET);
+
+ /* Set Receive fifo depth */
+ rx_watermark = msg->len;
+ if (rx_watermark > IIC_RX_FIFO_DEPTH) {
+ rfd_set = IIC_RX_FIFO_DEPTH - 1;
+ } else if ((rx_watermark == 1) || (rx_watermark == 0)) {
+ rfd_set = rx_watermark - 1;
+ /* Handle single byte transfer separately */
+ cr |= XIIC_CR_NO_ACK_MASK;
+ } else {
+ rfd_set = rx_watermark - 2;
+ }
+
+ /* Check if RSTA should be set */
+ if (cr & XIIC_CR_MSMS_MASK) {
+ i2c->repeated_start = true;
+ /* Already a master, RSTA should be set */
+ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, (cr |
+ XIIC_CR_REPEATED_START_MASK) &
+ ~(XIIC_CR_DIR_IS_TX_MASK));
+ }
+
+ xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rfd_set);
+
+ /* Clear and enable Rx full and transmit complete interrupts */
+ xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK |
+ XIIC_INTR_TX_ERROR_MASK);
+
+ /* Write the address */
+ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
+ i2c_8bit_addr_from_msg(msg));
+
+ /* Write to Control Register,to start transaction in Rx mode */
+ if ((cr & XIIC_CR_MSMS_MASK) == 0) {
+ i2c->repeated_start = false;
+ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, (cr |
+ XIIC_CR_MSMS_MASK)
+ & ~(XIIC_CR_DIR_IS_TX_MASK));
+ }
+
+ dev_dbg(i2c->adap.dev.parent, "%s end, ISR: 0x%x, CR: 0x%x\n",
+ __func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET),
+ xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
+ }
if (i2c->nmsgs == 1)
/* very last, enable bus not busy as well */
@@ -577,10 +785,17 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
/* the message is tx:ed */
i2c->tx_pos = msg->len;
+
+ /* Enable interrupts */
+ xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);
+
+ i2c->prev_msg_tx = false;
}
static void xiic_start_send(struct xiic_i2c *i2c)
{
+ u8 cr = 0;
+ u16 data;
struct i2c_msg *msg = i2c->tx_msg;
xiic_irq_clr(i2c, XIIC_INTR_TX_ERROR_MASK);
@@ -591,22 +806,73 @@ static void xiic_start_send(struct xiic_i2c *i2c)
__func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET),
xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
- if (!(msg->flags & I2C_M_NOSTART)) {
- /* write the address */
- u16 data = i2c_8bit_addr_from_msg(msg) |
- XIIC_TX_DYN_START_MASK;
- if ((i2c->nmsgs == 1) && msg->len == 0)
- /* no data and last message -> add STOP */
- data |= XIIC_TX_DYN_STOP_MASK;
+ if (i2c->dynamic) {
+ if (!(msg->flags & I2C_M_NOSTART)) {
+ /* write the address */
+ data = i2c_8bit_addr_from_msg(msg) |
+ XIIC_TX_DYN_START_MASK;
+
+ if (i2c->nmsgs == 1 && msg->len == 0)
+ /* no data and last message -> add STOP */
+ data |= XIIC_TX_DYN_STOP_MASK;
+
+ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
+ }
+
+ xiic_fill_tx_fifo(i2c);
+ /* Clear any pending Tx empty, Tx Error and then enable them */
+ xiic_irq_clr_en(i2c, (XIIC_INTR_TX_EMPTY_MASK |
+ XIIC_INTR_TX_ERROR_MASK |
+ XIIC_INTR_BNB_MASK));
+ } else {
+ /*
+ * If previous message is Tx, make sure that Tx FIFO is empty
+ * before starting a new transfer as the repeated start in
+ * standard mode can corrupt the transaction if there are
+ * still bytes to be transmitted in FIFO
+ */
+ if (i2c->prev_msg_tx) {
+ int status;
+
+ status = xiic_wait_tx_empty(i2c);
+ if (status)
+ return;
+ }
+
+ /* Check if RSTA should be set */
+ cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET);
+ if (cr & XIIC_CR_MSMS_MASK) {
+ i2c->repeated_start = true;
+ /* Already a master, RSTA should be set */
+ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, (cr |
+ XIIC_CR_REPEATED_START_MASK |
+ XIIC_CR_DIR_IS_TX_MASK) &
+ ~(XIIC_CR_NO_ACK_MASK));
+ }
+
+ /* Write address to FIFO */
+ data = i2c_8bit_addr_from_msg(msg);
xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
- }
+ /* Fill fifo */
+ xiic_std_fill_tx_fifo(i2c);
- xiic_fill_tx_fifo(i2c);
+ if ((cr & XIIC_CR_MSMS_MASK) == 0) {
+ i2c->repeated_start = false;
+
+ /* Start Tx by writing to CR */
+ cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET);
+ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr |
+ XIIC_CR_MSMS_MASK |
+ XIIC_CR_DIR_IS_TX_MASK);
+ }
- /* Clear any pending Tx empty, Tx Error and then enable them. */
- xiic_irq_clr_en(i2c, XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_ERROR_MASK |
- XIIC_INTR_BNB_MASK);
+ /* Clear any pending Tx empty, Tx Error and then enable them */
+ xiic_irq_clr_en(i2c, XIIC_INTR_TX_EMPTY_MASK |
+ XIIC_INTR_TX_ERROR_MASK |
+ XIIC_INTR_BNB_MASK);
+ }
+ i2c->prev_msg_tx = true;
}
static irqreturn_t xiic_isr(int irq, void *dev_id)
@@ -633,6 +899,7 @@ static void __xiic_start_xfer(struct xiic_i2c *i2c)
{
int first = 1;
int fifo_space = xiic_tx_fifo_space(i2c);
+
dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, fifos space: %d\n",
__func__, i2c->tx_msg, fifo_space);
@@ -647,19 +914,20 @@ static void __xiic_start_xfer(struct xiic_i2c *i2c)
i2c->nmsgs--;
i2c->tx_msg++;
i2c->tx_pos = 0;
- } else
+ } else {
first = 0;
+ }
if (i2c->tx_msg->flags & I2C_M_RD) {
/* we dont date putting several reads in the FIFO */
xiic_start_recv(i2c);
return;
- } else {
- xiic_start_send(i2c);
- if (xiic_tx_space(i2c) != 0) {
- /* the message could not be completely sent */
- break;
- }
+ }
+
+ xiic_start_send(i2c);
+ if (xiic_tx_space(i2c) != 0) {
+ /* the message could not be completely sent */
+ break;
}
fifo_space = xiic_tx_fifo_space(i2c);
@@ -670,12 +938,12 @@ static void __xiic_start_xfer(struct xiic_i2c *i2c)
*/
if (i2c->nmsgs > 1 || xiic_tx_space(i2c))
xiic_irq_clr_en(i2c, XIIC_INTR_TX_HALF_MASK);
-
}
static int xiic_start_xfer(struct xiic_i2c *i2c)
{
int ret;
+
mutex_lock(&i2c->lock);
ret = xiic_reinit(i2c);
@@ -690,7 +958,7 @@ static int xiic_start_xfer(struct xiic_i2c *i2c)
static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
struct xiic_i2c *i2c = i2c_get_adapdata(adap);
- int err;
+ int err, count;
dev_dbg(adap->dev.parent, "%s entry SR: 0x%x\n", __func__,
xiic_getreg8(i2c, XIIC_SR_REG_OFFSET));
@@ -706,14 +974,29 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
i2c->tx_msg = msgs;
i2c->nmsgs = num;
+ /* Decide standard mode or Dynamic mode */
+ i2c->dynamic = true;
+
+ /* Initialize prev message type */
+ i2c->prev_msg_tx = false;
+
+ /* Enter standard mode only when read length is > 255 bytes */
+ for (count = 0; count < i2c->nmsgs; count++) {
+ if ((i2c->tx_msg[count].flags & I2C_M_RD) &&
+ i2c->tx_msg[count].len > MAX_READ_LENGTH_DYNAMIC) {
+ i2c->dynamic = false;
+ break;
+ }
+ }
+
err = xiic_start_xfer(i2c);
if (err < 0) {
dev_err(adap->dev.parent, "Error xiic_start_xfer\n");
goto out;
}
- if (wait_event_timeout(i2c->wait, (i2c->state == STATE_ERROR) ||
- (i2c->state == STATE_DONE), HZ)) {
+ if (wait_event_timeout(i2c->wait, i2c->state == STATE_ERROR ||
+ i2c->state == STATE_DONE, HZ)) {
err = (i2c->state == STATE_DONE) ? num : -EIO;
goto out;
} else {
@@ -739,19 +1022,13 @@ static const struct i2c_algorithm xiic_algorithm = {
.functionality = xiic_func,
};
-static const struct i2c_adapter_quirks xiic_quirks = {
- .max_read_len = 255,
-};
-
static const struct i2c_adapter xiic_adapter = {
.owner = THIS_MODULE,
.name = DRIVER_NAME,
.class = I2C_CLASS_DEPRECATED,
.algo = &xiic_algorithm,
- .quirks = &xiic_quirks,
};
-
static int xiic_i2c_probe(struct platform_device *pdev)
{
struct xiic_i2c *i2c;
@@ -908,6 +1185,7 @@ static const struct dev_pm_ops xiic_dev_pm_ops = {
SET_RUNTIME_PM_OPS(xiic_i2c_runtime_suspend,
xiic_i2c_runtime_resume, NULL)
};
+
static struct platform_driver xiic_i2c_driver = {
.probe = xiic_i2c_probe,
.remove = xiic_i2c_remove,
@@ -923,4 +1201,3 @@ module_platform_driver(xiic_i2c_driver);
MODULE_AUTHOR("info@mocean-labs.com");
MODULE_DESCRIPTION("Xilinx I2C bus driver");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:"DRIVER_NAME);
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index a0d926ae3f86..0286fd5702b6 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -246,11 +246,8 @@ static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan)
int ret = 0;
regval = pca954x_regval(data, chan);
- /* Only select the channel if its different from the last channel */
- if (data->last_chan != regval) {
- ret = pca954x_reg_write(muxc->parent, client, regval);
- data->last_chan = ret < 0 ? 0 : regval;
- }
+ ret = pca954x_reg_write(muxc->parent, client, regval);
+ data->last_chan = ret < 0 ? 0 : regval;
return ret;
}
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 12bb8b7ca1ff..2f1712350c24 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -1165,4 +1165,14 @@ config XILINX_XADC
The driver can also be build as a module. If so, the module will be called
xilinx-xadc.
+config XILINX_AMS
+ tristate "Xilinx AMS driver"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Say yes here to have support for the Xilinx AMS.
+
+ The driver can also be build as a module. If so, the module will be called
+ xilinx-ams.
+
endmenu
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 637807861112..c43575d5a178 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -106,4 +106,5 @@ obj-$(CONFIG_VF610_ADC) += vf610_adc.o
obj-$(CONFIG_VIPERBOARD_ADC) += viperboard_adc.o
xilinx-xadc-y := xilinx-xadc-core.o xilinx-xadc-events.o
obj-$(CONFIG_XILINX_XADC) += xilinx-xadc.o
+obj-$(CONFIG_XILINX_AMS) += xilinx-ams.o
obj-$(CONFIG_SD_ADC_MODULATOR) += sd_adc_modulator.o
diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c
new file mode 100644
index 000000000000..c21c46512d5f
--- /dev/null
+++ b/drivers/iio/adc/xilinx-ams.c
@@ -0,0 +1,1109 @@
+/*
+ * Xilinx AMS driver
+ *
+ * Licensed under the GPL-2
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/of_address.h>
+#include <linux/iopoll.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+#include <linux/iio/buffer.h>
+#include <linux/io.h>
+
+#include "xilinx-ams.h"
+#include <linux/delay.h>
+
+static const unsigned int AMS_UNMASK_TIMEOUT = 500;
+
+static inline void ams_read_reg(struct ams *ams, unsigned int offset, u32 *data)
+{
+ *data = readl(ams->base + offset);
+}
+
+static inline void ams_write_reg(struct ams *ams, unsigned int offset, u32 data)
+{
+ writel(data, ams->base + offset);
+}
+
+static inline void ams_update_reg(struct ams *ams, unsigned int offset,
+ u32 mask, u32 data)
+{
+ u32 val;
+
+ ams_read_reg(ams, offset, &val);
+ ams_write_reg(ams, offset, (val & ~mask) | (mask & data));
+}
+
+static inline void ams_ps_read_reg(struct ams *ams, unsigned int offset,
+ u32 *data)
+{
+ *data = readl(ams->ps_base + offset);
+}
+
+static inline void ams_ps_write_reg(struct ams *ams, unsigned int offset,
+ u32 data)
+{
+ writel(data, ams->ps_base + offset);
+}
+
+static inline void ams_ps_update_reg(struct ams *ams, unsigned int offset,
+ u32 mask, u32 data)
+{
+ u32 val;
+
+ ams_ps_read_reg(ams, offset, &val);
+ ams_ps_write_reg(ams, offset, (val & ~mask) | (data & mask));
+}
+
+static inline void ams_apb_pl_read_reg(struct ams *ams, unsigned int offset,
+ u32 *data)
+{
+ *data = readl(ams->pl_base + offset);
+}
+
+static inline void ams_apb_pl_write_reg(struct ams *ams, unsigned int offset,
+ u32 data)
+{
+ writel(data, ams->pl_base + offset);
+}
+
+static inline void ams_apb_pl_update_reg(struct ams *ams, unsigned int offset,
+ u32 mask, u32 data)
+{
+ u32 val;
+
+ ams_apb_pl_read_reg(ams, offset, &val);
+ ams_apb_pl_write_reg(ams, offset, (val & ~mask) | (data & mask));
+}
+
+static void ams_update_intrmask(struct ams *ams, u64 mask, u64 val)
+{
+	/* The intr_mask variable in ams represents bits in the AMS registers IDR0
+	 * and IDR1; the first 32 bits map to IDR0, the next ones to IDR1.
+ */
+ ams->intr_mask &= ~mask;
+ ams->intr_mask |= (val & mask);
+
+ ams_write_reg(ams, AMS_IER_0, ~(ams->intr_mask | ams->masked_alarm));
+ ams_write_reg(ams, AMS_IER_1,
+ ~(ams->intr_mask >> AMS_ISR1_INTR_MASK_SHIFT));
+ ams_write_reg(ams, AMS_IDR_0, ams->intr_mask | ams->masked_alarm);
+ ams_write_reg(ams, AMS_IDR_1,
+ ams->intr_mask >> AMS_ISR1_INTR_MASK_SHIFT);
+}
+
+static void iio_ams_disable_all_alarm(struct ams *ams)
+{
+ /* disable PS module alarm */
+ if (ams->ps_base) {
+ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_REGCFG1_ALARM_MASK,
+ AMS_REGCFG1_ALARM_MASK);
+ ams_ps_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK,
+ AMS_REGCFG3_ALARM_MASK);
+ }
+
+ /* disable PL module alarm */
+ if (ams->pl_base) {
+ ams->pl_bus->update(ams, AMS_REG_CONFIG1,
+ AMS_REGCFG1_ALARM_MASK,
+ AMS_REGCFG1_ALARM_MASK);
+ ams->pl_bus->update(ams, AMS_REG_CONFIG3,
+ AMS_REGCFG3_ALARM_MASK,
+ AMS_REGCFG3_ALARM_MASK);
+ }
+}
+
+static void iio_ams_update_alarm(struct ams *ams, unsigned long alarm_mask)
+{
+ u32 cfg;
+ unsigned long flags;
+ unsigned long pl_alarm_mask;
+
+ if (ams->ps_base) {
+ /* Configuring PS alarm enable */
+ cfg = ~((alarm_mask & AMS_ISR0_ALARM_2_TO_0_MASK) <<
+ AMS_CONF1_ALARM_2_TO_0_SHIFT);
+ cfg &= ~((alarm_mask & AMS_ISR0_ALARM_6_TO_3_MASK) <<
+ AMS_CONF1_ALARM_6_TO_3_SHIFT);
+ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_REGCFG1_ALARM_MASK,
+ cfg);
+
+ cfg = ~((alarm_mask >> AMS_CONF3_ALARM_12_TO_7_SHIFT) &
+ AMS_ISR0_ALARM_12_TO_7_MASK);
+ ams_ps_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK,
+ cfg);
+ }
+
+ if (ams->pl_base) {
+ pl_alarm_mask = (alarm_mask >> AMS_PL_ALARM_START);
+ /* Configuring PL alarm enable */
+ cfg = ~((pl_alarm_mask & AMS_ISR0_ALARM_2_TO_0_MASK) <<
+ AMS_CONF1_ALARM_2_TO_0_SHIFT);
+ cfg &= ~((pl_alarm_mask & AMS_ISR0_ALARM_6_TO_3_MASK) <<
+ AMS_CONF1_ALARM_6_TO_3_SHIFT);
+ ams->pl_bus->update(ams, AMS_REG_CONFIG1,
+ AMS_REGCFG1_ALARM_MASK, cfg);
+
+ cfg = ~((pl_alarm_mask >> AMS_CONF3_ALARM_12_TO_7_SHIFT) &
+ AMS_ISR0_ALARM_12_TO_7_MASK);
+ ams->pl_bus->update(ams, AMS_REG_CONFIG3,
+ AMS_REGCFG3_ALARM_MASK, cfg);
+ }
+
+ spin_lock_irqsave(&ams->lock, flags);
+ ams_update_intrmask(ams, AMS_ISR0_ALARM_MASK, ~alarm_mask);
+ spin_unlock_irqrestore(&ams->lock, flags);
+}
+
+static void ams_enable_channel_sequence(struct ams *ams)
+{
+ int i;
+ unsigned long long scan_mask;
+ struct iio_dev *indio_dev = iio_priv_to_dev(ams);
+
+	/* Enable channel sequence. The first 22 bits of scan_mask represent
+	 * PS channels, and the remaining bits represent PL channels.
+ */
+
+ /* Run calibration of PS & PL as part of the sequence */
+ scan_mask = 1 | (1 << PS_SEQ_MAX);
+ for (i = 0; i < indio_dev->num_channels; i++)
+ scan_mask |= BIT(indio_dev->channels[i].scan_index);
+
+ if (ams->ps_base) {
+ /* put sysmon in a soft reset to change the sequence */
+ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_DEFAULT);
+
+ /* configure basic channels */
+ ams_ps_write_reg(ams, AMS_REG_SEQ_CH0,
+ scan_mask & AMS_REG_SEQ0_MASK);
+ ams_ps_write_reg(ams, AMS_REG_SEQ_CH2, AMS_REG_SEQ2_MASK &
+ (scan_mask >> AMS_REG_SEQ2_MASK_SHIFT));
+
+ /* set continuous sequence mode */
+ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_CONTINUOUS);
+ }
+
+ if (ams->pl_base) {
+ /* put sysmon in a soft reset to change the sequence */
+ ams->pl_bus->update(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_DEFAULT);
+
+ /* configure basic channels */
+ scan_mask = (scan_mask >> PS_SEQ_MAX);
+ ams->pl_bus->write(ams, AMS_REG_SEQ_CH0,
+ scan_mask & AMS_REG_SEQ0_MASK);
+ ams->pl_bus->write(ams, AMS_REG_SEQ_CH2, AMS_REG_SEQ2_MASK &
+ (scan_mask >> AMS_REG_SEQ2_MASK_SHIFT));
+ ams->pl_bus->write(ams, AMS_REG_SEQ_CH1, AMS_REG_SEQ1_MASK &
+ (scan_mask >> AMS_REG_SEQ1_MASK_SHIFT));
+
+ /* set continuous sequence mode */
+ ams->pl_bus->update(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_CONTINUOUS);
+ }
+}
+
+static int iio_ams_init_device(struct ams *ams)
+{
+ int ret = 0;
+ u32 reg;
+
+ /* reset AMS */
+ if (ams->ps_base) {
+ ams_ps_write_reg(ams, AMS_VP_VN, AMS_PS_RESET_VALUE);
+
+ ret = readl_poll_timeout(ams->base + AMS_PS_CSTS, reg,
+ (reg & AMS_PS_CSTS_PS_READY) ==
+ AMS_PS_CSTS_PS_READY, 0,
+ AMS_INIT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ /* put sysmon in a default state */
+ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_DEFAULT);
+ }
+
+ if (ams->pl_base) {
+ ams->pl_bus->write(ams, AMS_VP_VN, AMS_PL_RESET_VALUE);
+
+ ret = readl_poll_timeout(ams->base + AMS_PL_CSTS, reg,
+ (reg & AMS_PL_CSTS_ACCESS_MASK) ==
+ AMS_PL_CSTS_ACCESS_MASK, 0,
+ AMS_INIT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ /* put sysmon in a default state */
+ ams->pl_bus->update(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_DEFAULT);
+ }
+
+ iio_ams_disable_all_alarm(ams);
+
+ /* Disable interrupt */
+ ams_update_intrmask(ams, ~0, ~0);
+
+ /* Clear any pending interrupt */
+ ams_write_reg(ams, AMS_ISR_0, AMS_ISR0_ALARM_MASK);
+ ams_write_reg(ams, AMS_ISR_1, AMS_ISR1_ALARM_MASK);
+
+ return ret;
+}
+
+static void ams_enable_single_channel(struct ams *ams, unsigned int offset)
+{
+ u8 channel_num = 0;
+
+ switch (offset) {
+ case AMS_VCC_PSPLL0:
+ channel_num = AMS_VCC_PSPLL0_CH;
+ break;
+ case AMS_VCC_PSPLL3:
+ channel_num = AMS_VCC_PSPLL3_CH;
+ break;
+ case AMS_VCCINT:
+ channel_num = AMS_VCCINT_CH;
+ break;
+ case AMS_VCCBRAM:
+ channel_num = AMS_VCCBRAM_CH;
+ break;
+ case AMS_VCCAUX:
+ channel_num = AMS_VCCAUX_CH;
+ break;
+ case AMS_PSDDRPLL:
+ channel_num = AMS_PSDDRPLL_CH;
+ break;
+ case AMS_PSINTFPDDR:
+ channel_num = AMS_PSINTFPDDR_CH;
+ break;
+ default:
+ break;
+ }
+
+ /* set single channel, sequencer off mode */
+ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_SINGLE_CHANNEL);
+
+ /* write the channel number */
+ ams_ps_update_reg(ams, AMS_REG_CONFIG0, AMS_CONF0_CHANNEL_NUM_MASK,
+ channel_num);
+ mdelay(1);
+}
+
+static void ams_read_vcc_reg(struct ams *ams, unsigned int offset, u32 *data)
+{
+ ams_enable_single_channel(ams, offset);
+ ams_read_reg(ams, offset, data);
+ ams_enable_channel_sequence(ams);
+}
+
+static int ams_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct ams *ams = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&ams->mutex);
+ if (chan->scan_index >= (PS_SEQ_MAX * 3))
+ ams_read_vcc_reg(ams, chan->address, val);
+ else if (chan->scan_index >= PS_SEQ_MAX)
+ ams->pl_bus->read(ams, chan->address, val);
+ else
+ ams_ps_read_reg(ams, chan->address, val);
+ mutex_unlock(&ams->mutex);
+
+ *val2 = 0;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ switch (chan->address) {
+ case AMS_SUPPLY1:
+ case AMS_SUPPLY2:
+ case AMS_SUPPLY3:
+ case AMS_SUPPLY4:
+ *val = AMS_SUPPLY_SCALE_3VOLT;
+ break;
+ case AMS_SUPPLY5:
+ case AMS_SUPPLY6:
+ if (chan->scan_index < PS_SEQ_MAX)
+ *val = AMS_SUPPLY_SCALE_6VOLT;
+ else
+ *val = AMS_SUPPLY_SCALE_3VOLT;
+ break;
+ case AMS_SUPPLY7:
+ case AMS_SUPPLY8:
+ *val = AMS_SUPPLY_SCALE_6VOLT;
+ break;
+ case AMS_SUPPLY9:
+ case AMS_SUPPLY10:
+ if (chan->scan_index < PS_SEQ_MAX)
+ *val = AMS_SUPPLY_SCALE_3VOLT;
+ else
+ *val = AMS_SUPPLY_SCALE_6VOLT;
+ break;
+ default:
+ if (chan->scan_index >= (PS_SEQ_MAX * 3))
+ *val = AMS_SUPPLY_SCALE_3VOLT;
+ else
+ *val = AMS_SUPPLY_SCALE_1VOLT;
+ break;
+ }
+ *val2 = AMS_SUPPLY_SCALE_DIV_BIT;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_TEMP:
+ *val = AMS_TEMP_SCALE;
+ *val2 = AMS_TEMP_SCALE_DIV_BIT;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ /* Only the temperature channel has an offset */
+ *val = AMS_TEMP_OFFSET;
+ *val2 = 0;
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
+
+static int ams_get_alarm_offset(int scan_index, enum iio_event_direction dir)
+{
+ int offset = 0;
+
+ if (scan_index >= PS_SEQ_MAX)
+ scan_index -= PS_SEQ_MAX;
+
+ if (dir == IIO_EV_DIR_FALLING) {
+ if (scan_index < AMS_SEQ_SUPPLY7)
+ offset = AMS_ALARM_THRESOLD_OFF_10;
+ else
+ offset = AMS_ALARM_THRESOLD_OFF_20;
+ }
+
+ switch (scan_index) {
+ case AMS_SEQ_TEMP:
+ return (AMS_ALARM_TEMP + offset);
+ case AMS_SEQ_SUPPLY1:
+ return (AMS_ALARM_SUPPLY1 + offset);
+ case AMS_SEQ_SUPPLY2:
+ return (AMS_ALARM_SUPPLY2 + offset);
+ case AMS_SEQ_SUPPLY3:
+ return (AMS_ALARM_SUPPLY3 + offset);
+ case AMS_SEQ_SUPPLY4:
+ return (AMS_ALARM_SUPPLY4 + offset);
+ case AMS_SEQ_SUPPLY5:
+ return (AMS_ALARM_SUPPLY5 + offset);
+ case AMS_SEQ_SUPPLY6:
+ return (AMS_ALARM_SUPPLY6 + offset);
+ case AMS_SEQ_SUPPLY7:
+ return (AMS_ALARM_SUPPLY7 + offset);
+ case AMS_SEQ_SUPPLY8:
+ return (AMS_ALARM_SUPPLY8 + offset);
+ case AMS_SEQ_SUPPLY9:
+ return (AMS_ALARM_SUPPLY9 + offset);
+ case AMS_SEQ_SUPPLY10:
+ return (AMS_ALARM_SUPPLY10 + offset);
+ case AMS_SEQ_VCCAMS:
+ return (AMS_ALARM_VCCAMS + offset);
+ case AMS_SEQ_TEMP_REMOTE:
+ return (AMS_ALARM_TEMP_REMOTE + offset);
+ }
+
+ return 0;
+}
+
+static const struct iio_chan_spec *ams_event_to_channel(
+ struct iio_dev *indio_dev, u32 event)
+{
+ int scan_index = 0, i;
+
+ if (event >= AMS_PL_ALARM_START) {
+ event -= AMS_PL_ALARM_START;
+ scan_index = PS_SEQ_MAX;
+ }
+
+ switch (event) {
+ case AMS_ALARM_BIT_TEMP:
+ scan_index += AMS_SEQ_TEMP;
+ break;
+ case AMS_ALARM_BIT_SUPPLY1:
+ scan_index += AMS_SEQ_SUPPLY1;
+ break;
+ case AMS_ALARM_BIT_SUPPLY2:
+ scan_index += AMS_SEQ_SUPPLY2;
+ break;
+ case AMS_ALARM_BIT_SUPPLY3:
+ scan_index += AMS_SEQ_SUPPLY3;
+ break;
+ case AMS_ALARM_BIT_SUPPLY4:
+ scan_index += AMS_SEQ_SUPPLY4;
+ break;
+ case AMS_ALARM_BIT_SUPPLY5:
+ scan_index += AMS_SEQ_SUPPLY5;
+ break;
+ case AMS_ALARM_BIT_SUPPLY6:
+ scan_index += AMS_SEQ_SUPPLY6;
+ break;
+ case AMS_ALARM_BIT_SUPPLY7:
+ scan_index += AMS_SEQ_SUPPLY7;
+ break;
+ case AMS_ALARM_BIT_SUPPLY8:
+ scan_index += AMS_SEQ_SUPPLY8;
+ break;
+ case AMS_ALARM_BIT_SUPPLY9:
+ scan_index += AMS_SEQ_SUPPLY9;
+ break;
+ case AMS_ALARM_BIT_SUPPLY10:
+ scan_index += AMS_SEQ_SUPPLY10;
+ break;
+ case AMS_ALARM_BIT_VCCAMS:
+ scan_index += AMS_SEQ_VCCAMS;
+ break;
+ case AMS_ALARM_BIT_TEMP_REMOTE:
+ scan_index += AMS_SEQ_TEMP_REMOTE;
+ break;
+ }
+
+ for (i = 0; i < indio_dev->num_channels; i++)
+ if (indio_dev->channels[i].scan_index == scan_index)
+ break;
+
+ return &indio_dev->channels[i];
+}
+
+static int ams_get_alarm_mask(int scan_index)
+{
+ int bit = 0;
+
+ if (scan_index >= PS_SEQ_MAX) {
+ bit = AMS_PL_ALARM_START;
+ scan_index -= PS_SEQ_MAX;
+ }
+
+ switch (scan_index) {
+ case AMS_SEQ_TEMP:
+ return BIT(AMS_ALARM_BIT_TEMP + bit);
+ case AMS_SEQ_SUPPLY1:
+ return BIT(AMS_ALARM_BIT_SUPPLY1 + bit);
+ case AMS_SEQ_SUPPLY2:
+ return BIT(AMS_ALARM_BIT_SUPPLY2 + bit);
+ case AMS_SEQ_SUPPLY3:
+ return BIT(AMS_ALARM_BIT_SUPPLY3 + bit);
+ case AMS_SEQ_SUPPLY4:
+ return BIT(AMS_ALARM_BIT_SUPPLY4 + bit);
+ case AMS_SEQ_SUPPLY5:
+ return BIT(AMS_ALARM_BIT_SUPPLY5 + bit);
+ case AMS_SEQ_SUPPLY6:
+ return BIT(AMS_ALARM_BIT_SUPPLY6 + bit);
+ case AMS_SEQ_SUPPLY7:
+ return BIT(AMS_ALARM_BIT_SUPPLY7 + bit);
+ case AMS_SEQ_SUPPLY8:
+ return BIT(AMS_ALARM_BIT_SUPPLY8 + bit);
+ case AMS_SEQ_SUPPLY9:
+ return BIT(AMS_ALARM_BIT_SUPPLY9 + bit);
+ case AMS_SEQ_SUPPLY10:
+ return BIT(AMS_ALARM_BIT_SUPPLY10 + bit);
+ case AMS_SEQ_VCCAMS:
+ return BIT(AMS_ALARM_BIT_VCCAMS + bit);
+ case AMS_SEQ_TEMP_REMOTE:
+ return BIT(AMS_ALARM_BIT_TEMP_REMOTE + bit);
+ }
+
+ return 0;
+}
+
+static int ams_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct ams *ams = iio_priv(indio_dev);
+
+ return (ams->alarm_mask & ams_get_alarm_mask(chan->scan_index)) ? 1 : 0;
+}
+
+static int ams_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ struct ams *ams = iio_priv(indio_dev);
+ unsigned int alarm;
+
+ alarm = ams_get_alarm_mask(chan->scan_index);
+
+ mutex_lock(&ams->mutex);
+
+ if (state)
+ ams->alarm_mask |= alarm;
+ else
+ ams->alarm_mask &= ~alarm;
+
+ iio_ams_update_alarm(ams, ams->alarm_mask);
+
+ mutex_unlock(&ams->mutex);
+
+ return 0;
+}
+
+static int ams_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int *val, int *val2)
+{
+ struct ams *ams = iio_priv(indio_dev);
+ unsigned int offset = ams_get_alarm_offset(chan->scan_index, dir);
+
+ mutex_lock(&ams->mutex);
+
+ if (chan->scan_index >= PS_SEQ_MAX)
+ ams->pl_bus->read(ams, offset, val);
+ else
+ ams_ps_read_reg(ams, offset, val);
+
+ mutex_unlock(&ams->mutex);
+
+ *val2 = 0;
+ return IIO_VAL_INT;
+}
+
+static int ams_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int val, int val2)
+{
+ struct ams *ams = iio_priv(indio_dev);
+ unsigned int offset;
+
+ mutex_lock(&ams->mutex);
+
+ /* Set temperature channel threshold to direct threshold */
+ if (chan->type == IIO_TEMP) {
+ offset = ams_get_alarm_offset(chan->scan_index,
+ IIO_EV_DIR_FALLING);
+
+ if (chan->scan_index >= PS_SEQ_MAX)
+ ams->pl_bus->update(ams, offset,
+ AMS_ALARM_THR_DIRECT_MASK,
+ AMS_ALARM_THR_DIRECT_MASK);
+ else
+ ams_ps_update_reg(ams, offset,
+ AMS_ALARM_THR_DIRECT_MASK,
+ AMS_ALARM_THR_DIRECT_MASK);
+ }
+
+ offset = ams_get_alarm_offset(chan->scan_index, dir);
+ if (chan->scan_index >= PS_SEQ_MAX)
+ ams->pl_bus->write(ams, offset, val);
+ else
+ ams_ps_write_reg(ams, offset, val);
+
+ mutex_unlock(&ams->mutex);
+
+ return 0;
+}
+
+static void ams_handle_event(struct iio_dev *indio_dev, u32 event)
+{
+ const struct iio_chan_spec *chan;
+
+ chan = ams_event_to_channel(indio_dev, event);
+
+ if (chan->type == IIO_TEMP) {
+ /* The temperature channel only supports over-temperature
+ * events
+ */
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns(indio_dev));
+ } else {
+		/* For other channels we don't know whether it is an upper or
+		 * lower threshold event. Userspace will have to check the
+		 * channel value if it wants to know.
+ */
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ iio_get_time_ns(indio_dev));
+ }
+}
+
+static void ams_handle_events(struct iio_dev *indio_dev, unsigned long events)
+{
+ unsigned int bit;
+
+ for_each_set_bit(bit, &events, AMS_NO_OF_ALARMS)
+ ams_handle_event(indio_dev, bit);
+}
+
+/**
+ * ams_unmask_worker - ams alarm interrupt unmask worker
+ * @work : work to be done
+ *
+ * The ZynqMP threshold interrupts are level sensitive. Since we can't make the
+ * threshold condition go away from within the interrupt handler, this means as
+ * soon as a threshold condition is present we would enter the interrupt handler
+ * again and again. To work around this we mask all active threshold interrupts
+ * in the interrupt handler and start a timer. In this timer we poll the
+ * interrupt status and only if the interrupt is inactive we unmask it again.
+ */
+static void ams_unmask_worker(struct work_struct *work)
+{
+ struct ams *ams = container_of(work, struct ams, ams_unmask_work.work);
+ unsigned int status, unmask;
+
+ spin_lock_irq(&ams->lock);
+
+ ams_read_reg(ams, AMS_ISR_0, &status);
+
+ /* Clear those bits which are not active anymore */
+ unmask = (ams->masked_alarm ^ status) & ams->masked_alarm;
+
+ /* clear status of disabled alarm */
+ unmask |= ams->intr_mask;
+
+ ams->masked_alarm &= status;
+
+ /* Also clear those which are masked out anyway */
+ ams->masked_alarm &= ~ams->intr_mask;
+
+ /* Clear the interrupts before we unmask them */
+ ams_write_reg(ams, AMS_ISR_0, unmask);
+
+ ams_update_intrmask(ams, 0, 0);
+
+ spin_unlock_irq(&ams->lock);
+
+ /* if still pending some alarm re-trigger the timer */
+ if (ams->masked_alarm)
+ schedule_delayed_work(&ams->ams_unmask_work,
+ msecs_to_jiffies(AMS_UNMASK_TIMEOUT));
+}
+
+static irqreturn_t ams_iio_irq(int irq, void *data)
+{
+ unsigned int isr0, isr1;
+ struct iio_dev *indio_dev = data;
+ struct ams *ams = iio_priv(indio_dev);
+
+ spin_lock(&ams->lock);
+
+ ams_read_reg(ams, AMS_ISR_0, &isr0);
+ ams_read_reg(ams, AMS_ISR_1, &isr1);
+
+	/* only process alarms that are not masked */
+ isr0 &= ~((ams->intr_mask & AMS_ISR0_ALARM_MASK) | ams->masked_alarm);
+ isr1 &= ~(ams->intr_mask >> AMS_ISR1_INTR_MASK_SHIFT);
+
+ /* clear interrupt */
+ ams_write_reg(ams, AMS_ISR_0, isr0);
+ ams_write_reg(ams, AMS_ISR_1, isr1);
+
+ if (isr0) {
+		/* Once an alarm interrupt has occurred, mask it until it gets cleared */
+ ams->masked_alarm |= isr0;
+ ams_update_intrmask(ams, 0, 0);
+
+ ams_handle_events(indio_dev, isr0);
+
+ schedule_delayed_work(&ams->ams_unmask_work,
+ msecs_to_jiffies(AMS_UNMASK_TIMEOUT));
+ }
+
+ spin_unlock(&ams->lock);
+
+ return IRQ_HANDLED;
+}
+
+static const struct iio_event_spec ams_temp_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_VALUE),
+ },
+};
+
+static const struct iio_event_spec ams_voltage_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+static const struct iio_chan_spec ams_ps_channels[] = {
+ AMS_PS_CHAN_TEMP(AMS_SEQ_TEMP, AMS_TEMP, "ps_temp"),
+ AMS_PS_CHAN_TEMP(AMS_SEQ_TEMP_REMOTE, AMS_TEMP_REMOTE, "remote_temp"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY1, AMS_SUPPLY1, "vccpsintlp"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY2, AMS_SUPPLY2, "vccpsintfp"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY3, AMS_SUPPLY3, "vccpsaux"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY4, AMS_SUPPLY4, "vccpsddr"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY5, AMS_SUPPLY5, "vccpsio3"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY6, AMS_SUPPLY6, "vccpsio0"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY7, AMS_SUPPLY7, "vccpsio1"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY8, AMS_SUPPLY8, "vccpsio2"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY9, AMS_SUPPLY9, "psmgtravcc"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY10, AMS_SUPPLY10, "psmgtravtt"),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_VCCAMS, AMS_VCCAMS, "vccams"),
+};
+
+static const struct iio_chan_spec ams_pl_channels[] = {
+ AMS_PL_CHAN_TEMP(AMS_SEQ_TEMP, AMS_TEMP, "pl_temp"),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY1, AMS_SUPPLY1, "vccint", true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY2, AMS_SUPPLY2, "vccaux", true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VREFP, AMS_VREFP, "vccvrefp", false),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VREFN, AMS_VREFN, "vccvrefn", false),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY3, AMS_SUPPLY3, "vccbram", true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY4, AMS_SUPPLY4, "vccplintlp", true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY5, AMS_SUPPLY5, "vccplintfp", true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY6, AMS_SUPPLY6, "vccplaux", true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VCCAMS, AMS_VCCAMS, "vccams", true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VP_VN, AMS_VP_VN, "vccvpvn", false),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY7, AMS_SUPPLY7, "vuser0", true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY8, AMS_SUPPLY8, "vuser1", true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY9, AMS_SUPPLY9, "vuser2", true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY10, AMS_SUPPLY10, "vuser3", true),
+ AMS_PL_AUX_CHAN_VOLTAGE(0, "vccaux0"),
+ AMS_PL_AUX_CHAN_VOLTAGE(1, "vccaux1"),
+ AMS_PL_AUX_CHAN_VOLTAGE(2, "vccaux2"),
+ AMS_PL_AUX_CHAN_VOLTAGE(3, "vccaux3"),
+ AMS_PL_AUX_CHAN_VOLTAGE(4, "vccaux4"),
+ AMS_PL_AUX_CHAN_VOLTAGE(5, "vccaux5"),
+ AMS_PL_AUX_CHAN_VOLTAGE(6, "vccaux6"),
+ AMS_PL_AUX_CHAN_VOLTAGE(7, "vccaux7"),
+ AMS_PL_AUX_CHAN_VOLTAGE(8, "vccaux8"),
+ AMS_PL_AUX_CHAN_VOLTAGE(9, "vccaux9"),
+ AMS_PL_AUX_CHAN_VOLTAGE(10, "vccaux10"),
+ AMS_PL_AUX_CHAN_VOLTAGE(11, "vccaux11"),
+ AMS_PL_AUX_CHAN_VOLTAGE(12, "vccaux12"),
+ AMS_PL_AUX_CHAN_VOLTAGE(13, "vccaux13"),
+ AMS_PL_AUX_CHAN_VOLTAGE(14, "vccaux14"),
+ AMS_PL_AUX_CHAN_VOLTAGE(15, "vccaux15"),
+};
+
+static const struct iio_chan_spec ams_ctrl_channels[] = {
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCC_PSPLL, AMS_VCC_PSPLL0, "vcc_pspll0"),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCC_PSBATT, AMS_VCC_PSPLL3, "vcc_psbatt"),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCINT, AMS_VCCINT, "vccint"),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCBRAM, AMS_VCCBRAM, "vccbram"),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCAUX, AMS_VCCAUX, "vccaux"),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_PSDDRPLL, AMS_PSDDRPLL, "vcc_psddrpll"),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_INTDDR, AMS_PSINTFPDDR, "vccpsintfpddr"),
+};
+
+static int ams_init_module(struct iio_dev *indio_dev, struct device_node *np,
+ struct iio_chan_spec *channels)
+{
+ struct ams *ams = iio_priv(indio_dev);
+ struct device_node *chan_node, *child;
+ int ret, num_channels = 0;
+ unsigned int reg;
+
+ if (of_device_is_compatible(np, "xlnx,zynqmp-ams-ps")) {
+ ams->ps_base = of_iomap(np, 0);
+ if (!ams->ps_base)
+ return -ENXIO;
+
+ /* add PS channels to iio device channels */
+ memcpy(channels + num_channels, ams_ps_channels,
+ sizeof(ams_ps_channels));
+ num_channels += ARRAY_SIZE(ams_ps_channels);
+ } else if (of_device_is_compatible(np, "xlnx,zynqmp-ams-pl")) {
+ ams->pl_base = of_iomap(np, 0);
+ if (!ams->pl_base)
+ return -ENXIO;
+
+ /* Copy only first 10 fix channels */
+ memcpy(channels + num_channels, ams_pl_channels,
+ AMS_PL_MAX_FIXED_CHANNEL * sizeof(*channels));
+ num_channels += AMS_PL_MAX_FIXED_CHANNEL;
+
+ chan_node = of_get_child_by_name(np, "xlnx,ext-channels");
+ if (chan_node) {
+ for_each_child_of_node(chan_node, child) {
+ ret = of_property_read_u32(child, "reg", &reg);
+ if (ret || reg > AMS_PL_MAX_EXT_CHANNEL)
+ continue;
+
+ memcpy(&channels[num_channels],
+ &ams_pl_channels[reg +
+ AMS_PL_MAX_FIXED_CHANNEL],
+ sizeof(*channels));
+
+ if (of_property_read_bool(child,
+ "xlnx,bipolar"))
+ channels[num_channels].
+ scan_type.sign = 's';
+
+ num_channels += 1;
+ }
+ }
+ of_node_put(chan_node);
+ } else if (of_device_is_compatible(np, "xlnx,zynqmp-ams")) {
+ /* add AMS channels to iio device channels */
+ memcpy(channels + num_channels, ams_ctrl_channels,
+ sizeof(ams_ctrl_channels));
+ num_channels += ARRAY_SIZE(ams_ctrl_channels);
+ } else {
+ return -EINVAL;
+ }
+
+ return num_channels;
+}
+
+static int ams_parse_dt(struct iio_dev *indio_dev, struct platform_device *pdev)
+{
+ struct ams *ams = iio_priv(indio_dev);
+ struct iio_chan_spec *ams_channels, *dev_channels;
+ struct device_node *child_node = NULL, *np = pdev->dev.of_node;
+ int ret, chan_vol = 0, chan_temp = 0, i, rising_off, falling_off;
+ unsigned int num_channels = 0;
+
+ /* Initialize buffer for channel specification */
+ ams_channels = kzalloc(sizeof(ams_ps_channels) +
+ sizeof(ams_pl_channels) +
+ sizeof(ams_ctrl_channels), GFP_KERNEL);
+ if (!ams_channels)
+ return -ENOMEM;
+
+ if (of_device_is_available(np)) {
+ ret = ams_init_module(indio_dev, np, ams_channels);
+ if (ret < 0) {
+ kfree(ams_channels);
+ return ret;
+ }
+
+ num_channels += ret;
+ }
+
+ for_each_child_of_node(np, child_node) {
+ if (of_device_is_available(child_node)) {
+ ret = ams_init_module(indio_dev, child_node,
+ ams_channels + num_channels);
+ if (ret < 0) {
+ kfree(ams_channels);
+ return ret;
+ }
+
+ num_channels += ret;
+ }
+ }
+
+ for (i = 0; i < num_channels; i++) {
+ if (ams_channels[i].type == IIO_VOLTAGE)
+ ams_channels[i].channel = chan_vol++;
+ else
+ ams_channels[i].channel = chan_temp++;
+
+ if (ams_channels[i].scan_index < (PS_SEQ_MAX * 3)) {
+ /* set threshold to max and min for each channel */
+ falling_off = ams_get_alarm_offset(
+ ams_channels[i].scan_index,
+ IIO_EV_DIR_FALLING);
+ rising_off = ams_get_alarm_offset(
+ ams_channels[i].scan_index,
+ IIO_EV_DIR_RISING);
+ if (ams_channels[i].scan_index >= PS_SEQ_MAX) {
+ ams->pl_bus->write(ams, falling_off,
+ AMS_ALARM_THR_MIN);
+ ams->pl_bus->write(ams, rising_off,
+ AMS_ALARM_THR_MAX);
+ } else {
+ ams_ps_write_reg(ams, falling_off,
+ AMS_ALARM_THR_MIN);
+ ams_ps_write_reg(ams, rising_off,
+ AMS_ALARM_THR_MAX);
+ }
+ }
+ }
+
+ dev_channels = devm_kzalloc(&pdev->dev, sizeof(*dev_channels) *
+ num_channels, GFP_KERNEL);
+ if (!dev_channels) {
+ kfree(ams_channels);
+ return -ENOMEM;
+ }
+
+ memcpy(dev_channels, ams_channels,
+ sizeof(*ams_channels) * num_channels);
+ kfree(ams_channels);
+ indio_dev->channels = dev_channels;
+ indio_dev->num_channels = num_channels;
+
+ return 0;
+}
+
+static const struct iio_info iio_pl_info = {
+ .read_raw = &ams_read_raw,
+ .read_event_config = &ams_read_event_config,
+ .write_event_config = &ams_write_event_config,
+ .read_event_value = &ams_read_event_value,
+ .write_event_value = &ams_write_event_value,
+};
+
+static const struct ams_pl_bus_ops ams_pl_apb = {
+ .read = ams_apb_pl_read_reg,
+ .write = ams_apb_pl_write_reg,
+ .update = ams_apb_pl_update_reg,
+};
+
+static const struct of_device_id ams_of_match_table[] = {
+ { .compatible = "xlnx,zynqmp-ams", &ams_pl_apb },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ams_of_match_table);
+
+static int ams_probe(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev;
+ struct ams *ams;
+ struct resource *res;
+ const struct of_device_id *id;
+ int ret;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ id = of_match_node(ams_of_match_table, pdev->dev.of_node);
+ if (!id)
+ return -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*ams));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ ams = iio_priv(indio_dev);
+ ams->pl_bus = id->data;
+ mutex_init(&ams->mutex);
+ spin_lock_init(&ams->lock);
+
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->dev.of_node = pdev->dev.of_node;
+ indio_dev->name = "ams";
+
+ indio_dev->info = &iio_pl_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ams-base");
+ ams->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ams->base))
+ return PTR_ERR(ams->base);
+
+ INIT_DELAYED_WORK(&ams->ams_unmask_work, ams_unmask_worker);
+
+ ams->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ams->clk))
+ return PTR_ERR(ams->clk);
+ clk_prepare_enable(ams->clk);
+
+ ret = iio_ams_init_device(ams);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize AMS\n");
+ goto clk_disable;
+ }
+
+ ret = ams_parse_dt(indio_dev, pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failure in parsing DT\n");
+ goto clk_disable;
+ }
+
+ ams_enable_channel_sequence(ams);
+
+ ams->irq = platform_get_irq_byname(pdev, "ams-irq");
+ ret = devm_request_irq(&pdev->dev, ams->irq, &ams_iio_irq, 0, "ams-irq",
+ indio_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register interrupt\n");
+ goto clk_disable;
+ }
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ return iio_device_register(indio_dev);
+
+clk_disable:
+ clk_disable_unprepare(ams->clk);
+ return ret;
+}
+
+static int ams_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct ams *ams = iio_priv(indio_dev);
+
+ cancel_delayed_work(&ams->ams_unmask_work);
+
+ /* Unregister the device */
+ iio_device_unregister(indio_dev);
+ clk_disable_unprepare(ams->clk);
+ return 0;
+}
+
+static int __maybe_unused ams_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ams *ams = iio_priv(indio_dev);
+
+ clk_disable_unprepare(ams->clk);
+
+ return 0;
+}
+
+static int __maybe_unused ams_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ams *ams = iio_priv(indio_dev);
+
+ clk_prepare_enable(ams->clk);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ams_pm_ops, ams_suspend, ams_resume);
+
+static struct platform_driver ams_driver = {
+ .probe = ams_probe,
+ .remove = ams_remove,
+ .driver = {
+ .name = "ams",
+ .pm = &ams_pm_ops,
+ .of_match_table = ams_of_match_table,
+ },
+};
+module_platform_driver(ams_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Rajnikant Bhojani <rajnikant.bhojani@xilinx.com>");
diff --git a/drivers/iio/adc/xilinx-ams.h b/drivers/iio/adc/xilinx-ams.h
new file mode 100644
index 000000000000..3d900a9df82f
--- /dev/null
+++ b/drivers/iio/adc/xilinx-ams.h
@@ -0,0 +1,278 @@
+#ifndef __XILINX_AMS_H__
+#define __XILINX_AMS_H__
+
+#define AMS_MISC_CTRL 0x000
+#define AMS_ISR_0 0x010
+#define AMS_ISR_1 0x014
+#define AMS_IMR_0 0x018
+#define AMS_IMR_1 0x01c
+#define AMS_IER_0 0x020
+#define AMS_IER_1 0x024
+#define AMS_IDR_0 0x028
+#define AMS_IDR_1 0x02c
+#define AMS_PS_CSTS 0x040
+#define AMS_PL_CSTS 0x044
+#define AMS_MON_CSTS 0x050
+
+#define AMS_VCC_PSPLL0 0x060
+#define AMS_VCC_PSPLL3 0x06C
+#define AMS_VCCINT 0x078
+#define AMS_VCCBRAM 0x07C
+#define AMS_VCCAUX 0x080
+#define AMS_PSDDRPLL 0x084
+#define AMS_PSINTFPDDR 0x09C
+
+#define AMS_VCC_PSPLL0_CH 48
+#define AMS_VCC_PSPLL3_CH 51
+#define AMS_VCCINT_CH 54
+#define AMS_VCCBRAM_CH 55
+#define AMS_VCCAUX_CH 56
+#define AMS_PSDDRPLL_CH 57
+#define AMS_PSINTFPDDR_CH 63
+
+#define AMS_REG_CONFIG0 0x100
+#define AMS_REG_CONFIG1 0x104
+#define AMS_REG_CONFIG2 0x108
+#define AMS_REG_CONFIG3 0x10C
+#define AMS_REG_CONFIG4 0x110
+#define AMS_REG_SEQ_CH0 0x120
+#define AMS_REG_SEQ_CH1 0x124
+#define AMS_REG_SEQ_CH2 0x118
+
+#define AMS_TEMP 0x000
+#define AMS_SUPPLY1 0x004
+#define AMS_SUPPLY2 0x008
+#define AMS_VP_VN 0x00c
+#define AMS_VREFP 0x010
+#define AMS_VREFN 0x014
+#define AMS_SUPPLY3 0x018
+#define AMS_SUPPLY4 0x034
+#define AMS_SUPPLY5 0x038
+#define AMS_SUPPLY6 0x03c
+#define AMS_SUPPLY7 0x200
+#define AMS_SUPPLY8 0x204
+#define AMS_SUPPLY9 0x208
+#define AMS_SUPPLY10 0x20c
+#define AMS_VCCAMS 0x210
+#define AMS_TEMP_REMOTE 0x214
+
+#define AMS_REG_VAUX(x) (0x40 + (4*(x)))
+#define AMS_REG_VUSER(x) (0x200 + (4*(x)))
+
+#define AMS_PS_RESET_VALUE 0xFFFFU
+#define AMS_PL_RESET_VALUE 0xFFFFU
+
+#define AMS_CONF0_CHANNEL_NUM_MASK (0x3f << 0)
+
+#define AMS_CONF1_SEQ_MASK (0xf << 12)
+#define AMS_CONF1_SEQ_DEFAULT (0 << 12)
+#define AMS_CONF1_SEQ_SINGLE_PASS (1 << 12)
+#define AMS_CONF1_SEQ_CONTINUOUS (2 << 12)
+#define AMS_CONF1_SEQ_SINGLE_CHANNEL (3 << 12)
+
+#define AMS_REG_SEQ0_MASK 0xFFFF
+#define AMS_REG_SEQ2_MASK 0x3F
+#define AMS_REG_SEQ1_MASK 0xFFFF
+#define AMS_REG_SEQ2_MASK_SHIFT 16
+#define AMS_REG_SEQ1_MASK_SHIFT 22
+
+#define AMS_REGCFG1_ALARM_MASK 0xF0F
+#define AMS_REGCFG3_ALARM_MASK 0x3F
+
+#define AMS_ALARM_TEMP 0x140
+#define AMS_ALARM_SUPPLY1 0x144
+#define AMS_ALARM_SUPPLY2 0x148
+#define AMS_ALARM_OT 0x14c
+
+#define AMS_ALARM_SUPPLY3 0x160
+#define AMS_ALARM_SUPPLY4 0x164
+#define AMS_ALARM_SUPPLY5 0x168
+#define AMS_ALARM_SUPPLY6 0x16c
+#define AMS_ALARM_SUPPLY7 0x180
+#define AMS_ALARM_SUPPLY8 0x184
+#define AMS_ALARM_SUPPLY9 0x188
+#define AMS_ALARM_SUPPLY10 0x18c
+#define AMS_ALARM_VCCAMS 0x190
+#define AMS_ALARM_TEMP_REMOTE 0x194
+#define AMS_ALARM_THRESOLD_OFF_10 0x10
+#define AMS_ALARM_THRESOLD_OFF_20 0x20
+
+#define AMS_ALARM_THR_DIRECT_MASK 0x01
+#define AMS_ALARM_THR_MIN 0x0000
+#define AMS_ALARM_THR_MAX 0xffff
+
+#define AMS_NO_OF_ALARMS 32
+#define AMS_PL_ALARM_START 16
+#define AMS_ISR0_ALARM_MASK 0xFFFFFFFFU
+#define AMS_ISR1_ALARM_MASK 0xE000001FU
+#define AMS_ISR1_INTR_MASK_SHIFT 32
+#define AMS_ISR0_ALARM_2_TO_0_MASK 0x07
+#define AMS_ISR0_ALARM_6_TO_3_MASK 0x78
+#define AMS_ISR0_ALARM_12_TO_7_MASK 0x3F
+#define AMS_CONF1_ALARM_2_TO_0_SHIFT 1
+#define AMS_CONF1_ALARM_6_TO_3_SHIFT 5
+#define AMS_CONF3_ALARM_12_TO_7_SHIFT 8
+
+#define AMS_PS_CSTS_PS_READY 0x08010000U
+#define AMS_PL_CSTS_ACCESS_MASK 0x00000001U
+
+#define AMS_PL_MAX_FIXED_CHANNEL 10
+#define AMS_PL_MAX_EXT_CHANNEL 20
+
+#define AMS_INIT_TIMEOUT 10000
+
+/* The following scale and offset values are derived from
+ * UG580 (v1.7) December 20, 2016
+ */
+#define AMS_SUPPLY_SCALE_1VOLT 1000
+#define AMS_SUPPLY_SCALE_3VOLT 3000
+#define AMS_SUPPLY_SCALE_6VOLT 6000
+#define AMS_SUPPLY_SCALE_DIV_BIT 16
+
+#define AMS_TEMP_SCALE 509314
+#define AMS_TEMP_SCALE_DIV_BIT 16
+#define AMS_TEMP_OFFSET -((280230L << 16) / 509314)
+
+enum ams_alarm_bit {
+ AMS_ALARM_BIT_TEMP,
+ AMS_ALARM_BIT_SUPPLY1,
+ AMS_ALARM_BIT_SUPPLY2,
+ AMS_ALARM_BIT_SUPPLY3,
+ AMS_ALARM_BIT_SUPPLY4,
+ AMS_ALARM_BIT_SUPPLY5,
+ AMS_ALARM_BIT_SUPPLY6,
+ AMS_ALARM_BIT_RESERVED,
+ AMS_ALARM_BIT_SUPPLY7,
+ AMS_ALARM_BIT_SUPPLY8,
+ AMS_ALARM_BIT_SUPPLY9,
+ AMS_ALARM_BIT_SUPPLY10,
+ AMS_ALARM_BIT_VCCAMS,
+ AMS_ALARM_BIT_TEMP_REMOTE
+};
+
+enum ams_seq {
+ AMS_SEQ_VCC_PSPLL,
+ AMS_SEQ_VCC_PSBATT,
+ AMS_SEQ_VCCINT,
+ AMS_SEQ_VCCBRAM,
+ AMS_SEQ_VCCAUX,
+ AMS_SEQ_PSDDRPLL,
+ AMS_SEQ_INTDDR
+};
+
+enum ams_ps_pl_seq {
+ AMS_SEQ_CALIB,
+ AMS_SEQ_RSVD_1,
+ AMS_SEQ_RSVD_2,
+ AMS_SEQ_TEST,
+ AMS_SEQ_RSVD_4,
+ AMS_SEQ_SUPPLY4,
+ AMS_SEQ_SUPPLY5,
+ AMS_SEQ_SUPPLY6,
+ AMS_SEQ_TEMP,
+ AMS_SEQ_SUPPLY2,
+ AMS_SEQ_SUPPLY1,
+ AMS_SEQ_VP_VN,
+ AMS_SEQ_VREFP,
+ AMS_SEQ_VREFN,
+ AMS_SEQ_SUPPLY3,
+ AMS_SEQ_CURRENT_MON,
+ AMS_SEQ_SUPPLY7,
+ AMS_SEQ_SUPPLY8,
+ AMS_SEQ_SUPPLY9,
+ AMS_SEQ_SUPPLY10,
+ AMS_SEQ_VCCAMS,
+ AMS_SEQ_TEMP_REMOTE,
+ AMS_SEQ_MAX
+};
+
+#define AMS_SEQ(x) (AMS_SEQ_MAX + (x))
+#define AMS_VAUX_SEQ(x) (AMS_SEQ_MAX + (x))
+
+#define PS_SEQ_MAX AMS_SEQ_MAX
+#define PS_SEQ(x) (x)
+#define PL_SEQ(x) (PS_SEQ_MAX + x)
+
+#define AMS_CHAN_TEMP(_scan_index, _addr, _ext) { \
+ .type = IIO_TEMP, \
+ .indexed = 1, \
+ .address = (_addr), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .event_spec = ams_temp_events, \
+ .num_event_specs = ARRAY_SIZE(ams_temp_events), \
+ .scan_index = (_scan_index), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .shift = 4, \
+ .endianness = IIO_CPU, \
+ }, \
+ .extend_name = _ext, \
+}
+
+#define AMS_CHAN_VOLTAGE(_scan_index, _addr, _ext, _alarm) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .address = (_addr), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .event_spec = (_alarm) ? ams_voltage_events : NULL, \
+ .num_event_specs = (_alarm) ? ARRAY_SIZE(ams_voltage_events) : 0, \
+ .scan_index = (_scan_index), \
+ .scan_type = { \
+ .realbits = 10, \
+ .storagebits = 16, \
+ .shift = 6, \
+ .endianness = IIO_CPU, \
+ }, \
+ .extend_name = _ext, \
+}
+
+#define AMS_PS_CHAN_TEMP(_scan_index, _addr, _ext) \
+ AMS_CHAN_TEMP(PS_SEQ(_scan_index), _addr, _ext)
+#define AMS_PS_CHAN_VOLTAGE(_scan_index, _addr, _ext) \
+ AMS_CHAN_VOLTAGE(PS_SEQ(_scan_index), _addr, _ext, true)
+
+#define AMS_PL_CHAN_TEMP(_scan_index, _addr, _ext) \
+ AMS_CHAN_TEMP(PL_SEQ(_scan_index), _addr, _ext)
+#define AMS_PL_CHAN_VOLTAGE(_scan_index, _addr, _ext, _alarm) \
+ AMS_CHAN_VOLTAGE(PL_SEQ(_scan_index), _addr, _ext, _alarm)
+#define AMS_PL_AUX_CHAN_VOLTAGE(_auxno, _ext) \
+ AMS_CHAN_VOLTAGE(PL_SEQ(AMS_VAUX_SEQ(_auxno)), \
+ AMS_REG_VAUX(_auxno), _ext, false)
+#define AMS_CTRL_CHAN_VOLTAGE(_scan_index, _addr, _ext) \
+ AMS_CHAN_VOLTAGE(PL_SEQ(AMS_VAUX_SEQ(AMS_SEQ(_scan_index))), \
+ _addr, _ext, false)
+
+struct ams {
+ void __iomem *base;
+ void __iomem *ps_base;
+ void __iomem *pl_base;
+ struct clk *clk;
+ struct device *dev;
+
+ struct mutex mutex;
+ spinlock_t lock;
+
+ unsigned int alarm_mask;
+ unsigned int masked_alarm;
+ u64 intr_mask;
+ int irq;
+
+ struct delayed_work ams_unmask_work;
+ const struct ams_pl_bus_ops *pl_bus;
+};
+
+struct ams_pl_bus_ops {
+ void (*read)(struct ams *ams, unsigned int offset, unsigned int *data);
+ void (*write)(struct ams *ams, unsigned int offset, unsigned int data);
+ void (*update)(struct ams *ams, unsigned int offset, u32 mask,
+ u32 data);
+};
+
+#endif /* __XILINX_AMS_H__ */
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 6fd06e4eff73..a595ab6fb05d 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -94,6 +94,9 @@ static const unsigned int XADC_ZYNQ_UNMASK_TIMEOUT = 500;
#define XADC_AXI_REG_IPIER 0x68
#define XADC_AXI_ADC_REG_OFFSET 0x200
+/* AXI sysmon offset */
+#define XADC_AXI_SYSMON_REG_OFFSET 0x400
+
#define XADC_AXI_RESET_MAGIC 0xa
#define XADC_AXI_GIER_ENABLE BIT(31)
@@ -468,6 +471,26 @@ static int xadc_axi_write_adc_reg(struct xadc *xadc, unsigned int reg,
return 0;
}
+/* AXI sysmon read/write methods */
+static int xadc_axi_read_sysmon_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t *val)
+{
+ uint32_t val32;
+
+ xadc_read_reg(xadc, XADC_AXI_SYSMON_REG_OFFSET + reg * 4, &val32);
+ *val = val32 & 0xffff;
+
+ return 0;
+}
+
+static int xadc_axi_write_sysmon_reg(struct xadc *xadc, unsigned int reg,
+ uint16_t val)
+{
+ xadc_write_reg(xadc, XADC_AXI_SYSMON_REG_OFFSET + reg * 4, val);
+
+ return 0;
+}
+
static int xadc_axi_setup(struct platform_device *pdev,
struct iio_dev *indio_dev, int irq)
{
@@ -551,6 +574,17 @@ static const struct xadc_ops xadc_axi_ops = {
.flags = XADC_FLAGS_BUFFERED,
};
+/* AXI sysmon */
+static const struct xadc_ops sysmon_axi_ops = {
+ .read = xadc_axi_read_sysmon_reg,
+ .write = xadc_axi_write_sysmon_reg,
+ .setup = xadc_axi_setup,
+ .get_dclk_rate = xadc_axi_get_dclk,
+ .update_alarm = xadc_axi_update_alarm,
+ .interrupt_handler = xadc_axi_interrupt_handler,
+ .flags = XADC_FLAGS_BUFFERED,
+};
+
static int _xadc_update_adc_reg(struct xadc *xadc, unsigned int reg,
uint16_t mask, uint16_t val)
{
@@ -1055,23 +1089,23 @@ static const struct iio_chan_spec xadc_channels[] = {
XADC_CHAN_VOLTAGE(5, 7, XADC_REG_VCCO_DDR, "vccoddr", true),
XADC_CHAN_VOLTAGE(6, 12, XADC_REG_VREFP, "vrefp", false),
XADC_CHAN_VOLTAGE(7, 13, XADC_REG_VREFN, "vrefn", false),
- XADC_CHAN_VOLTAGE(8, 11, XADC_REG_VPVN, NULL, false),
- XADC_CHAN_VOLTAGE(9, 16, XADC_REG_VAUX(0), NULL, false),
- XADC_CHAN_VOLTAGE(10, 17, XADC_REG_VAUX(1), NULL, false),
- XADC_CHAN_VOLTAGE(11, 18, XADC_REG_VAUX(2), NULL, false),
- XADC_CHAN_VOLTAGE(12, 19, XADC_REG_VAUX(3), NULL, false),
- XADC_CHAN_VOLTAGE(13, 20, XADC_REG_VAUX(4), NULL, false),
- XADC_CHAN_VOLTAGE(14, 21, XADC_REG_VAUX(5), NULL, false),
- XADC_CHAN_VOLTAGE(15, 22, XADC_REG_VAUX(6), NULL, false),
- XADC_CHAN_VOLTAGE(16, 23, XADC_REG_VAUX(7), NULL, false),
- XADC_CHAN_VOLTAGE(17, 24, XADC_REG_VAUX(8), NULL, false),
- XADC_CHAN_VOLTAGE(18, 25, XADC_REG_VAUX(9), NULL, false),
- XADC_CHAN_VOLTAGE(19, 26, XADC_REG_VAUX(10), NULL, false),
- XADC_CHAN_VOLTAGE(20, 27, XADC_REG_VAUX(11), NULL, false),
- XADC_CHAN_VOLTAGE(21, 28, XADC_REG_VAUX(12), NULL, false),
- XADC_CHAN_VOLTAGE(22, 29, XADC_REG_VAUX(13), NULL, false),
- XADC_CHAN_VOLTAGE(23, 30, XADC_REG_VAUX(14), NULL, false),
- XADC_CHAN_VOLTAGE(24, 31, XADC_REG_VAUX(15), NULL, false),
+ XADC_CHAN_VOLTAGE(8, 11, XADC_REG_VPVN, "vpvn", false),
+ XADC_CHAN_VOLTAGE(9, 16, XADC_REG_VAUX(0), "vaux0", false),
+ XADC_CHAN_VOLTAGE(10, 17, XADC_REG_VAUX(1), "vaux1", false),
+ XADC_CHAN_VOLTAGE(11, 18, XADC_REG_VAUX(2), "vaux2", false),
+ XADC_CHAN_VOLTAGE(12, 19, XADC_REG_VAUX(3), "vaux3", false),
+ XADC_CHAN_VOLTAGE(13, 20, XADC_REG_VAUX(4), "vaux4", false),
+ XADC_CHAN_VOLTAGE(14, 21, XADC_REG_VAUX(5), "vaux5", false),
+ XADC_CHAN_VOLTAGE(15, 22, XADC_REG_VAUX(6), "vaux6", false),
+ XADC_CHAN_VOLTAGE(16, 23, XADC_REG_VAUX(7), "vaux7", false),
+ XADC_CHAN_VOLTAGE(17, 24, XADC_REG_VAUX(8), "vaux8", false),
+ XADC_CHAN_VOLTAGE(18, 25, XADC_REG_VAUX(9), "vaux9", false),
+ XADC_CHAN_VOLTAGE(19, 26, XADC_REG_VAUX(10), "vaux10", false),
+ XADC_CHAN_VOLTAGE(20, 27, XADC_REG_VAUX(11), "vaux11", false),
+ XADC_CHAN_VOLTAGE(21, 28, XADC_REG_VAUX(12), "vaux12", false),
+ XADC_CHAN_VOLTAGE(22, 29, XADC_REG_VAUX(13), "vaux13", false),
+ XADC_CHAN_VOLTAGE(23, 30, XADC_REG_VAUX(14), "vaux14", false),
+ XADC_CHAN_VOLTAGE(24, 31, XADC_REG_VAUX(15), "vaux15", false),
};
static const struct iio_info xadc_info = {
@@ -1087,6 +1121,7 @@ static const struct iio_info xadc_info = {
static const struct of_device_id xadc_of_match_table[] = {
{ .compatible = "xlnx,zynq-xadc-1.00.a", (void *)&xadc_zynq_ops },
{ .compatible = "xlnx,axi-xadc-1.00.a", (void *)&xadc_axi_ops },
+ { .compatible = "xlnx,axi-sysmon-1.3", (void *)&sysmon_axi_ops},
{ },
};
MODULE_DEVICE_TABLE(of, xadc_of_match_table);
@@ -1095,7 +1130,7 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
unsigned int *conf)
{
struct xadc *xadc = iio_priv(indio_dev);
- struct iio_chan_spec *channels, *chan;
+ struct iio_chan_spec *iio_xadc_channels;
struct device_node *chan_node, *child;
unsigned int num_channels;
const char *external_mux;
@@ -1138,12 +1173,12 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
*conf |= XADC_CONF0_MUX | XADC_CONF0_CHAN(ext_mux_chan);
}
- channels = kmemdup(xadc_channels, sizeof(xadc_channels), GFP_KERNEL);
- if (!channels)
+ iio_xadc_channels = kmemdup(xadc_channels, sizeof(xadc_channels),
+ GFP_KERNEL);
+ if (!iio_xadc_channels)
return -ENOMEM;
num_channels = 9;
- chan = &channels[9];
chan_node = of_get_child_by_name(np, "xlnx,channels");
if (chan_node) {
@@ -1157,28 +1192,24 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
if (ret || reg > 16)
continue;
+ iio_xadc_channels[num_channels] = xadc_channels[reg + 9];
+ iio_xadc_channels[num_channels].channel = num_channels - 1;
+
if (of_property_read_bool(child, "xlnx,bipolar"))
- chan->scan_type.sign = 's';
-
- if (reg == 0) {
- chan->scan_index = 11;
- chan->address = XADC_REG_VPVN;
- } else {
- chan->scan_index = 15 + reg;
- chan->address = XADC_REG_VAUX(reg - 1);
- }
+ iio_xadc_channels[num_channels].scan_type.sign = 's';
+
num_channels++;
- chan++;
}
}
of_node_put(chan_node);
indio_dev->num_channels = num_channels;
- indio_dev->channels = krealloc(channels, sizeof(*channels) *
- num_channels, GFP_KERNEL);
+ indio_dev->channels = krealloc(iio_xadc_channels,
+ sizeof(*iio_xadc_channels) *
+ num_channels, GFP_KERNEL);
/* If we can't resize the channels array, just use the original */
if (!indio_dev->channels)
- indio_dev->channels = channels;
+ indio_dev->channels = iio_xadc_channels;
return 0;
}
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 882204d1ef4f..fd0d65e9c53c 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -345,6 +345,17 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
return IRQ_SET_MASK_OK_DONE;
}
+
+void gic_set_cpu(unsigned int cpu, unsigned int irq)
+{
+ struct irq_data *d = irq_get_irq_data(irq);
+ struct cpumask mask;
+
+ cpumask_clear(&mask);
+ cpumask_set_cpu(cpu, &mask);
+ gic_set_affinity(d, &mask, true);
+}
+EXPORT_SYMBOL(gic_set_cpu);
#endif
static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
@@ -795,18 +806,19 @@ static int gic_pm_init(struct gic_chip_data *gic)
#endif
#ifdef CONFIG_SMP
-static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
+void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
int cpu;
unsigned long flags, map = 0;
+#if 0
if (unlikely(nr_cpu_ids == 1)) {
/* Only one CPU? let's do a self-IPI... */
writel_relaxed(2 << 24 | irq,
gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
return;
}
-
+#endif
gic_lock_irqsave(flags);
/* Convert our logical CPU mask into a physical one. */
@@ -824,6 +836,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
gic_unlock_irqrestore(flags);
}
+EXPORT_SYMBOL(gic_raise_softirq);
#endif
#ifdef CONFIG_BL_SWITCHER
diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c
index 1d3d273309bd..2c3184ecfd33 100644
--- a/drivers/irqchip/irq-xilinx-intc.c
+++ b/drivers/irqchip/irq-xilinx-intc.c
@@ -18,6 +18,8 @@
#include <linux/jump_label.h>
#include <linux/bug.h>
#include <linux/of_irq.h>
+#include <linux/cpuhotplug.h>
+#include <linux/smp.h>
/* No one else should require these constants, so define them locally here. */
#define ISR 0x00 /* Interrupt Status Register */
@@ -36,15 +38,19 @@ static DEFINE_STATIC_KEY_FALSE(xintc_is_be);
struct xintc_irq_chip {
void __iomem *base;
- struct irq_domain *root_domain;
+ struct irq_domain *domain;
u32 intr_mask;
u32 nr_irq;
+ u32 sw_irq;
};
-static struct xintc_irq_chip *primary_intc;
+static DEFINE_PER_CPU(struct xintc_irq_chip, primary_intc);
static void xintc_write(struct xintc_irq_chip *irqc, int reg, u32 data)
{
+ if (!irqc)
+ irqc = per_cpu_ptr(&primary_intc, smp_processor_id());
+
if (static_branch_unlikely(&xintc_is_be))
iowrite32be(data, irqc->base + reg);
else
@@ -59,6 +65,26 @@ static u32 xintc_read(struct xintc_irq_chip *irqc, int reg)
return ioread32(irqc->base + reg);
}
+#if defined(CONFIG_SMP) && defined(CONFIG_MICROBLAZE)
+static DEFINE_RAW_SPINLOCK(ipi_lock);
+
+static void send_ipi(unsigned int cpu, unsigned int ipi_number)
+{
+ unsigned long flags;
+ struct xintc_irq_chip *irqc = per_cpu_ptr(&primary_intc, cpu);
+ u32 sw_irq = 1 << (ipi_number + irqc->nr_irq);
+
+ pr_debug("%s: cpu: %u, sends IPI: %d to cpu: %u, sw_irq %x\n",
+ __func__, smp_processor_id(), ipi_number, cpu, sw_irq);
+
+ raw_spin_lock_irqsave(&ipi_lock, flags);
+
+ xintc_write(irqc, ISR, sw_irq);
+
+ raw_spin_unlock_irqrestore(&ipi_lock, flags);
+}
+#endif
+
static void intc_enable_or_unmask(struct irq_data *d)
{
struct xintc_irq_chip *irqc = irq_data_get_irq_chip_data(d);
@@ -117,21 +143,7 @@ static unsigned int xintc_get_irq_local(struct xintc_irq_chip *irqc)
hwirq = xintc_read(irqc, IVR);
if (hwirq != -1U)
- irq = irq_find_mapping(irqc->root_domain, hwirq);
-
- pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq);
-
- return irq;
-}
-
-unsigned int xintc_get_irq(void)
-{
- unsigned int irq = -1;
- u32 hwirq;
-
- hwirq = xintc_read(primary_intc, IVR);
- if (hwirq != -1U)
- irq = irq_find_mapping(primary_intc->root_domain, hwirq);
+ irq = irq_find_mapping(irqc->domain, hwirq);
pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq);
@@ -141,17 +153,49 @@ unsigned int xintc_get_irq(void)
static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
struct xintc_irq_chip *irqc = d->host_data;
+ u32 edge = local_intc->intr_mask & (1 << hw);
+
+ /*
+ * Find out which irq_domain this IRQ is assigned to. If it is assigned
+ * to root domain then do not fill chip_data and set it up in the code
+ */
+ if (irq_get_default_host() != d)
+ irq_set_chip_data(irq, local_intc);
+ else {
+ local_intc = per_cpu_ptr(&primary_intc, 0);
+ irq_set_chip_data(irq, NULL);
+ }
- if (irqc->intr_mask & BIT(hw)) {
+ if (edge) {
+#if defined(CONFIG_SMP) && defined(CONFIG_MICROBLAZE)
+ irq_set_chip_and_handler_name(irq, local_intc->intc_dev,
+ handle_percpu_irq, "percpu");
+#else
irq_set_chip_and_handler_name(irq, &intc_dev,
handle_edge_irq, "edge");
+#endif
irq_clear_status_flags(irq, IRQ_LEVEL);
} else {
+#if defined(CONFIG_SMP) && defined(CONFIG_MICROBLAZE)
+ irq_set_chip_and_handler_name(irq, local_intc->intc_dev,
+ handle_percpu_irq, "percpu");
+#else
irq_set_chip_and_handler_name(irq, &intc_dev,
handle_level_irq, "level");
+#endif
irq_set_status_flags(irq, IRQ_LEVEL);
}
- irq_set_chip_data(irq, irqc);
+
+ /*
+ * Setup all IRQs to be per CPU because servicing it by different
+ * cpu is not implemented yet. And for a uniprocessor system this flag
+ * is a no-op all the time.
+ */
+ irq_set_status_flags(irq, IRQ_PER_CPU);
+
+ pr_debug("cpu: %u, xintc_map: hwirq=%u, irq=%u, edge=%u\n",
+ smp_processor_id(), (u32)hw, irq, edge);
+
return 0;
}
@@ -160,6 +204,35 @@ static const struct irq_domain_ops xintc_irq_domain_ops = {
.map = xintc_map,
};
+static void xil_intc_initial_setup(struct xintc_irq_chip *irqc)
+{
+ int i;
+ u32 mask;
+
+ /*
+ * Disable all external interrupts until they are
+ * explicitly requested.
+ */
+ xintc_write(irqc, IER, 0);
+
+ /* Acknowledge any pending interrupts just in case. */
+ xintc_write(irqc, IAR, 0xffffffff);
+
+ /* Turn on the Master Enable. */
+ xintc_write(irqc, MER, MER_HIE | MER_ME);
+ if (xintc_read(irqc, MER) != (MER_HIE | MER_ME)) {
+ static_branch_enable(&xintc_is_be);
+ xintc_write(irqc, MER, MER_HIE | MER_ME);
+ }
+
+ /* Enable all SW IRQs */
+ for (i = 0; i < irqc->sw_irq; i++) {
+ mask = 1 << (i + irqc->nr_irq);
+ irqc->write_fn(irqc->base + IAR, mask);
+ irqc->write_fn(irqc->base + SIE, mask);
+ }
+}
+
static void xil_intc_irq_handler(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -177,15 +250,80 @@ static void xil_intc_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
+static int xil_intc_start(unsigned int cpu)
+{
+ struct xintc_irq_chip *irqc = per_cpu_ptr(&primary_intc, cpu);
+
+ pr_debug("%s: intc cpu %d\n", __func__, cpu);
+
+ xil_intc_initial_setup(irqc);
+
+ return 0;
+}
+
+static int xil_intc_stop(unsigned int cpu)
+{
+ pr_debug("%s: intc cpu %d\n", __func__, cpu);
+
+ return 0;
+}
+
+static void xil_intc_handle_irq(struct pt_regs *regs)
+{
+ int ret;
+ unsigned int hwirq, cpu_id = smp_processor_id();
+ struct xintc_irq_chip *irqc = per_cpu_ptr(&primary_intc, cpu_id);
+
+ do {
+ hwirq = irqc->read_fn(irqc->base + IVR);
+ if (hwirq != -1U) {
+ if (hwirq >= irqc->nr_irq) {
+#if defined(CONFIG_SMP) && defined(CONFIG_MICROBLAZE)
+ handle_IPI(hwirq - irqc->nr_irq, regs);
+#else
+ WARN_ONCE(1, "SW interrupt not handled\n");
+#endif
+ /* ACK is necessary */
+ irqc->write_fn(irqc->base + IAR, 1 << hwirq);
+ continue;
+ } else {
+ ret = handle_domain_irq(irqc->domain,
+ hwirq, regs);
+ WARN_ONCE(ret, "cpu %d: Unhandled HWIRQ %d\n",
+ cpu_id, hwirq);
+ continue;
+ }
+ }
+
+ break;
+ } while (1);
+}
+
static int __init xilinx_intc_of_init(struct device_node *intc,
struct device_node *parent)
{
struct xintc_irq_chip *irqc;
int ret, irq;
+ u32 cpu_id = 0;
+
+ ret = of_property_read_u32(intc, "cpu-id", &cpu_id);
+ if (ret < 0)
+ pr_debug("%s: %pOF: cpu_id not found\n", __func__, intc);
+
+ /* No parent means it is primary intc */
+ if (!parent) {
+ irqc = per_cpu_ptr(&primary_intc, cpu_id);
+ if (irqc->base) {
+ pr_err("%pOF: %s: cpu %d has already irq controller\n",
+ intc, __func__, cpu_id);
+ return -EINVAL;
+ }
+ } else {
+ irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
+ if (!irqc)
+ return -ENOMEM;
+ }
- irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
- if (!irqc)
- return -ENOMEM;
irqc->base = of_iomap(intc, 0);
BUG_ON(!irqc->base);
@@ -204,29 +342,21 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
if (irqc->intr_mask >> irqc->nr_irq)
pr_warn("irq-xilinx: mismatch in kind-of-intr param\n");
- pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n",
- intc, irqc->nr_irq, irqc->intr_mask);
-
-
- /*
- * Disable all external interrupts until they are
- * explicity requested.
- */
- xintc_write(irqc, IER, 0);
+ /* SW IRQs are optional */
+ of_property_read_u32(intc, "xlnx,num-sw-intr", &irqc->sw_irq);
- /* Acknowledge any pending interrupts just in case. */
- xintc_write(irqc, IAR, 0xffffffff);
+ pr_info("irq-xilinx: %pOF: num_irq=%d, sw_irq=%d, edge=0x%x\n",
+ intc, irqc->nr_irq, irqc->sw_irq, irqc->intr_mask);
- /* Turn on the Master Enable. */
- xintc_write(irqc, MER, MER_HIE | MER_ME);
- if (xintc_read(irqc, MER) != (MER_HIE | MER_ME)) {
- static_branch_enable(&xintc_is_be);
- xintc_write(irqc, MER, MER_HIE | MER_ME);
+ /* Right now enable only SW IRQs on that IP and wait */
+ if (cpu_id) {
+ xil_intc_initial_setup(irqc);
+ return 0;
}
- irqc->root_domain = irq_domain_add_linear(intc, irqc->nr_irq,
+ irqc->domain = irq_domain_add_linear(intc, irqc->nr_irq,
&xintc_irq_domain_ops, irqc);
- if (!irqc->root_domain) {
+ if (!irqc->domain) {
pr_err("irq-xilinx: Unable to create IRQ domain\n");
ret = -EINVAL;
goto error;
@@ -243,18 +373,32 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
ret = -EINVAL;
goto error;
}
- } else {
- primary_intc = irqc;
- irq_set_default_host(primary_intc->root_domain);
+ xil_intc_initial_setup(irqc);
+ return 0;
}
- return 0;
+ /*
+ * Set default domain here because for other root intc
+ * irq_find_mapping() will use irq_default_domain as fallback
+ */
+ irq_set_default_host(irqc->domain);
+ set_handle_irq(xil_intc_handle_irq);
+
+ ret = cpuhp_setup_state(CPUHP_AP_IRQ_XILINX_STARTING,
+ "microblaze/arch_intc:starting",
+ xil_intc_start, xil_intc_stop);
+
+#if defined(CONFIG_SMP) && defined(CONFIG_MICROBLAZE)
+ set_smp_cross_call(send_ipi);
+#endif
+
+ return ret;
error:
iounmap(irqc->base);
- kfree(irqc);
+ if (parent)
+ kfree(irqc);
return ret;
-
}
IRQCHIP_DECLARE(xilinx_intc_xps, "xlnx,xps-intc-1.00.a", xilinx_intc_of_init);
diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
index 62763ec4cd07..6ed9728d54cc 100644
--- a/drivers/media/i2c/adv7511-v4l2.c
+++ b/drivers/media/i2c/adv7511-v4l2.c
@@ -52,6 +52,7 @@ MODULE_LICENSE("GPL v2");
#define ADV7511_MAX_HEIGHT 1200
#define ADV7511_MIN_PIXELCLOCK 20000000
#define ADV7511_MAX_PIXELCLOCK 225000000
+#define XYLON_LOGICVC_INTG
#define ADV7511_MAX_ADDRS (3)
@@ -79,7 +80,50 @@ struct adv7511_state_edid {
bool complete;
};
+struct adv7511_in_params {
+ uint8_t input_id;
+ uint8_t input_style;
+ uint8_t input_color_depth;
+ uint8_t bit_justification;
+ uint8_t hsync_polarity;
+ uint8_t vsync_polarity;
+ uint8_t clock_delay;
+};
+
+struct adv7511_csc_coeff {
+ uint16_t a1;
+ uint16_t a2;
+ uint16_t a3;
+ uint16_t a4;
+ uint16_t b1;
+ uint16_t b2;
+ uint16_t b3;
+ uint16_t b4;
+ uint16_t c1;
+ uint16_t c2;
+ uint16_t c3;
+ uint16_t c4;
+};
+
+struct adv7511_out_params {
+ bool hdmi_mode;
+ uint8_t output_format;
+ uint8_t output_color_space;
+ uint8_t up_conversion;
+ uint8_t csc_enable;
+ uint8_t csc_scaling_factor;
+ struct adv7511_csc_coeff csc_coeff;
+};
+
+struct adv7511_config {
+ struct adv7511_in_params in_params;
+ struct adv7511_out_params out_params;
+ bool embedded_sync;
+ bool loaded;
+};
+
struct adv7511_state {
+ struct adv7511_config cfg;
struct adv7511_platform_data pdata;
struct v4l2_subdev sd;
struct media_pad pad;
@@ -379,6 +423,10 @@ static void adv7511_set_rgb_quantization_mode(struct v4l2_subdev *sd, struct v4l
{
struct adv7511_state *state = get_adv7511_state(sd);
+#ifdef XYLON_LOGICVC_INTG
+	return;
+#endif
+
/* Only makes sense for RGB formats */
if (state->fmt_code != MEDIA_BUS_FMT_RGB888_1X24) {
/* so just keep quantization */
@@ -1527,34 +1575,278 @@ static void adv7511_audio_setup(struct v4l2_subdev *sd)
adv7511_s_routing(sd, 0, 0, 0);
}
-/* Configure hdmi transmitter. */
-static void adv7511_setup(struct v4l2_subdev *sd)
+static void adv7511_set_ofdt_config(struct v4l2_subdev *sd)
{
struct adv7511_state *state = get_adv7511_state(sd);
+ struct adv7511_config *config = &state->cfg;
+ uint8_t val_mask, val;
v4l2_dbg(1, debug, sd, "%s\n", __func__);
- /* Input format: RGB 4:4:4 */
- adv7511_wr_and_or(sd, 0x15, 0xf0, 0x0);
- /* Output format: RGB 4:4:4 */
- adv7511_wr_and_or(sd, 0x16, 0x7f, 0x0);
- /* 1st order interpolation 4:2:2 -> 4:4:4 up conversion, Aspect ratio: 16:9 */
- adv7511_wr_and_or(sd, 0x17, 0xf9, 0x06);
- /* Disable pixel repetition */
- adv7511_wr_and_or(sd, 0x3b, 0x9f, 0x0);
- /* Disable CSC */
- adv7511_wr_and_or(sd, 0x18, 0x7f, 0x0);
- /* Output format: RGB 4:4:4, Active Format Information is valid,
- * underscanned */
- adv7511_wr_and_or(sd, 0x55, 0x9c, 0x12);
- /* AVI Info frame packet enable, Audio Info frame disable */
+ /* Input format */
+ val_mask = 0;
+ switch (config->in_params.input_id) {
+ default:
+ case 0:
+ val = 0x00;
+ break;
+ case 1:
+ val = 0x01;
+ break;
+ case 2:
+ val = 0x02;
+ config->embedded_sync = true;
+ break;
+ case 3:
+ val = 0x03;
+ break;
+ case 4:
+ val = 0x04;
+ config->embedded_sync = true;
+ break;
+ case 5:
+ val = 0x05;
+ break;
+ case 6:
+ val = 0x06;
+ break;
+ case 7:
+ val = 0x07;
+ break;
+ case 8:
+ val = 0x08;
+ config->embedded_sync = true;
+ break;
+ }
+ val_mask |= val;
+ adv7511_wr(sd, 0x15, val_mask);
+
+ /* Output format */
+ val_mask = 0;
+ switch (config->out_params.output_color_space) {
+ default:
+ case 0:
+ val = 0x00;
+ break;
+ case 1:
+ val = 0x01;
+ break;
+ }
+ val_mask |= (val << 0);
+ switch (config->in_params.input_style) {
+ case 1:
+ val = 0x02;
+ break;
+ case 2:
+ val = 0x01;
+ break;
+ case 3:
+ val = 0x03;
+ break;
+ default:
+ val = 0x00;
+ break;
+ }
+ val_mask |= (val << 2);
+ switch (config->in_params.input_color_depth) {
+ case 8:
+ val = 0x03;
+ break;
+ case 10:
+ val = 0x01;
+ break;
+ case 12:
+ val = 0x02;
+ break;
+ default:
+ val = 0x00;
+ break;
+ }
+ val_mask |= (val << 4);
+ switch (config->out_params.output_format) {
+ default:
+ case 0:
+ val = 0x00;
+ break;
+ case 1:
+ val = 0x01;
+ break;
+ }
+ val_mask |= (val << 7);
+ adv7511_wr(sd, 0x16, val_mask);
+
+ /* H, V sync polarity, interpolation style */
+ val_mask = 0;
+ switch (config->out_params.up_conversion) {
+ default:
+ case 0:
+ val = 0x00;
+ break;
+ case 1:
+ val = 0x01;
+ break;
+ }
+ val_mask |= (val << 2);
+ switch (config->in_params.hsync_polarity) {
+ default:
+ case 0:
+ val = 0x00;
+ break;
+ case 1:
+ val = 0x01;
+ break;
+ }
+ val_mask |= (val << 5);
+ switch (config->in_params.vsync_polarity) {
+ default:
+ case 0:
+ val = 0x00;
+ break;
+ case 1:
+ val = 0x01;
+ break;
+ }
+ val_mask |= (val << 6);
+ adv7511_wr(sd, 0x17, val_mask);
+
+ /* CSC mode, CSC coefficients */
+ if (config->out_params.csc_enable) {
+ switch (config->out_params.csc_scaling_factor) {
+ case 1:
+ val = 0x00;
+ break;
+ case 2:
+ val = 0x01;
+ break;
+ case 4:
+ default:
+ val = 0x02;
+ break;
+ }
+ adv7511_csc_conversion_mode(sd, val);
+ adv7511_csc_coeff(sd,
+ config->out_params.csc_coeff.a1,
+ config->out_params.csc_coeff.a2,
+ config->out_params.csc_coeff.a3,
+ config->out_params.csc_coeff.a4,
+ config->out_params.csc_coeff.b1,
+ config->out_params.csc_coeff.b2,
+ config->out_params.csc_coeff.b3,
+ config->out_params.csc_coeff.b4,
+ config->out_params.csc_coeff.c1,
+ config->out_params.csc_coeff.c2,
+ config->out_params.csc_coeff.c3,
+ config->out_params.csc_coeff.c4);
+ /* enable CSC */
+ adv7511_wr_and_or(sd, 0x18, 0x7f, 0x80);
+ /* AVI infoframe: Limited range RGB (16-235) */
+ adv7511_wr_and_or(sd, 0x57, 0xf3, 0x04);
+ }
+
+ /* AVI Info, Audio Info */
adv7511_wr_and_or(sd, 0x44, 0xe7, 0x10);
- /* Colorimetry, Active format aspect ratio: same as picure. */
- adv7511_wr(sd, 0x56, 0xa8);
- /* No encryption */
- adv7511_wr_and_or(sd, 0xaf, 0xed, 0x0);
- /* Positive clk edge capture for input video clock */
- adv7511_wr_and_or(sd, 0xba, 0x1f, 0x60);
+ /* Video input justification */
+ val_mask = 0;
+ switch (config->in_params.bit_justification) {
+ default:
+ case 0:
+ val = 0x00;
+ break;
+ case 1:
+ val = 0x01;
+ break;
+ case 2:
+ val = 0x02;
+ break;
+ }
+ val_mask |= (val << 3);
+ adv7511_wr(sd, 0x48, val_mask);
+
+ /* Output format */
+ val_mask = 0x00;
+ if (config->out_params.output_format == 1) {
+ if (config->out_params.output_color_space == 0)
+ val_mask = 0x02;
+		else if (config->out_params.output_color_space == 1)
+ val_mask = 0x01;
+ }
+ val_mask <<= 5;
+ adv7511_wr(sd, 0x55, val_mask);
+
+ /* Picture format aspect ratio */
+ adv7511_wr(sd, 0x56, 0x28);
+
+ /* HDCP, Frame encryption, HDMI/DVI */
+ val_mask = 0x04;
+ if (config->out_params.hdmi_mode)
+ val_mask |= 0x02;
+ adv7511_wr(sd, 0xaf, val_mask);
+
+ /* Capture for input video clock */
+ val_mask = 0;
+ switch (config->in_params.clock_delay) {
+ default:
+ case 0:
+ val = 0x00;
+ break;
+ case 1:
+ val = 0x01;
+ break;
+ case 2:
+ val = 0x02;
+ break;
+ case 3:
+ val = 0x03;
+ break;
+ case 4:
+ val = 0x04;
+ break;
+ case 5:
+ val = 0x05;
+ break;
+ case 6:
+ val = 0x06;
+ break;
+ case 7:
+ val = 0x07;
+ break;
+ }
+ val_mask |= (val << 5);
+ adv7511_wr_and_or(sd, 0xba, 0x1f, val_mask);
+}
+
+/* Configure hdmi transmitter. */
+static void adv7511_setup(struct v4l2_subdev *sd)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ v4l2_dbg(1, debug, sd, "%s\n", __func__);
+
+ if (!state->cfg.loaded) {
+ /* Input format: RGB 4:4:4 */
+ adv7511_wr_and_or(sd, 0x15, 0xf0, 0x0);
+ /* Output format: RGB 4:4:4 */
+ adv7511_wr_and_or(sd, 0x16, 0x7f, 0x0);
+ /* 1st order interpolation 4:2:2 -> 4:4:4 up conversion, Aspect ratio: 16:9 */
+ adv7511_wr_and_or(sd, 0x17, 0xf9, 0x06);
+ /* Disable pixel repetition */
+ adv7511_wr_and_or(sd, 0x3b, 0x9f, 0x0);
+ /* Disable CSC */
+ adv7511_wr_and_or(sd, 0x18, 0x7f, 0x0);
+ /* Output format: RGB 4:4:4, Active Format Information is valid,
+ * underscanned */
+ adv7511_wr_and_or(sd, 0x55, 0x9c, 0x12);
+ /* AVI Info frame packet enable, Audio Info frame disable */
+ adv7511_wr_and_or(sd, 0x44, 0xe7, 0x10);
+		/* Colorimetry, Active format aspect ratio: same as picture. */
+ adv7511_wr(sd, 0x56, 0xa8);
+ /* No encryption */
+ adv7511_wr_and_or(sd, 0xaf, 0xed, 0x0);
+
+ /* Positive clk edge capture for input video clock */
+ adv7511_wr_and_or(sd, 0xba, 0x1f, 0x60);
+ } else {
+ adv7511_set_ofdt_config(sd);
+ }
adv7511_audio_setup(sd);
@@ -1792,6 +2084,181 @@ static void adv7511_init_setup(struct v4l2_subdev *sd)
adv7511_cec_write(sd, 0x4e, ratio << 2);
}
+
+static void adv7511_get_ofdt_config(struct i2c_client *client,
+ struct adv7511_state *state)
+{
+ struct device_node *dn = client->dev.of_node;
+ struct device_node *np;
+ struct adv7511_config *config = &state->cfg;
+ u32 const *prop;
+ int size;
+ bool vin_loaded, vout_loaded;
+
+ vin_loaded = vout_loaded = false;
+
+ prop = of_get_property(dn, "edid-addr", &size);
+ if (prop)
+ state->pdata.i2c_edid = (uint8_t)be32_to_cpup(prop);
+
+ prop = of_get_property(dn, "pktmem-addr", &size);
+ if (prop)
+ state->pdata.i2c_pktmem = (uint8_t)be32_to_cpup(prop);
+
+ prop = of_get_property(dn, "cec-addr", &size);
+ if (prop)
+ state->pdata.i2c_cec = (uint8_t)be32_to_cpup(prop);
+
+ np = of_find_node_by_name(dn, "video-input");
+ if (np) {
+ prop = of_get_property(np, "input-id", &size);
+ if (prop)
+ config->in_params.input_id =
+ (uint8_t)be32_to_cpup(prop);
+ prop = of_get_property(np, "input-style", &size);
+ if (prop)
+ config->in_params.input_style =
+ (uint8_t)be32_to_cpup(prop);
+ prop = of_get_property(np, "input-color-depth", &size);
+ if (prop)
+ config->in_params.input_color_depth =
+ (uint8_t)be32_to_cpup(prop);
+ prop = of_get_property(np, "bit-justification", &size);
+ if (prop)
+ config->in_params.bit_justification =
+ (uint8_t)be32_to_cpup(prop);
+ prop = of_get_property(np, "hsync-polarity", &size);
+ if (prop)
+ config->in_params.hsync_polarity =
+ (uint8_t)be32_to_cpup(prop);
+ prop = of_get_property(np, "vsync-polarity", &size);
+ if (prop)
+ config->in_params.vsync_polarity =
+ (uint8_t)be32_to_cpup(prop);
+ prop = of_get_property(np, "clock-delay", &size);
+ if (prop)
+ config->in_params.clock_delay =
+ (uint8_t)be32_to_cpup(prop);
+ vin_loaded = true;
+ } else {
+ pr_info("No video input configuration, using device default\n");
+ }
+
+ np = of_find_node_by_name(dn, "video-output");
+ if (np) {
+ prop = of_get_property(np, "hdmi-mode", &size);
+ if (prop) {
+ if (be32_to_cpup(prop) == 1)
+ config->out_params.hdmi_mode = true;
+ }
+ prop = of_get_property(np, "output-format", &size);
+ if (prop)
+ config->out_params.output_format =
+ (uint8_t)be32_to_cpup(prop);
+ prop = of_get_property(np, "output-color-space", &size);
+ if (prop)
+ config->out_params.output_color_space =
+ (uint8_t)be32_to_cpup(prop);
+ prop = of_get_property(np, "up-conversion", &size);
+ if (prop)
+ config->out_params.up_conversion =
+ (uint8_t)be32_to_cpup(prop);
+ prop = of_get_property(np, "csc-enable", &size);
+ if (prop)
+ config->out_params.csc_enable =
+ (uint8_t)be32_to_cpup(prop);
+ if (config->out_params.csc_enable) {
+ prop = of_get_property(np, "csc-scaling-factor", &size);
+ if (prop) {
+ config->out_params.csc_scaling_factor =
+ (uint8_t)be32_to_cpup(prop);
+ }
+ np = of_find_node_by_name(dn, "csc-coefficients");
+ if (np) {
+ prop = of_get_property(np, "a1", &size);
+ if (prop) {
+ config->out_params.csc_coeff.a1 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "a2", &size);
+ if (prop) {
+ config->out_params.csc_coeff.a2 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "a3", &size);
+ if (prop) {
+ config->out_params.csc_coeff.a3 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "a4", &size);
+ if (prop) {
+ config->out_params.csc_coeff.a4 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "b1", &size);
+ if (prop) {
+ config->out_params.csc_coeff.b1 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "b2", &size);
+ if (prop) {
+ config->out_params.csc_coeff.b2 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "b3", &size);
+ if (prop) {
+ config->out_params.csc_coeff.b3 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "b4", &size);
+ if (prop) {
+ config->out_params.csc_coeff.b4 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "c1", &size);
+ if (prop) {
+ config->out_params.csc_coeff.c1 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "c2", &size);
+ if (prop) {
+ config->out_params.csc_coeff.c2 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "c3", &size);
+ if (prop) {
+ config->out_params.csc_coeff.c3 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ prop = of_get_property(np, "c4", &size);
+ if (prop) {
+ config->out_params.csc_coeff.c4 =
+ (uint16_t)be32_to_cpup(prop);
+ }
+ } else {
+ pr_info("No CSC coefficients, using default\n");
+ }
+ }
+ vout_loaded = true;
+ } else {
+ pr_info("No video output configuration, using device default\n");
+ }
+
+ if (vin_loaded && vout_loaded)
+ config->loaded = true;
+}
+
+struct v4l2_subdev *adv7511_subdev(struct v4l2_subdev *sd)
+{
+ static struct v4l2_subdev *subdev;
+
+ if (sd)
+ subdev = sd;
+
+ return subdev;
+}
+EXPORT_SYMBOL(adv7511_subdev);
+
static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct adv7511_state *state;
@@ -1809,11 +2276,16 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
 	if (!state)
 		return -ENOMEM;
-	/* Platform data */
-	if (!pdata) {
-		v4l_err(client, "No platform data!\n");
-		return -ENODEV;
+	if (client->dev.of_node) {
+		adv7511_get_ofdt_config(client, state);
+	} else {
+		/* Platform data */
+		if (!pdata) {
+			v4l_err(client, "No platform data!\n");
+			return -ENODEV;
+		}
+		memcpy(&state->pdata, pdata, sizeof(state->pdata));
 	}
+
-	memcpy(&state->pdata, pdata, sizeof(state->pdata));
state->fmt_code = MEDIA_BUS_FMT_RGB888_1X24;
state->colorspace = V4L2_COLORSPACE_SRGB;
@@ -1824,6 +2297,7 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
client->addr << 1);
v4l2_i2c_subdev_init(sd, client, &adv7511_ops);
+ adv7511_subdev(sd);
sd->internal_ops = &adv7511_int_ops;
hdl = &state->hdl;
@@ -1917,7 +2391,9 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
INIT_DELAYED_WORK(&state->edid_handler, adv7511_edid_handler);
+#ifndef XYLON_LOGICVC_INTG
adv7511_init_setup(sd);
+#endif
#if IS_ENABLED(CONFIG_VIDEO_ADV7511_CEC)
state->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops,
@@ -1931,8 +2407,9 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
#endif
adv7511_set_isr(sd, true);
+#ifndef XYLON_LOGICVC_INTG
adv7511_check_monitor_present_status(sd);
-
+#endif
v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
client->addr << 1, client->adapter->name);
return 0;
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 2fe4a7ac0592..7ec2151da2f5 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -132,6 +132,8 @@ static const struct ov5640_pixfmt ov5640_formats[] = {
{ MEDIA_BUS_FMT_JPEG_1X8, V4L2_COLORSPACE_JPEG, },
{ MEDIA_BUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_SRGB, },
{ MEDIA_BUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_SRGB, },
+ { MEDIA_BUS_FMT_UYVY8_1X16, V4L2_COLORSPACE_SRGB, },
+ { MEDIA_BUS_FMT_YUYV8_1X16, V4L2_COLORSPACE_SRGB, },
{ MEDIA_BUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB, },
{ MEDIA_BUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_SRGB, },
{ MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB, },
@@ -1459,6 +1461,7 @@ static int ov5640_get_light_freq(struct ov5640_dev *sensor)
light_freq = 50;
} else {
/* 60Hz */
+ light_freq = 60;
}
}
@@ -2268,11 +2271,13 @@ static int ov5640_set_framefmt(struct ov5640_dev *sensor,
switch (format->code) {
case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
/* YUV422, UYVY */
fmt = 0x3f;
mux = OV5640_FMT_MUX_YUV422;
break;
case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
/* YUV422, YUYV */
fmt = 0x30;
mux = OV5640_FMT_MUX_YUV422;
@@ -2588,6 +2593,13 @@ static int ov5640_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
/* v4l2_ctrl_lock() locks our own mutex */
+ /*
+ * If the sensor is not powered up by the host driver, do
+ * not try to access it to update the volatile controls.
+ */
+ if (sensor->power_count == 0)
+ return 0;
+
switch (ctrl->id) {
case V4L2_CID_AUTOGAIN:
val = ov5640_get_gain(sensor);
diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
index 211279c5fd77..2272a18d275e 100644
--- a/drivers/media/mc/mc-entity.c
+++ b/drivers/media/mc/mc-entity.c
@@ -264,7 +264,36 @@ static struct media_entity *stack_pop(struct media_graph *graph)
#define stack_top(en) ((en)->stack[(en)->top].entity)
/**
- * media_graph_walk_init - Allocate resources for graph walk
+ * media_entity_has_route - Check if two entity pads are connected internally
+ * @entity: The entity
+ * @pad0: The first pad index
+ * @pad1: The second pad index
+ *
+ * This function can be used to check whether two pads of an entity are
+ * connected internally in the entity.
+ *
+ * The caller must hold entity->source->parent->mutex.
+ *
+ * Return: true if the pads are connected internally and false otherwise.
+ */
+bool media_entity_has_route(struct media_entity *entity, unsigned int pad0,
+ unsigned int pad1)
+{
+ if (pad0 >= entity->num_pads || pad1 >= entity->num_pads)
+ return false;
+
+ if (pad0 == pad1)
+ return true;
+
+ if (!entity->ops || !entity->ops->has_route)
+ return true;
+
+ return entity->ops->has_route(entity, pad0, pad1);
+}
+EXPORT_SYMBOL_GPL(media_entity_has_route);
+
+/**
+ * media_graph_walk_init - Allocate resources for graph walk
* @graph: Media graph structure that will be used to walk the graph
* @mdev: Media device
*
diff --git a/drivers/media/platform/xilinx/Kconfig b/drivers/media/platform/xilinx/Kconfig
index a2773ad7c185..4420e117b1f4 100644
--- a/drivers/media/platform/xilinx/Kconfig
+++ b/drivers/media/platform/xilinx/Kconfig
@@ -4,23 +4,154 @@ config VIDEO_XILINX
tristate "Xilinx Video IP (EXPERIMENTAL)"
depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && OF && HAS_DMA
select VIDEOBUF2_DMA_CONTIG
+ select DMADEVICES
+ select XILINX_FRMBUF
select V4L2_FWNODE
help
Driver for Xilinx Video IP Pipelines
if VIDEO_XILINX
+config VIDEO_XILINX_AXI4S_SWITCH
+ tristate "Xilinx AXI4-Stream Video Switch"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for the Xilinx AXI4-Stream Video Switch. This is a
+ V4L sub device based driver. It supports fixed (TDEST based)
+ as well as dynamic (control register based) routing.
+ Say M to modularize. Say N if unsure.
+
+config VIDEO_XILINX_CFA
+ tristate "Xilinx Video Color Filter Array"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for the Xilinx Color Filter Array
+
+config VIDEO_XILINX_CRESAMPLE
+ tristate "Xilinx Video Chroma Resampler"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for the Xilinx Chroma Resampler
+
+config VIDEO_XILINX_DEMOSAIC
+ tristate "Xilinx Video Demosaic IP"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for Xilinx Video Demosaic IP. This is a V4L sub-device
+ based driver for the Demosaic IP that takes input a Bayer video
+ stream format as input and generates an RGB video output.
+ Say M to modularize. Say N if unsure.
+
+config VIDEO_XILINX_GAMMA
+ tristate "Xilinx Gamma Correction LUT"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for Xilinx Gamma Correction LUT IP. This is a V4L sub-device
+ based driver that exposes V4L controls to adjust Red, Blue and Green
+ Gamma Correction.
+
+ Say M to modularize. Say N if unsure.
+
+config VIDEO_XILINX_HLS
+ tristate "Xilinx Video HLS Core"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for the Xilinx Video HLS Cores
+
+config VIDEO_XILINX_REMAPPER
+ tristate "Xilinx Video Remapper"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for the Xilinx Video Remapper
+
+config VIDEO_XILINX_RGB2YUV
+ tristate "Xilinx Video RGB to YUV Convertor"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for the Xilinx Video RGB to YUV Convertor
+
+config VIDEO_XILINX_SCALER
+ tristate "Xilinx Video Scaler"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for the Xilinx Video Scaler
+
+config VIDEO_XILINX_MULTISCALER
+ tristate "Xilinx Video Multiscaler"
+ depends on VIDEO_XILINX
+ depends on VIDEO_DEV && VIDEO_V4L2
+ select V4L2_MEM2MEM_DEV
+ select VIDEOBUF2_DMA_CONTIG
+ ---help---
+ Driver for the Xilinx Video Multi Scaler. This is a V4L2 memory to
+ memory based driver. Multi-Scaler has max 8 channels which can be
+	  programmed for different scaling ratio.
+
+config VIDEO_XILINX_SDIRXSS
+ tristate "Xilinx SDI Rx Subsystem"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for Xilinx SDI Rx Subsystem
+
+config VIDEO_XILINX_SWITCH
+ tristate "Xilinx Video Switch"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for the Xilinx Video Switch
+
config VIDEO_XILINX_TPG
tristate "Xilinx Video Test Pattern Generator"
depends on VIDEO_XILINX
select VIDEO_XILINX_VTC
help
- Driver for the Xilinx Video Test Pattern Generator
+ Driver for the Xilinx Video Test Pattern Generator
+
+config VIDEO_XILINX_VPSS_CSC
+ tristate "Xilinx VPSS CSC"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for the Xilinx Video Processing Sub-System (VPSS)
+ Color Space Conversion. The driver provides RGB to YUV444
+ conversion and provides video controls like Brightness,
+ Contrast, Color Gains that can be applied to video.
+ Say N if unsure. Say M to modularize.
+
+config VIDEO_XILINX_VPSS_SCALER
+ tristate "Xilinx Video VPSS Scaler"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for Xilinx Video Processing Sub-System(VPSS) Scaler.
+ It allows upscaling and downscaling of video. It also supports
+ limited Color Space Conversion.
+ Say N if unsure.
config VIDEO_XILINX_VTC
tristate "Xilinx Video Timing Controller"
depends on VIDEO_XILINX
help
- Driver for the Xilinx Video Timing Controller
+ Driver for the Xilinx Video Timing Controller
+
+config VIDEO_XILINX_CSI2RXSS
+ tristate "Xilinx CSI2 Rx Subsystem"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for Xilinx MIPI CSI2 Rx Subsystem
+
+config VIDEO_XILINX_SCD
+ tristate "Xilinx Scene Change Detect"
+ depends on VIDEO_XILINX
+ ---help---
+ Driver for Xilinx Scene Change Detection Controller.
+ The driver allows applications to pass video buffers and
+ provides if scene change detection is present between
+ adjacent frames.
+
+config VIDEO_XILINX_M2M
+ tristate "Xilinx Video mem2mem"
+ depends on VIDEO_XILINX
+ select V4L2_MEM2MEM_DEV
+ ---help---
+ Driver for Xilinx V4L2 mem2mem pipeline operation to achieve memory
+ copy between two different physical memories using DMA transfers.
endif #VIDEO_XILINX
diff --git a/drivers/media/platform/xilinx/Makefile b/drivers/media/platform/xilinx/Makefile
index 4cdc0b1ec7a5..173896ec4fe7 100644
--- a/drivers/media/platform/xilinx/Makefile
+++ b/drivers/media/platform/xilinx/Makefile
@@ -1,7 +1,26 @@
# SPDX-License-Identifier: GPL-2.0
+xilinx-scd-objs += xilinx-scenechange.o xilinx-scenechange-channel.o \
+ xilinx-scenechange-dma.o
xilinx-video-objs += xilinx-dma.o xilinx-vip.o xilinx-vipp.o
obj-$(CONFIG_VIDEO_XILINX) += xilinx-video.o
+obj-$(CONFIG_VIDEO_XILINX_AXI4S_SWITCH) += xilinx-axis-switch.o
+obj-$(CONFIG_VIDEO_XILINX_CFA) += xilinx-cfa.o
+obj-$(CONFIG_VIDEO_XILINX_CRESAMPLE) += xilinx-cresample.o
+obj-$(CONFIG_VIDEO_XILINX_CSI2RXSS) += xilinx-csi2rxss.o
+obj-$(CONFIG_VIDEO_XILINX_DEMOSAIC) += xilinx-demosaic.o
+obj-$(CONFIG_VIDEO_XILINX_GAMMA) += xilinx-gamma.o
+obj-$(CONFIG_VIDEO_XILINX_HLS) += xilinx-hls.o
+obj-$(CONFIG_VIDEO_XILINX_M2M) += xilinx-m2m.o
+obj-$(CONFIG_VIDEO_XILINX_MULTISCALER) += xilinx-multi-scaler.o
+obj-$(CONFIG_VIDEO_XILINX_REMAPPER) += xilinx-remapper.o
+obj-$(CONFIG_VIDEO_XILINX_RGB2YUV) += xilinx-rgb2yuv.o
+obj-$(CONFIG_VIDEO_XILINX_SCALER) += xilinx-scaler.o
+obj-$(CONFIG_VIDEO_XILINX_SCD) += xilinx-scd.o
+obj-$(CONFIG_VIDEO_XILINX_SDIRXSS) += xilinx-sdirxss.o
+obj-$(CONFIG_VIDEO_XILINX_SWITCH) += xilinx-switch.o
obj-$(CONFIG_VIDEO_XILINX_TPG) += xilinx-tpg.o
+obj-$(CONFIG_VIDEO_XILINX_VPSS_CSC) += xilinx-vpss-csc.o
+obj-$(CONFIG_VIDEO_XILINX_VPSS_SCALER) += xilinx-vpss-scaler.o
obj-$(CONFIG_VIDEO_XILINX_VTC) += xilinx-vtc.o
diff --git a/drivers/media/platform/xilinx/xilinx-axis-switch.c b/drivers/media/platform/xilinx/xilinx-axis-switch.c
new file mode 100644
index 000000000000..86cd8585116d
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-axis-switch.c
@@ -0,0 +1,610 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx AXI4-Stream Video Switch
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Author: Vishal Sagar <vishal.sagar@xilinx.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-vip.h"
+
+#define XVSW_CTRL_REG 0x00
+#define XVSW_CTRL_REG_UPDATE_MASK BIT(1)
+
+#define XVSW_MI_MUX_REG_BASE 0x40
+#define XVSW_MI_MUX_VAL_MASK 0xF
+#define XVSW_MI_MUX_DISABLE_MASK BIT(31)
+
+#define MIN_VSW_SINKS 1
+#define MAX_VSW_SINKS 16
+#define MIN_VSW_SRCS 1
+#define MAX_VSW_SRCS 16
+
+/**
+ * struct xvswitch_device - Xilinx AXI4-Stream Switch device structure
+ * @dev: Platform structure
+ * @iomem: Base address of IP
+ * @subdev: The v4l2 subdev structure
+ * @pads: media pads
+ * @routing: sink pad connected to each source pad (-1 if none)
+ * @formats: active V4L2 media bus formats on sink pads
+ * @nsinks: number of sink pads (1 to 16)
+ * @nsources: number of source pads (1 to 16)
+ * @tdest_routing: Whether TDEST routing is enabled
+ * @aclk: Video clock
+ * @saxi_ctlclk: AXI-Lite control clock
+ */
+struct xvswitch_device {
+ struct device *dev;
+ void __iomem *iomem;
+ struct v4l2_subdev subdev;
+ struct media_pad *pads;
+ int routing[MAX_VSW_SRCS];
+ struct v4l2_mbus_framefmt *formats;
+ u32 nsinks;
+ u32 nsources;
+ bool tdest_routing;
+ struct clk *aclk;
+ struct clk *saxi_ctlclk;
+};
+
+static inline struct xvswitch_device *to_xvsw(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xvswitch_device, subdev);
+}
+
+static inline u32 xvswitch_read(struct xvswitch_device *xvsw, u32 addr)
+{
+ return ioread32(xvsw->iomem + addr);
+}
+
+static inline void xvswitch_write(struct xvswitch_device *xvsw, u32 addr,
+ u32 value)
+{
+ iowrite32(value, xvsw->iomem + addr);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Video Operations
+ */
+
+static int xvsw_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xvswitch_device *xvsw = to_xvsw(subdev);
+ unsigned int i;
+
+ /* Nothing to be done in case of TDEST routing */
+ if (xvsw->tdest_routing)
+ return 0;
+
+ if (!enable) {
+ /* In control reg routing, disable all master ports */
+ for (i = 0; i < xvsw->nsources; i++) {
+ xvswitch_write(xvsw, XVSW_MI_MUX_REG_BASE + (i * 4),
+ XVSW_MI_MUX_DISABLE_MASK);
+ }
+ xvswitch_write(xvsw, XVSW_CTRL_REG, XVSW_CTRL_REG_UPDATE_MASK);
+ return 0;
+ }
+
+ /*
+ * In case of control reg routing,
+ * from routing table write the values into respective reg
+ * and enable
+ */
+ for (i = 0; i < MAX_VSW_SRCS; i++) {
+ u32 val;
+
+ if (xvsw->routing[i] != -1)
+ val = xvsw->routing[i];
+ else
+ val = XVSW_MI_MUX_DISABLE_MASK;
+
+ xvswitch_write(xvsw, XVSW_MI_MUX_REG_BASE + (i * 4),
+ val);
+ }
+
+ xvswitch_write(xvsw, XVSW_CTRL_REG, XVSW_CTRL_REG_UPDATE_MASK);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static struct v4l2_mbus_framefmt *
+xvsw_get_pad_format(struct xvswitch_device *xvsw,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xvsw->subdev, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xvsw->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static int xvsw_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xvswitch_device *xvsw = to_xvsw(subdev);
+ int pad = fmt->pad;
+
+ /*
+ * If control reg routing and pad is source pad then
+ * get corresponding sink pad. if no sink pad then
+ * clear the format and return
+ */
+
+ if (!xvsw->tdest_routing && pad >= xvsw->nsinks) {
+ pad = xvsw->routing[pad - xvsw->nsinks];
+ if (pad < 0) {
+ memset(&fmt->format, 0, sizeof(fmt->format));
+ return 0;
+ }
+ }
+
+ fmt->format = *xvsw_get_pad_format(xvsw, cfg, pad, fmt->which);
+
+ return 0;
+}
+
+static int xvsw_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xvswitch_device *xvsw = to_xvsw(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ if (!xvsw->tdest_routing && fmt->pad >= xvsw->nsinks) {
+ /*
+ * In case of control reg routing,
+ * get the corresponding sink pad to source pad passed.
+ *
+ * The source pad format is always identical to the
+ * sink pad format and can't be modified.
+ *
+ * If sink pad found then get_format for that pad
+ * else clear the fmt->format as the source pad
+ * isn't connected and return.
+ */
+ return xvsw_get_format(subdev, cfg, fmt);
+ }
+
+ if (xvsw->nsinks == 1 && fmt->pad != 0) {
+ struct v4l2_mbus_framefmt *sinkformat;
+
+ /*
+ * in tdest routing if there is only one sink then all the
+ * source pads will have same property as sink pad, assuming
+ * streams going to each source pad will have same
+ * properties.
+ */
+
+ /* get sink pad format */
+ sinkformat = xvsw_get_pad_format(xvsw, cfg, 0, fmt->which);
+
+ fmt->format = *sinkformat;
+
+ /* set sink pad format on source pad */
+ format = xvsw_get_pad_format(xvsw, cfg, fmt->pad, fmt->which);
+ *format = *sinkformat;
+
+ return 0;
+ }
+
+ /*
+ * In TDEST routing mode, one can set any format on the pad as
+ * it can't be checked which pad's data will travel to
+ * which pad. E.g. In a system with 2 slaves and 4 masters,
+ * S0 or S1 data can reach M0 thru M3 based on TDEST
+	 * S0 may have RGB and S1 may have YUV. M0, M1 stream RGB
+ * and M2, M3 stream YUV based on TDEST.
+ *
+ * In Control reg routing mode, set format only for sink pads.
+ */
+ format = xvsw_get_pad_format(xvsw, cfg, fmt->pad, fmt->which);
+
+ format->code = fmt->format.code;
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ XVIP_MIN_WIDTH, XVIP_MAX_WIDTH);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ XVIP_MIN_HEIGHT, XVIP_MAX_HEIGHT);
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ fmt->format = *format;
+
+ return 0;
+}
+
+static int xvsw_get_routing(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_routing *route)
+{
+ struct xvswitch_device *xvsw = to_xvsw(subdev);
+ unsigned int i;
+ u32 min;
+
+ /* In case of tdest routing, we can't get routing */
+ if (xvsw->tdest_routing)
+ return -EINVAL;
+
+ mutex_lock(&subdev->entity.graph_obj.mdev->graph_mutex);
+
+ if (xvsw->nsources < route->num_routes)
+ min = xvsw->nsources;
+ else
+ min = route->num_routes;
+
+ for (i = 0; i < min; ++i) {
+ route->routes[i].sink = xvsw->routing[i];
+ route->routes[i].source = i;
+ }
+
+ route->num_routes = xvsw->nsources;
+
+ mutex_unlock(&subdev->entity.graph_obj.mdev->graph_mutex);
+
+ return 0;
+}
+
+static int xvsw_set_routing(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_routing *route)
+{
+ struct xvswitch_device *xvsw = to_xvsw(subdev);
+ unsigned int i;
+ int ret = 0;
+
+ /* In case of tdest routing, we can't set routing */
+ if (xvsw->tdest_routing)
+ return -EINVAL;
+
+ mutex_lock(&subdev->entity.graph_obj.mdev->graph_mutex);
+
+ if (subdev->entity.stream_count) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ for (i = 0; i < xvsw->nsources; ++i)
+ xvsw->routing[i] = -1;
+
+ for (i = 0; i < route->num_routes; ++i)
+ xvsw->routing[route->routes[i].source - xvsw->nsinks] =
+ route->routes[i].sink;
+
+done:
+ mutex_unlock(&subdev->entity.graph_obj.mdev->graph_mutex);
+ return ret;
+}
+
+static int xvsw_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static int xvsw_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static struct v4l2_subdev_video_ops xvsw_video_ops = {
+ .s_stream = xvsw_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops xvsw_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xvip_enum_frame_size,
+ .get_fmt = xvsw_get_format,
+ .set_fmt = xvsw_set_format,
+ .get_routing = xvsw_get_routing,
+ .set_routing = xvsw_set_routing,
+};
+
+static struct v4l2_subdev_ops xvsw_ops = {
+ .video = &xvsw_video_ops,
+ .pad = &xvsw_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xvsw_internal_ops = {
+ .open = xvsw_open,
+ .close = xvsw_close,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media Operations
+ */
+
+/*
+ * xvsw_has_route - report whether two pads of the switch are connected
+ *
+ * Pads [0, nsinks) are sinks, [nsinks, nsinks + nsources) are sources.
+ * In TDEST mode every sink is assumed connected to every source; otherwise
+ * a source pad is connected to the sink recorded in the routing table.
+ *
+ * NOTE(review): routing[] entries are -1 when unrouted, so two unrouted
+ * sources compare equal here and are reported as connected — confirm this
+ * is intended.
+ */
+static bool xvsw_has_route(struct media_entity *entity, unsigned int pad0,
+ unsigned int pad1)
+{
+ struct xvswitch_device *xvsw =
+ container_of(entity, struct xvswitch_device, subdev.entity);
+ unsigned int sink0, sink1;
+
+ /* Two sinks are never connected together. */
+ if (pad0 < xvsw->nsinks && pad1 < xvsw->nsinks)
+ return false;
+
+ /* In TDEST routing, assume all sinks and sources are connected */
+ if (xvsw->tdest_routing)
+ return true;
+
+ /* Map each source pad back to the sink it is routed from. */
+ sink0 = pad0 < xvsw->nsinks ? pad0 : xvsw->routing[pad0 - xvsw->nsinks];
+ sink1 = pad1 < xvsw->nsinks ? pad1 : xvsw->routing[pad1 - xvsw->nsinks];
+
+ return sink0 == sink1;
+}
+
+static const struct media_entity_operations xvsw_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+ .has_route = xvsw_has_route,
+};
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+/*
+ * xvsw_parse_of - parse and validate the device tree node of the switch
+ *
+ * Reads the slot counts, routing mode and clocks, and verifies that the
+ * number of port children matches nsinks + nsources.
+ *
+ * Return: 0 on success or a negative error code.
+ */
+static int xvsw_parse_of(struct xvswitch_device *xvsw)
+{
+ struct device_node *node = xvsw->dev->of_node;
+ struct device_node *ports;
+ struct device_node *port;
+ unsigned int nports = 0;
+ u32 routing_mode;
+ int ret;
+
+ /*
+ * of_property_read_u32() returns 0 when the property exists, so an
+ * out-of-range value must be converted to -EINVAL explicitly instead
+ * of returning the (successful) ret.
+ */
+ ret = of_property_read_u32(node, "xlnx,num-si-slots", &xvsw->nsinks);
+ if (ret < 0 || xvsw->nsinks < MIN_VSW_SINKS ||
+ xvsw->nsinks > MAX_VSW_SINKS) {
+ dev_err(xvsw->dev, "missing or invalid xlnx,num-si-slots property\n");
+ return ret < 0 ? ret : -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,num-mi-slots", &xvsw->nsources);
+ if (ret < 0 || xvsw->nsources < MIN_VSW_SRCS ||
+ xvsw->nsources > MAX_VSW_SRCS) {
+ dev_err(xvsw->dev, "missing or invalid xlnx,num-mi-slots property\n");
+ return ret < 0 ? ret : -EINVAL;
+ }
+
+ /* routing_mode is u32, only the upper bound can be violated */
+ ret = of_property_read_u32(node, "xlnx,routing-mode", &routing_mode);
+ if (ret < 0 || routing_mode > 1) {
+ dev_err(xvsw->dev, "missing or invalid xlnx,routing property\n");
+ return ret < 0 ? ret : -EINVAL;
+ }
+
+ /* Mode 0 selects TDEST based routing, mode 1 control-register routing */
+ if (!routing_mode)
+ xvsw->tdest_routing = true;
+
+ xvsw->aclk = devm_clk_get(xvsw->dev, "aclk");
+ if (IS_ERR(xvsw->aclk)) {
+ ret = PTR_ERR(xvsw->aclk);
+ dev_err(xvsw->dev, "failed to get aclk (%d)\n", ret);
+ return ret;
+ }
+
+ /* The control interface clock exists only in register routing mode */
+ if (!xvsw->tdest_routing) {
+ xvsw->saxi_ctlclk = devm_clk_get(xvsw->dev,
+ "s_axi_ctl_clk");
+ if (IS_ERR(xvsw->saxi_ctlclk)) {
+ ret = PTR_ERR(xvsw->saxi_ctlclk);
+ dev_err(xvsw->dev,
+ "failed to get s_axi_ctl_clk (%d)\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (xvsw->tdest_routing && xvsw->nsinks > 1) {
+ dev_err(xvsw->dev, "sinks = %d. Driver Limitation max 1 sink in TDEST routing mode\n",
+ xvsw->nsinks);
+ return -EINVAL;
+ }
+
+ ports = of_get_child_by_name(node, "ports");
+ if (!ports)
+ ports = node;
+
+ /* Count the port children; each must carry at least one endpoint */
+ for_each_child_of_node(ports, port) {
+ struct device_node *endpoint;
+
+ if (!port->name || of_node_cmp(port->name, "port"))
+ continue;
+
+ endpoint = of_get_next_child(port, NULL);
+ if (!endpoint) {
+ dev_err(xvsw->dev, "No endpoint found\n");
+ /* for_each_child_of_node() holds a ref on port */
+ of_node_put(port);
+ if (ports != node)
+ of_node_put(ports);
+ return -EINVAL;
+ }
+
+ /* Only the endpoint's presence matters; drop its reference */
+ of_node_put(endpoint);
+
+ /* Count the number of ports. */
+ nports++;
+ }
+
+ if (ports != node)
+ of_node_put(ports);
+
+ /* validate number of ports */
+ if (nports != (xvsw->nsinks + xvsw->nsources)) {
+ dev_err(xvsw->dev, "invalid number of ports %u\n", nports);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * xvsw_probe - parse DT, set up pads/formats/clocks and register the subdev
+ *
+ * Registers are mapped only in control-register routing mode; in TDEST mode
+ * the core has no programming interface. Format slots are allocated per pad
+ * in TDEST mode, per sink otherwise.
+ */
+static int xvsw_probe(struct platform_device *pdev)
+{
+ struct v4l2_subdev *subdev;
+ struct xvswitch_device *xvsw;
+ struct resource *res;
+ unsigned int npads;
+ unsigned int i, padcount;
+ int ret;
+
+ xvsw = devm_kzalloc(&pdev->dev, sizeof(*xvsw), GFP_KERNEL);
+ if (!xvsw)
+ return -ENOMEM;
+
+ xvsw->dev = &pdev->dev;
+
+ ret = xvsw_parse_of(xvsw);
+ if (ret < 0)
+ return ret;
+
+ /* ioremap only if control reg based routing */
+ if (!xvsw->tdest_routing) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xvsw->iomem = devm_ioremap_resource(xvsw->dev, res);
+ if (IS_ERR(xvsw->iomem))
+ return PTR_ERR(xvsw->iomem);
+ }
+
+ /*
+ * Initialize V4L2 subdevice and media entity. Pad numbers depend on the
+ * number of pads.
+ */
+ npads = xvsw->nsinks + xvsw->nsources;
+ xvsw->pads = devm_kzalloc(&pdev->dev, npads * sizeof(*xvsw->pads),
+ GFP_KERNEL);
+ if (!xvsw->pads)
+ return -ENOMEM;
+
+ /* Sinks occupy pads [0, nsinks), sources the remainder. */
+ for (i = 0; i < xvsw->nsinks; ++i)
+ xvsw->pads[i].flags = MEDIA_PAD_FL_SINK;
+
+ for (; i < npads; ++i)
+ xvsw->pads[i].flags = MEDIA_PAD_FL_SOURCE;
+
+ padcount = xvsw->tdest_routing ? npads : xvsw->nsinks;
+
+ /*
+ * In case of tdest routing, allocate format per pad.
+ * source pad format has to match one of the sink pads in tdest routing.
+ *
+ * Otherwise only allocate for sinks as sources will
+ * get the same pad format and corresponding sink.
+ * set format on src pad will return corresponding sinks data.
+ */
+ xvsw->formats = devm_kzalloc(&pdev->dev,
+ padcount * sizeof(*xvsw->formats),
+ GFP_KERNEL);
+ if (!xvsw->formats) {
+ dev_err(xvsw->dev, "No memory to allocate formats!\n");
+ return -ENOMEM;
+ }
+
+ /* Default every format slot to full-size RGB888. */
+ for (i = 0; i < padcount; i++) {
+ xvsw->formats[i].code = MEDIA_BUS_FMT_RGB888_1X24;
+ xvsw->formats[i].field = V4L2_FIELD_NONE;
+ xvsw->formats[i].colorspace = V4L2_COLORSPACE_SRGB;
+ xvsw->formats[i].width = XVIP_MAX_WIDTH;
+ xvsw->formats[i].height = XVIP_MAX_HEIGHT;
+ }
+
+ /*
+ * Initialize the routing table if none are connected.
+ * Routing table is valid only incase routing is not TDEST based.
+ */
+ for (i = 0; i < MAX_VSW_SRCS; ++i)
+ xvsw->routing[i] = -1;
+
+ ret = clk_prepare_enable(xvsw->aclk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable aclk (%d)\n",
+ ret);
+ return ret;
+ }
+
+ if (!xvsw->tdest_routing) {
+ ret = clk_prepare_enable(xvsw->saxi_ctlclk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to enable s_axi_ctl_clk (%d)\n",
+ ret);
+ clk_disable_unprepare(xvsw->aclk);
+ return ret;
+ }
+ }
+
+ subdev = &xvsw->subdev;
+ v4l2_subdev_init(subdev, &xvsw_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xvsw_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ v4l2_set_subdevdata(subdev, xvsw);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ subdev->entity.ops = &xvsw_media_ops;
+
+ ret = media_entity_pads_init(&subdev->entity, npads, xvsw->pads);
+ if (ret < 0)
+ goto clk_error;
+
+ platform_set_drvdata(pdev, xvsw);
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto error;
+ }
+
+ dev_info(xvsw->dev, "Xilinx AXI4-Stream Switch found!\n");
+
+ return 0;
+
+/* Unwind in reverse order of acquisition. */
+error:
+ media_entity_cleanup(&subdev->entity);
+clk_error:
+ if (!xvsw->tdest_routing)
+ clk_disable_unprepare(xvsw->saxi_ctlclk);
+ clk_disable_unprepare(xvsw->aclk);
+ return ret;
+}
+
+/* Tear down in reverse order of probe: subdev, entity, then clocks. */
+static int xvsw_remove(struct platform_device *pdev)
+{
+ struct xvswitch_device *xvsw = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xvsw->subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ media_entity_cleanup(&subdev->entity);
+ /* The control clock is only enabled in register routing mode. */
+ if (!xvsw->tdest_routing)
+ clk_disable_unprepare(xvsw->saxi_ctlclk);
+ clk_disable_unprepare(xvsw->aclk);
+ return 0;
+}
+
+static const struct of_device_id xvsw_of_id_table[] = {
+ { .compatible = "xlnx,axis-switch-1.1" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xvsw_of_id_table);
+
+static struct platform_driver xvsw_driver = {
+ .driver = {
+ .name = "xilinx-axis-switch",
+ .of_match_table = xvsw_of_id_table,
+ },
+ .probe = xvsw_probe,
+ .remove = xvsw_remove,
+};
+
+module_platform_driver(xvsw_driver);
+
+MODULE_AUTHOR("Vishal Sagar <vishal.sagar@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx AXI4-Stream Switch Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-cfa.c b/drivers/media/platform/xilinx/xilinx-cfa.c
new file mode 100644
index 000000000000..832fb7306563
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-cfa.c
@@ -0,0 +1,394 @@
+/*
+ * Xilinx Color Filter Array
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-vip.h"
+
+#define XCFA_BAYER_PHASE 0x100
+#define XCFA_BAYER_PHASE_RGGB 0
+#define XCFA_BAYER_PHASE_GRBG 1
+#define XCFA_BAYER_PHASE_GBRG 2
+#define XCFA_BAYER_PHASE_BGGR 3
+
+/**
+ * struct xcfa_device - Xilinx CFA device structure
+ * @xvip: Xilinx Video IP device
+ * @pads: media pads
+ * @formats: V4L2 media bus formats
+ * @default_formats: default V4L2 media bus formats
+ * @vip_formats: Xilinx Video IP formats
+ */
+struct xcfa_device {
+ struct xvip_device xvip;
+
+ /* Index 0 (XVIP_PAD_SINK) and 1 (XVIP_PAD_SOURCE). */
+ struct media_pad pads[2];
+
+ struct v4l2_mbus_framefmt formats[2];
+ struct v4l2_mbus_framefmt default_formats[2];
+ const struct xvip_video_format *vip_formats[2];
+};
+
+/* Recover the driver state from the embedded subdev. */
+static inline struct xcfa_device *to_cfa(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xcfa_device, xvip.subdev);
+}
+
+/*
+ * V4L2 Subdevice Video Operations
+ */
+
+/*
+ * xcfa_get_bayer_phase - map a Bayer media bus code to the core's phase value
+ *
+ * Return: an XCFA_BAYER_PHASE_* value, or -EINVAL for a non-Bayer code.
+ */
+static int xcfa_get_bayer_phase(const unsigned int code)
+{
+ int phase = -EINVAL;
+
+ switch (code) {
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ phase = XCFA_BAYER_PHASE_RGGB;
+ break;
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ phase = XCFA_BAYER_PHASE_GRBG;
+ break;
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ phase = XCFA_BAYER_PHASE_GBRG;
+ break;
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ phase = XCFA_BAYER_PHASE_BGGR;
+ break;
+ }
+
+ return phase;
+}
+
+/*
+ * xcfa_s_stream - start/stop the CFA core
+ *
+ * On start, program the Bayer phase derived from the sink format and the
+ * frame size, then enable the core.
+ */
+static int xcfa_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xcfa_device *xcfa = to_cfa(subdev);
+ const unsigned int code = xcfa->formats[XVIP_PAD_SINK].code;
+ int bayer_phase;
+
+ if (!enable) {
+ xvip_stop(&xcfa->xvip);
+ return 0;
+ }
+
+ /*
+ * The sink format normally holds a Bayer code, but the default format
+ * comes from DT; guard against programming a negative error value
+ * into the phase register.
+ */
+ bayer_phase = xcfa_get_bayer_phase(code);
+ if (bayer_phase < 0)
+ return bayer_phase;
+
+ xvip_write(&xcfa->xvip, XCFA_BAYER_PHASE, bayer_phase);
+
+ xvip_set_frame_size(&xcfa->xvip, &xcfa->formats[XVIP_PAD_SINK]);
+
+ xvip_start(&xcfa->xvip);
+
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Pad Operations
+ */
+
+/*
+ * Return the TRY (per-fh) or ACTIVE (device) format for the given pad,
+ * or NULL when 'which' is neither — callers must handle NULL.
+ */
+static struct v4l2_mbus_framefmt *
+__xcfa_get_pad_format(struct xcfa_device *xcfa,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xcfa->xvip.subdev, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xcfa->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+/* Get the current format on a pad (TRY or ACTIVE, per fmt->which). */
+static int xcfa_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xcfa_device *xcfa = to_cfa(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* __xcfa_get_pad_format() returns NULL for an unknown 'which' */
+ format = __xcfa_get_pad_format(xcfa, cfg, fmt->pad, fmt->which);
+ if (!format)
+ return -EINVAL;
+
+ fmt->format = *format;
+
+ return 0;
+}
+
+/*
+ * Set the format on a pad. The source pad is read-only (it mirrors the
+ * sink); on the sink, only Bayer codes are accepted — other codes keep the
+ * previously stored code while the size is still updated and propagated.
+ */
+static int xcfa_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xcfa_device *xcfa = to_cfa(subdev);
+ struct v4l2_mbus_framefmt *format;
+ int bayer_phase;
+
+ format = __xcfa_get_pad_format(xcfa, cfg, fmt->pad, fmt->which);
+
+ /* The source pad cannot be reconfigured directly. */
+ if (fmt->pad == XVIP_PAD_SOURCE) {
+ fmt->format = *format;
+ return 0;
+ }
+
+ /* Accept the code only when it maps to a valid Bayer phase. */
+ bayer_phase = xcfa_get_bayer_phase(fmt->format.code);
+ if (bayer_phase >= 0) {
+ xcfa->vip_formats[XVIP_PAD_SINK] =
+ xvip_get_format_by_code(fmt->format.code);
+ format->code = fmt->format.code;
+ }
+
+ xvip_set_format_size(format, fmt);
+
+ fmt->format = *format;
+
+ /* Propagate the format to the source pad */
+ format = __xcfa_get_pad_format(xcfa, cfg, XVIP_PAD_SOURCE, fmt->which);
+
+ xvip_set_format_size(format, fmt);
+
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Operations
+ */
+
+/* Seed the per-file-handle TRY formats with the device defaults. */
+static int xcfa_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ struct xcfa_device *xcfa = to_cfa(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* Initialize with default formats */
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SINK);
+ *format = xcfa->default_formats[XVIP_PAD_SINK];
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SOURCE);
+ *format = xcfa->default_formats[XVIP_PAD_SOURCE];
+
+ return 0;
+}
+
+/* Nothing to release per file handle. */
+static int xcfa_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static struct v4l2_subdev_video_ops xcfa_video_ops = {
+ .s_stream = xcfa_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops xcfa_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xvip_enum_frame_size,
+ .get_fmt = xcfa_get_format,
+ .set_fmt = xcfa_set_format,
+};
+
+static struct v4l2_subdev_ops xcfa_ops = {
+ .video = &xcfa_video_ops,
+ .pad = &xcfa_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xcfa_internal_ops = {
+ .open = xcfa_open,
+ .close = xcfa_close,
+};
+
+/*
+ * Media Operations
+ */
+
+static const struct media_entity_operations xcfa_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * Power Management
+ */
+
+/* System suspend: quiesce the video IP core. */
+static int __maybe_unused xcfa_pm_suspend(struct device *dev)
+{
+ struct xcfa_device *xcfa = dev_get_drvdata(dev);
+
+ xvip_suspend(&xcfa->xvip);
+
+ return 0;
+}
+
+/* System resume: re-enable the video IP core. */
+static int __maybe_unused xcfa_pm_resume(struct device *dev)
+{
+ struct xcfa_device *xcfa = dev_get_drvdata(dev);
+
+ xvip_resume(&xcfa->xvip);
+
+ return 0;
+}
+
+/*
+ * Platform Device Driver
+ */
+
+/*
+ * xcfa_parse_of - read the per-port format description from the device tree
+ *
+ * Each "port" child must carry a valid xvip format and a reg of 0 (sink)
+ * or 1 (source).
+ *
+ * Return: 0 on success or a negative error code.
+ */
+static int xcfa_parse_of(struct xcfa_device *xcfa)
+{
+ struct device *dev = xcfa->xvip.dev;
+ struct device_node *node = xcfa->xvip.dev->of_node;
+ struct device_node *ports;
+ struct device_node *port;
+ u32 port_id;
+ int ret;
+
+ ports = of_get_child_by_name(node, "ports");
+ if (ports == NULL)
+ ports = node;
+
+ /* Get the format description for each pad */
+ for_each_child_of_node(ports, port) {
+ if (port->name && (of_node_cmp(port->name, "port") == 0)) {
+ const struct xvip_video_format *vip_format;
+
+ vip_format = xvip_of_get_format(port);
+ if (IS_ERR(vip_format)) {
+ dev_err(dev, "invalid format in DT");
+ ret = PTR_ERR(vip_format);
+ goto error;
+ }
+
+ ret = of_property_read_u32(port, "reg", &port_id);
+ if (ret < 0) {
+ dev_err(dev, "no reg in DT");
+ goto error;
+ }
+
+ if (port_id != 0 && port_id != 1) {
+ dev_err(dev, "invalid reg in DT");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ xcfa->vip_formats[port_id] = vip_format;
+ }
+ }
+
+ if (ports != node)
+ of_node_put(ports);
+ return 0;
+
+error:
+ /* for_each_child_of_node() holds a reference on 'port' */
+ of_node_put(port);
+ if (ports != node)
+ of_node_put(ports);
+ return ret;
+}
+
+/*
+ * xcfa_probe - initialize resources, formats, media entity and register
+ * the subdev.
+ *
+ * NOTE(review): vip_formats[] entries are dereferenced below but nothing
+ * verifies both ports were present in DT — confirm xcfa_parse_of()
+ * guarantees this, otherwise a missing port oopses here.
+ */
+static int xcfa_probe(struct platform_device *pdev)
+{
+ struct xcfa_device *xcfa;
+ struct v4l2_subdev *subdev;
+ struct v4l2_mbus_framefmt *default_format;
+ int ret;
+
+ xcfa = devm_kzalloc(&pdev->dev, sizeof(*xcfa), GFP_KERNEL);
+ if (!xcfa)
+ return -ENOMEM;
+
+ xcfa->xvip.dev = &pdev->dev;
+
+ ret = xcfa_parse_of(xcfa);
+ if (ret < 0)
+ return ret;
+
+ ret = xvip_init_resources(&xcfa->xvip);
+ if (ret < 0)
+ return ret;
+
+ /* Reset and initialize the core */
+ xvip_reset(&xcfa->xvip);
+
+ /* Initialize V4L2 subdevice and media entity */
+ subdev = &xcfa->xvip.subdev;
+ v4l2_subdev_init(subdev, &xcfa_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xcfa_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ v4l2_set_subdevdata(subdev, xcfa);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ /* Initialize default and active formats */
+ default_format = &xcfa->default_formats[XVIP_PAD_SINK];
+ default_format->code = xcfa->vip_formats[XVIP_PAD_SINK]->code;
+ default_format->field = V4L2_FIELD_NONE;
+ default_format->colorspace = V4L2_COLORSPACE_SRGB;
+ xvip_get_frame_size(&xcfa->xvip, default_format);
+
+ xcfa->formats[XVIP_PAD_SINK] = *default_format;
+
+ /* The source default mirrors the sink, with the source's DT code. */
+ default_format = &xcfa->default_formats[XVIP_PAD_SOURCE];
+ *default_format = xcfa->default_formats[XVIP_PAD_SINK];
+ default_format->code = xcfa->vip_formats[XVIP_PAD_SOURCE]->code;
+
+ xcfa->formats[XVIP_PAD_SOURCE] = *default_format;
+
+ xcfa->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ xcfa->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ subdev->entity.ops = &xcfa_media_ops;
+ ret = media_entity_pads_init(&subdev->entity, 2, xcfa->pads);
+ if (ret < 0)
+ goto error;
+
+ platform_set_drvdata(pdev, xcfa);
+
+ xvip_print_version(&xcfa->xvip);
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ media_entity_cleanup(&subdev->entity);
+ xvip_cleanup_resources(&xcfa->xvip);
+ return ret;
+}
+
+/* Tear down in reverse order of probe. */
+static int xcfa_remove(struct platform_device *pdev)
+{
+ struct xcfa_device *xcfa = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xcfa->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ media_entity_cleanup(&subdev->entity);
+
+ xvip_cleanup_resources(&xcfa->xvip);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(xcfa_pm_ops, xcfa_pm_suspend, xcfa_pm_resume);
+
+static const struct of_device_id xcfa_of_id_table[] = {
+ { .compatible = "xlnx,v-cfa-7.0" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xcfa_of_id_table);
+
+static struct platform_driver xcfa_driver = {
+ .driver = {
+ .name = "xilinx-cfa",
+ .pm = &xcfa_pm_ops,
+ .of_match_table = xcfa_of_id_table,
+ },
+ .probe = xcfa_probe,
+ .remove = xcfa_remove,
+};
+
+module_platform_driver(xcfa_driver);
+
+MODULE_DESCRIPTION("Xilinx Color Filter Array Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-cresample.c b/drivers/media/platform/xilinx/xilinx-cresample.c
new file mode 100644
index 000000000000..05335c10a388
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-cresample.c
@@ -0,0 +1,447 @@
+/*
+ * Xilinx Chroma Resampler
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/xilinx-v4l2-controls.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-vip.h"
+
+#define XCRESAMPLE_ENCODING 0x100
+#define XCRESAMPLE_ENCODING_FIELD (1 << 7)
+#define XCRESAMPLE_ENCODING_CHROMA (1 << 8)
+
+/**
+ * struct xcresample_device - Xilinx CRESAMPLE device structure
+ * @xvip: Xilinx Video IP device
+ * @pads: media pads
+ * @formats: V4L2 media bus formats at the sink and source pads
+ * @default_formats: default V4L2 media bus formats
+ * @vip_formats: Xilinx Video IP formats
+ * @ctrl_handler: control handler
+ */
+struct xcresample_device {
+ struct xvip_device xvip;
+
+ /* Index 0 (XVIP_PAD_SINK) and 1 (XVIP_PAD_SOURCE). */
+ struct media_pad pads[2];
+
+ struct v4l2_mbus_framefmt formats[2];
+ struct v4l2_mbus_framefmt default_formats[2];
+ const struct xvip_video_format *vip_formats[2];
+
+ struct v4l2_ctrl_handler ctrl_handler;
+};
+
+/* Recover the driver state from the embedded subdev. */
+static inline struct xcresample_device *to_cresample(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xcresample_device, xvip.subdev);
+}
+
+/*
+ * V4L2 Subdevice Video Operations
+ */
+
+/* Start/stop the core; on start, program the sink frame size first. */
+static int xcresample_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xcresample_device *xcresample = to_cresample(subdev);
+
+ if (!enable) {
+ xvip_stop(&xcresample->xvip);
+ return 0;
+ }
+
+ xvip_set_frame_size(&xcresample->xvip,
+ &xcresample->formats[XVIP_PAD_SINK]);
+
+ xvip_start(&xcresample->xvip);
+
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Pad Operations
+ */
+
+/*
+ * Return the TRY (per-fh) or ACTIVE (device) format for the given pad,
+ * or NULL when 'which' is neither — callers must handle NULL.
+ */
+static struct v4l2_mbus_framefmt *
+__xcresample_get_pad_format(struct xcresample_device *xcresample,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xcresample->xvip.subdev, cfg,
+ pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xcresample->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+/* Get the current format on a pad (TRY or ACTIVE, per fmt->which). */
+static int xcresample_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xcresample_device *xcresample = to_cresample(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* __xcresample_get_pad_format() returns NULL for an unknown 'which' */
+ format = __xcresample_get_pad_format(xcresample, cfg, fmt->pad,
+ fmt->which);
+ if (!format)
+ return -EINVAL;
+
+ fmt->format = *format;
+
+ return 0;
+}
+
+/*
+ * Set the format on a pad. The source pad is read-only (it mirrors the
+ * sink); a sink size change is propagated to the source.
+ */
+static int xcresample_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xcresample_device *xcresample = to_cresample(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __xcresample_get_pad_format(xcresample, cfg, fmt->pad,
+ fmt->which);
+
+ /* The source pad cannot be reconfigured directly. */
+ if (fmt->pad == XVIP_PAD_SOURCE) {
+ fmt->format = *format;
+ return 0;
+ }
+
+ xvip_set_format_size(format, fmt);
+
+ fmt->format = *format;
+
+ /* Propagate the format to the source pad. */
+ format = __xcresample_get_pad_format(xcresample, cfg, XVIP_PAD_SOURCE,
+ fmt->which);
+
+ xvip_set_format_size(format, fmt);
+
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Operations
+ */
+
+/* Seed the per-file-handle TRY formats with the device defaults. */
+static int xcresample_open(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh)
+{
+ struct xcresample_device *xcresample = to_cresample(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* Initialize with default formats */
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SINK);
+ *format = xcresample->default_formats[XVIP_PAD_SINK];
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SOURCE);
+ *format = xcresample->default_formats[XVIP_PAD_SOURCE];
+
+ return 0;
+}
+
+/* Nothing to release per file handle. */
+static int xcresample_close(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+/*
+ * xcresample_s_ctrl - apply a parity control to the encoding register
+ *
+ * Sets or clears the field/chroma parity bit matching the control value.
+ * Return: 0 on success, -EINVAL for an unknown control id.
+ */
+static int xcresample_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct xcresample_device *xcresample =
+ container_of(ctrl->handler, struct xcresample_device,
+ ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_XILINX_CRESAMPLE_FIELD_PARITY:
+ xvip_clr_or_set(&xcresample->xvip, XCRESAMPLE_ENCODING,
+ XCRESAMPLE_ENCODING_FIELD, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_CRESAMPLE_CHROMA_PARITY:
+ xvip_clr_or_set(&xcresample->xvip, XCRESAMPLE_ENCODING,
+ XCRESAMPLE_ENCODING_CHROMA, ctrl->val);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct v4l2_ctrl_ops xcresample_ctrl_ops = {
+ .s_ctrl = xcresample_s_ctrl,
+};
+
+/* Video ops: only stream start/stop is implemented. */
+static struct v4l2_subdev_video_ops xcresample_video_ops = {
+ .s_stream = xcresample_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops xcresample_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xvip_enum_frame_size,
+ .get_fmt = xcresample_get_format,
+ .set_fmt = xcresample_set_format,
+};
+
+static struct v4l2_subdev_ops xcresample_ops = {
+ .video = &xcresample_video_ops,
+ .pad = &xcresample_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xcresample_internal_ops = {
+ .open = xcresample_open,
+ .close = xcresample_close,
+};
+
+/*
+ * Control Configs
+ */
+
+/* Menu entries shared by both parity controls (0 = Even, 1 = Odd). */
+static const char *const xcresample_parity_string[] = {
+ "Even",
+ "Odd",
+};
+
+/*
+ * NOTE(review): probe mutates .def of these static configs from hardware
+ * state, so with multiple device instances the last probe wins — confirm
+ * single-instance use or move .def handling into per-device data.
+ */
+static struct v4l2_ctrl_config xcresample_field = {
+ .ops = &xcresample_ctrl_ops,
+ .id = V4L2_CID_XILINX_CRESAMPLE_FIELD_PARITY,
+ .name = "Chroma Resampler: Encoding Field Parity",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 0,
+ .max = 1,
+ .qmenu = xcresample_parity_string,
+};
+
+static struct v4l2_ctrl_config xcresample_chroma = {
+ .ops = &xcresample_ctrl_ops,
+ .id = V4L2_CID_XILINX_CRESAMPLE_CHROMA_PARITY,
+ .name = "Chroma Resampler: Encoding Chroma Parity",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 0,
+ .max = 1,
+ .qmenu = xcresample_parity_string,
+};
+
+/*
+ * Media Operations
+ */
+
+static const struct media_entity_operations xcresample_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * Power Management
+ */
+
+/* System suspend: quiesce the video IP core. */
+static int __maybe_unused xcresample_pm_suspend(struct device *dev)
+{
+ struct xcresample_device *xcresample = dev_get_drvdata(dev);
+
+ xvip_suspend(&xcresample->xvip);
+
+ return 0;
+}
+
+/* System resume: re-enable the video IP core. */
+static int __maybe_unused xcresample_pm_resume(struct device *dev)
+{
+ struct xcresample_device *xcresample = dev_get_drvdata(dev);
+
+ xvip_resume(&xcresample->xvip);
+
+ return 0;
+}
+
+/*
+ * Platform Device Driver
+ */
+
+/*
+ * xcresample_parse_of - read the per-port format description from DT
+ *
+ * Each "port" child must carry a valid xvip format and a reg of 0 (sink)
+ * or 1 (source).
+ *
+ * Return: 0 on success or a negative error code.
+ */
+static int xcresample_parse_of(struct xcresample_device *xcresample)
+{
+ struct device *dev = xcresample->xvip.dev;
+ struct device_node *node = xcresample->xvip.dev->of_node;
+ struct device_node *ports;
+ struct device_node *port;
+ u32 port_id;
+ int ret;
+
+ ports = of_get_child_by_name(node, "ports");
+ if (ports == NULL)
+ ports = node;
+
+ /* Get the format description for each pad */
+ for_each_child_of_node(ports, port) {
+ if (port->name && (of_node_cmp(port->name, "port") == 0)) {
+ const struct xvip_video_format *vip_format;
+
+ vip_format = xvip_of_get_format(port);
+ if (IS_ERR(vip_format)) {
+ dev_err(dev, "invalid format in DT");
+ ret = PTR_ERR(vip_format);
+ goto error;
+ }
+
+ ret = of_property_read_u32(port, "reg", &port_id);
+ if (ret < 0) {
+ dev_err(dev, "no reg in DT");
+ goto error;
+ }
+
+ if (port_id != 0 && port_id != 1) {
+ dev_err(dev, "invalid reg in DT");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ xcresample->vip_formats[port_id] = vip_format;
+ }
+ }
+
+ if (ports != node)
+ of_node_put(ports);
+ return 0;
+
+error:
+ /* for_each_child_of_node() holds a reference on 'port' */
+ of_node_put(port);
+ if (ports != node)
+ of_node_put(ports);
+ return ret;
+}
+
+/*
+ * xcresample_probe - initialize resources, formats, controls and register
+ * the subdev.
+ *
+ * NOTE(review): vip_formats[] entries are dereferenced below but nothing
+ * verifies both ports were present in DT — confirm the parse step
+ * guarantees this.
+ */
+static int xcresample_probe(struct platform_device *pdev)
+{
+ struct xcresample_device *xcresample;
+ struct v4l2_subdev *subdev;
+ struct v4l2_mbus_framefmt *default_format;
+ int ret;
+
+ xcresample = devm_kzalloc(&pdev->dev, sizeof(*xcresample), GFP_KERNEL);
+ if (!xcresample)
+ return -ENOMEM;
+
+ xcresample->xvip.dev = &pdev->dev;
+
+ ret = xcresample_parse_of(xcresample);
+ if (ret < 0)
+ return ret;
+
+ ret = xvip_init_resources(&xcresample->xvip);
+ if (ret < 0)
+ return ret;
+
+ /* Reset and initialize the core */
+ xvip_reset(&xcresample->xvip);
+
+ /* Initialize V4L2 subdevice and media entity */
+ subdev = &xcresample->xvip.subdev;
+ v4l2_subdev_init(subdev, &xcresample_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xcresample_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ v4l2_set_subdevdata(subdev, xcresample);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ /* Initialize default and active formats */
+ default_format = &xcresample->default_formats[XVIP_PAD_SINK];
+ default_format->code = xcresample->vip_formats[XVIP_PAD_SINK]->code;
+ default_format->field = V4L2_FIELD_NONE;
+ default_format->colorspace = V4L2_COLORSPACE_SRGB;
+ xvip_get_frame_size(&xcresample->xvip, default_format);
+
+ xcresample->formats[XVIP_PAD_SINK] = *default_format;
+
+ /* The source default mirrors the sink, with the source's DT code. */
+ default_format = &xcresample->default_formats[XVIP_PAD_SOURCE];
+ *default_format = xcresample->default_formats[XVIP_PAD_SINK];
+ default_format->code = xcresample->vip_formats[XVIP_PAD_SOURCE]->code;
+
+ xcresample->formats[XVIP_PAD_SOURCE] = *default_format;
+
+ xcresample->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ xcresample->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ subdev->entity.ops = &xcresample_media_ops;
+ ret = media_entity_pads_init(&subdev->entity, 2, xcresample->pads);
+ if (ret < 0)
+ goto error;
+
+ /* Seed the control defaults from the current hardware state. */
+ v4l2_ctrl_handler_init(&xcresample->ctrl_handler, 2);
+ xcresample_field.def =
+ (xvip_read(&xcresample->xvip, XCRESAMPLE_ENCODING) &
+ XCRESAMPLE_ENCODING_FIELD) ? 1 : 0;
+ v4l2_ctrl_new_custom(&xcresample->ctrl_handler, &xcresample_field,
+ NULL);
+ xcresample_chroma.def =
+ (xvip_read(&xcresample->xvip, XCRESAMPLE_ENCODING) &
+ XCRESAMPLE_ENCODING_CHROMA) ? 1 : 0;
+ v4l2_ctrl_new_custom(&xcresample->ctrl_handler, &xcresample_chroma,
+ NULL);
+ if (xcresample->ctrl_handler.error) {
+ dev_err(&pdev->dev, "failed to add controls\n");
+ ret = xcresample->ctrl_handler.error;
+ goto error;
+ }
+ subdev->ctrl_handler = &xcresample->ctrl_handler;
+
+ platform_set_drvdata(pdev, xcresample);
+
+ xvip_print_version(&xcresample->xvip);
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(&xcresample->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+ xvip_cleanup_resources(&xcresample->xvip);
+ return ret;
+}
+
+/* Tear down in reverse order of probe. */
+static int xcresample_remove(struct platform_device *pdev)
+{
+ struct xcresample_device *xcresample = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xcresample->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ v4l2_ctrl_handler_free(&xcresample->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+
+ xvip_cleanup_resources(&xcresample->xvip);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(xcresample_pm_ops, xcresample_pm_suspend,
+ xcresample_pm_resume);
+
+static const struct of_device_id xcresample_of_id_table[] = {
+ { .compatible = "xlnx,v-cresample-4.0" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xcresample_of_id_table);
+
+static struct platform_driver xcresample_driver = {
+ .driver = {
+ .name = "xilinx-cresample",
+ .pm = &xcresample_pm_ops,
+ .of_match_table = xcresample_of_id_table,
+ },
+ .probe = xcresample_probe,
+ .remove = xcresample_remove,
+};
+
+module_platform_driver(xcresample_driver);
+
+MODULE_DESCRIPTION("Xilinx Chroma Resampler Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-csi2rxss.c b/drivers/media/platform/xilinx/xilinx-csi2rxss.c
new file mode 100644
index 000000000000..546783a4efcb
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-csi2rxss.c
@@ -0,0 +1,2098 @@
+/*
+ * Xilinx MIPI CSI2 Subsystem
+ *
+ * Copyright (C) 2016 Xilinx, Inc.
+ *
+ * Contacts: Vishal Sagar <vsagar@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/media/xilinx-vip.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/v4l2-subdev.h>
+#include <linux/xilinx-csi2rxss.h>
+#include <linux/xilinx-v4l2-controls.h>
+#include <media/media-entity.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+#include "xilinx-vip.h"
+
+/*
+ * MIPI CSI2 Rx register map, bitmask and offsets
+ */
+#define XCSI_CCR_OFFSET 0x00000000
+#define XCSI_CCR_SOFTRESET_SHIFT 1
+#define XCSI_CCR_COREENB_SHIFT 0
+#define XCSI_CCR_SOFTRESET_MASK BIT(XCSI_CCR_SOFTRESET_SHIFT)
+#define XCSI_CCR_COREENB_MASK BIT(XCSI_CCR_COREENB_SHIFT)
+
+#define XCSI_PCR_OFFSET 0x00000004
+#define XCSI_PCR_MAXLANES_MASK 0x00000018
+#define XCSI_PCR_ACTLANES_MASK 0x00000003
+#define XCSI_PCR_MAXLANES_SHIFT 3
+#define XCSI_PCR_ACTLANES_SHIFT 0
+
+#define XCSI_CSR_OFFSET 0x00000010
+#define XCSI_CSR_PKTCOUNT_SHIFT 16
+#define XCSI_CSR_SPFIFOFULL_SHIFT 3
+#define XCSI_CSR_SPFIFONE_SHIFT 2
+#define XCSI_CSR_SLBF_SHIFT 1
+#define XCSI_CSR_RIPCD_SHIFT 0
+#define XCSI_CSR_PKTCOUNT_MASK 0xFFFF0000
+#define XCSI_CSR_SPFIFOFULL_MASK BIT(XCSI_CSR_SPFIFOFULL_SHIFT)
+#define XCSI_CSR_SPFIFONE_MASK BIT(XCSI_CSR_SPFIFONE_SHIFT)
+#define XCSI_CSR_SLBF_MASK BIT(XCSI_CSR_SLBF_SHIFT)
+#define XCSI_CSR_RIPCD_MASK BIT(XCSI_CSR_RIPCD_SHIFT)
+
+#define XCSI_GIER_OFFSET 0x00000020
+#define XCSI_GIER_GIE_SHIFT 0
+#define XCSI_GIER_GIE_MASK BIT(XCSI_GIER_GIE_SHIFT)
+#define XCSI_GIER_SET 1
+#define XCSI_GIER_RESET 0
+
+#define XCSI_ISR_OFFSET 0x00000024
+#define XCSI_ISR_FR_SHIFT 31
+#define XCSI_ISR_VCX_SHIFT 30
+#define XCSI_ISR_ILC_SHIFT 21
+#define XCSI_ISR_SPFIFOF_SHIFT 20
+#define XCSI_ISR_SPFIFONE_SHIFT 19
+#define XCSI_ISR_SLBF_SHIFT 18
+#define XCSI_ISR_STOP_SHIFT 17
+#define XCSI_ISR_SOTERR_SHIFT 13
+#define XCSI_ISR_SOTSYNCERR_SHIFT 12
+#define XCSI_ISR_ECC2BERR_SHIFT 11
+#define XCSI_ISR_ECC1BERR_SHIFT 10
+#define XCSI_ISR_CRCERR_SHIFT 9
+#define XCSI_ISR_DATAIDERR_SHIFT 8
+#define XCSI_ISR_VC3FSYNCERR_SHIFT 7
+#define XCSI_ISR_VC3FLVLERR_SHIFT 6
+#define XCSI_ISR_VC2FSYNCERR_SHIFT 5
+#define XCSI_ISR_VC2FLVLERR_SHIFT 4
+#define XCSI_ISR_VC1FSYNCERR_SHIFT 3
+#define XCSI_ISR_VC1FLVLERR_SHIFT 2
+#define XCSI_ISR_VC0FSYNCERR_SHIFT 1
+#define XCSI_ISR_VC0FLVLERR_SHIFT 0
+#define XCSI_ISR_FR_MASK BIT(XCSI_ISR_FR_SHIFT)
+#define XCSI_ISR_VCX_MASK BIT(XCSI_ISR_VCX_SHIFT)
+#define XCSI_ISR_ILC_MASK BIT(XCSI_ISR_ILC_SHIFT)
+#define XCSI_ISR_SPFIFOF_MASK BIT(XCSI_ISR_SPFIFOF_SHIFT)
+#define XCSI_ISR_SPFIFONE_MASK BIT(XCSI_ISR_SPFIFONE_SHIFT)
+#define XCSI_ISR_SLBF_MASK BIT(XCSI_ISR_SLBF_SHIFT)
+#define XCSI_ISR_STOP_MASK BIT(XCSI_ISR_STOP_SHIFT)
+#define XCSI_ISR_SOTERR_MASK BIT(XCSI_ISR_SOTERR_SHIFT)
+#define XCSI_ISR_SOTSYNCERR_MASK BIT(XCSI_ISR_SOTSYNCERR_SHIFT)
+#define XCSI_ISR_ECC2BERR_MASK BIT(XCSI_ISR_ECC2BERR_SHIFT)
+#define XCSI_ISR_ECC1BERR_MASK BIT(XCSI_ISR_ECC1BERR_SHIFT)
+#define XCSI_ISR_CRCERR_MASK BIT(XCSI_ISR_CRCERR_SHIFT)
+#define XCSI_ISR_DATAIDERR_MASK BIT(XCSI_ISR_DATAIDERR_SHIFT)
+#define XCSI_ISR_VC3FSYNCERR_MASK BIT(XCSI_ISR_VC3FSYNCERR_SHIFT)
+#define XCSI_ISR_VC3FLVLERR_MASK BIT(XCSI_ISR_VC3FLVLERR_SHIFT)
+#define XCSI_ISR_VC2FSYNCERR_MASK BIT(XCSI_ISR_VC2FSYNCERR_SHIFT)
+#define XCSI_ISR_VC2FLVLERR_MASK BIT(XCSI_ISR_VC2FLVLERR_SHIFT)
+#define XCSI_ISR_VC1FSYNCERR_MASK BIT(XCSI_ISR_VC1FSYNCERR_SHIFT)
+#define XCSI_ISR_VC1FLVLERR_MASK BIT(XCSI_ISR_VC1FLVLERR_SHIFT)
+#define XCSI_ISR_VC0FSYNCERR_MASK BIT(XCSI_ISR_VC0FSYNCERR_SHIFT)
+#define XCSI_ISR_VC0FLVLERR_MASK BIT(XCSI_ISR_VC0FLVLERR_SHIFT)
+#define XCSI_ISR_ALLINTR_MASK 0xC03FFFFF
+
+#define XCSI_INTR_PROT_MASK (XCSI_ISR_VC3FSYNCERR_MASK | \
+ XCSI_ISR_VC3FLVLERR_MASK | \
+ XCSI_ISR_VC2FSYNCERR_MASK | \
+ XCSI_ISR_VC2FLVLERR_MASK | \
+ XCSI_ISR_VC1FSYNCERR_MASK | \
+ XCSI_ISR_VC1FLVLERR_MASK | \
+ XCSI_ISR_VC0FSYNCERR_MASK | \
+ XCSI_ISR_VC0FLVLERR_MASK | \
+ XCSI_ISR_VCX_MASK)
+
+#define XCSI_INTR_PKTLVL_MASK (XCSI_ISR_ECC2BERR_MASK | \
+ XCSI_ISR_ECC1BERR_MASK | \
+ XCSI_ISR_CRCERR_MASK | \
+ XCSI_ISR_DATAIDERR_MASK)
+
+#define XCSI_INTR_DPHY_MASK (XCSI_ISR_SOTERR_MASK | \
+ XCSI_ISR_SOTSYNCERR_MASK)
+
+#define XCSI_INTR_SPKT_MASK (XCSI_ISR_SPFIFOF_MASK | \
+ XCSI_ISR_SPFIFONE_MASK)
+
+#define XCSI_INTR_FRAMERCVD_MASK (XCSI_ISR_FR_MASK)
+
+#define XCSI_INTR_ERR_MASK (XCSI_ISR_ILC_MASK | \
+ XCSI_ISR_SLBF_MASK | \
+ XCSI_ISR_STOP_MASK)
+
+#define XCSI_IER_OFFSET 0x00000028
+#define XCSI_IER_FR_SHIFT 31
+#define XCSI_IER_VCX_SHIFT 30
+#define XCSI_IER_ILC_SHIFT 21
+#define XCSI_IER_SPFIFOF_SHIFT 20
+#define XCSI_IER_SPFIFONE_SHIFT 19
+#define XCSI_IER_SLBF_SHIFT 18
+#define XCSI_IER_STOP_SHIFT 17
+#define XCSI_IER_SOTERR_SHIFT 13
+#define XCSI_IER_SOTSYNCERR_SHIFT 12
+#define XCSI_IER_ECC2BERR_SHIFT 11
+#define XCSI_IER_ECC1BERR_SHIFT 10
+#define XCSI_IER_CRCERR_SHIFT 9
+#define XCSI_IER_DATAIDERR_SHIFT 8
+#define XCSI_IER_VC3FSYNCERR_SHIFT 7
+#define XCSI_IER_VC3FLVLERR_SHIFT 6
+#define XCSI_IER_VC2FSYNCERR_SHIFT 5
+#define XCSI_IER_VC2FLVLERR_SHIFT 4
+#define XCSI_IER_VC1FSYNCERR_SHIFT 3
+#define XCSI_IER_VC1FLVLERR_SHIFT 2
+#define XCSI_IER_VC0FSYNCERR_SHIFT 1
+#define XCSI_IER_VC0FLVLERR_SHIFT 0
+#define XCSI_IER_FR_MASK BIT(XCSI_IER_FR_SHIFT)
+#define XCSI_IER_VCX_MASK BIT(XCSI_IER_VCX_SHIFT)
+#define XCSI_IER_ILC_MASK BIT(XCSI_IER_ILC_SHIFT)
+#define XCSI_IER_SPFIFOF_MASK BIT(XCSI_IER_SPFIFOF_SHIFT)
+#define XCSI_IER_SPFIFONE_MASK BIT(XCSI_IER_SPFIFONE_SHIFT)
+#define XCSI_IER_SLBF_MASK BIT(XCSI_IER_SLBF_SHIFT)
+#define XCSI_IER_STOP_MASK BIT(XCSI_IER_STOP_SHIFT)
+#define XCSI_IER_SOTERR_MASK BIT(XCSI_IER_SOTERR_SHIFT)
+#define XCSI_IER_SOTSYNCERR_MASK BIT(XCSI_IER_SOTSYNCERR_SHIFT)
+#define XCSI_IER_ECC2BERR_MASK BIT(XCSI_IER_ECC2BERR_SHIFT)
+#define XCSI_IER_ECC1BERR_MASK BIT(XCSI_IER_ECC1BERR_SHIFT)
+#define XCSI_IER_CRCERR_MASK BIT(XCSI_IER_CRCERR_SHIFT)
+#define XCSI_IER_DATAIDERR_MASK BIT(XCSI_IER_DATAIDERR_SHIFT)
+#define XCSI_IER_VC3FSYNCERR_MASK BIT(XCSI_IER_VC3FSYNCERR_SHIFT)
+#define XCSI_IER_VC3FLVLERR_MASK BIT(XCSI_IER_VC3FLVLERR_SHIFT)
+#define XCSI_IER_VC2FSYNCERR_MASK BIT(XCSI_IER_VC2FSYNCERR_SHIFT)
+#define XCSI_IER_VC2FLVLERR_MASK BIT(XCSI_IER_VC2FLVLERR_SHIFT)
+#define XCSI_IER_VC1FSYNCERR_MASK BIT(XCSI_IER_VC1FSYNCERR_SHIFT)
+#define XCSI_IER_VC1FLVLERR_MASK BIT(XCSI_IER_VC1FLVLERR_SHIFT)
+#define XCSI_IER_VC0FSYNCERR_MASK BIT(XCSI_IER_VC0FSYNCERR_SHIFT)
+#define XCSI_IER_VC0FLVLERR_MASK BIT(XCSI_IER_VC0FLVLERR_SHIFT)
+#define XCSI_IER_ALLINTR_MASK 0xC03FFFFF
+
+#define XCSI_SPKTR_OFFSET 0x00000030
+#define XCSI_SPKTR_DATA_SHIFT 8
+#define XCSI_SPKTR_VC_SHIFT 6
+#define XCSI_SPKTR_DT_SHIFT 0
+#define XCSI_SPKTR_DATA_MASK 0x00FFFF00
+#define XCSI_SPKTR_VC_MASK 0x000000C0
+#define XCSI_SPKTR_DT_MASK 0x0000003F
+
+#define XCSI_VCXR_OFFSET 0x00000034
+#define XCSI_VCXR_VC15FSYNCERR_MASK BIT(23)
+#define XCSI_VCXR_VC15FLVLERR_MASK BIT(22)
+#define XCSI_VCXR_VC14FSYNCERR_MASK BIT(21)
+#define XCSI_VCXR_VC14FLVLERR_MASK BIT(20)
+#define XCSI_VCXR_VC13FSYNCERR_MASK BIT(19)
+#define XCSI_VCXR_VC13FLVLERR_MASK BIT(18)
+#define XCSI_VCXR_VC12FSYNCERR_MASK BIT(17)
+#define XCSI_VCXR_VC12FLVLERR_MASK BIT(16)
+#define XCSI_VCXR_VC11FSYNCERR_MASK BIT(15)
+#define XCSI_VCXR_VC11FLVLERR_MASK BIT(14)
+#define XCSI_VCXR_VC10FSYNCERR_MASK BIT(13)
+#define XCSI_VCXR_VC10FLVLERR_MASK BIT(12)
+#define XCSI_VCXR_VC9FSYNCERR_MASK BIT(11)
+#define XCSI_VCXR_VC9FLVLERR_MASK BIT(10)
+#define XCSI_VCXR_VC8FSYNCERR_MASK BIT(9)
+#define XCSI_VCXR_VC8FLVLERR_MASK BIT(8)
+#define XCSI_VCXR_VC7FSYNCERR_MASK BIT(7)
+#define XCSI_VCXR_VC7FLVLERR_MASK BIT(6)
+#define XCSI_VCXR_VC6FSYNCERR_MASK BIT(5)
+#define XCSI_VCXR_VC6FLVLERR_MASK BIT(4)
+#define XCSI_VCXR_VC5FSYNCERR_MASK BIT(3)
+#define XCSI_VCXR_VC5FLVLERR_MASK BIT(2)
+#define XCSI_VCXR_VC4FSYNCERR_MASK BIT(1)
+#define XCSI_VCXR_VC4FLVLERR_MASK BIT(0)
+#define XCSI_VCXR_MASK 0x00FFFFFF
+
+#define XCSI_CLKINFR_OFFSET 0x0000003C
+#define XCSI_CLKINFR_STOP_SHIFT 1
+#define XCSI_CLKINFR_STOP_MASK BIT(XCSI_CLKINFR_STOP_SHIFT)
+
+#define XCSI_L0INFR_OFFSET 0x00000040
+#define XCSI_L1INFR_OFFSET 0x00000044
+#define XCSI_L2INFR_OFFSET 0x00000048
+#define XCSI_L3INFR_OFFSET 0x0000004C
+#define XCSI_LXINFR_STOP_SHIFT 5
+#define XCSI_LXINFR_SOTERR_SHIFT 1
+#define XCSI_LXINFR_SOTSYNCERR_SHIFT 0
+#define XCSI_LXINFR_STOP_MASK BIT(XCSI_LXINFR_STOP_SHIFT)
+#define XCSI_LXINFR_SOTERR_MASK BIT(XCSI_LXINFR_SOTERR_SHIFT)
+#define XCSI_LXINFR_SOTSYNCERR_MASK BIT(XCSI_LXINFR_SOTSYNCERR_SHIFT)
+
+#define XCSI_VC0INF1R_OFFSET 0x00000060
+#define XCSI_VC1INF1R_OFFSET 0x00000068
+#define XCSI_VC2INF1R_OFFSET 0x00000070
+#define XCSI_VC3INF1R_OFFSET 0x00000078
+#define XCSI_VC4INF1R_OFFSET 0x00000080
+#define XCSI_VC5INF1R_OFFSET 0x00000088
+#define XCSI_VC6INF1R_OFFSET 0x00000090
+#define XCSI_VC7INF1R_OFFSET 0x00000098
+#define XCSI_VC8INF1R_OFFSET 0x000000A0
+#define XCSI_VC9INF1R_OFFSET 0x000000A8
+#define XCSI_VC10INF1R_OFFSET 0x000000B0
+#define XCSI_VC11INF1R_OFFSET 0x000000B8
+#define XCSI_VC12INF1R_OFFSET 0x000000C0
+#define XCSI_VC13INF1R_OFFSET 0x000000C8
+#define XCSI_VC14INF1R_OFFSET 0x000000D0
+#define XCSI_VC15INF1R_OFFSET 0x000000D8
+#define XCSI_VCXINF1R_LINECOUNT_SHIFT 16
+#define XCSI_VCXINF1R_BYTECOUNT_SHIFT 0
+#define XCSI_VCXINF1R_LINECOUNT_MASK 0xFFFF0000
+#define XCSI_VCXINF1R_BYTECOUNT_MASK 0x0000FFFF
+
+#define XCSI_VC0INF2R_OFFSET 0x00000064
+#define XCSI_VC1INF2R_OFFSET 0x0000006C
+#define XCSI_VC2INF2R_OFFSET 0x00000074
+#define XCSI_VC3INF2R_OFFSET 0x0000007C
+#define XCSI_VC4INF2R_OFFSET 0x00000084
+#define XCSI_VC5INF2R_OFFSET 0x0000008C
+#define XCSI_VC6INF2R_OFFSET 0x00000094
+#define XCSI_VC7INF2R_OFFSET 0x0000009C
+#define XCSI_VC8INF2R_OFFSET 0x000000A4
+#define XCSI_VC9INF2R_OFFSET 0x000000AC
+#define XCSI_VC10INF2R_OFFSET 0x000000B4
+#define XCSI_VC11INF2R_OFFSET 0x000000BC
+#define XCSI_VC12INF2R_OFFSET 0x000000C4
+#define XCSI_VC13INF2R_OFFSET 0x000000CC
+#define XCSI_VC14INF2R_OFFSET 0x000000D4
+#define XCSI_VC15INF2R_OFFSET 0x000000DC
+#define XCSI_VCXINF2R_DATATYPE_SHIFT 0
+#define XCSI_VCXINF2R_DATATYPE_MASK 0x0000003F
+
+#define XDPHY_CTRLREG_OFFSET 0x0
+#define XDPHY_CTRLREG_DPHYEN_SHIFT 1
+#define XDPHY_CTRLREG_DPHYEN_MASK BIT(XDPHY_CTRLREG_DPHYEN_SHIFT)
+
+#define XDPHY_CLKSTATREG_OFFSET 0x18
+#define XDPHY_CLKSTATREG_MODE_SHIFT 0
+#define XDPHY_CLKSTATREG_MODE_MASK 0x3
+#define XDPHY_LOW_POWER_MODE 0x0
+#define XDPHY_HI_SPEED_MODE 0x1
+#define XDPHY_ESC_MODE 0x2
+
+/*
+ * Interrupt mask
+ */
+#define XCSI_INTR_MASK (XCSI_ISR_ALLINTR_MASK & ~XCSI_ISR_STOP_MASK)
+/*
+ * Timeout for reset
+ */
+#define XCSI_TIMEOUT_VAL (1000) /* us */
+
+/*
+ * Max string length for CSI Data type string
+ */
+#define MAX_XIL_CSIDT_STR_LENGTH 64
+
+/*
+ * Maximum number of short packet events per file handle.
+ */
+#define XCSI_MAX_SPKT (512)
+
+/* Number of media pads */
+#define XILINX_CSI_MEDIA_PADS (2)
+
+#define XCSI_DEFAULT_WIDTH (1920)
+#define XCSI_DEFAULT_HEIGHT (1080)
+
+#define XCSI_DPHY_CLK_MIN 197000000000UL
+#define XCSI_DPHY_CLK_MAX 203000000000UL
+#define XCSI_DPHY_CLK_REQ 200000000000UL
+
+/*
+ * Macro to return "true" or "false" string if bit is set.
+ * Fully parenthesized so the expansion is safe inside larger expressions;
+ * the original left "&" and "?:" exposed to the caller's precedence, so
+ * e.g. a comparison against the macro result would have misparsed.
+ */
+#define XCSI_GET_BITSET_STR(val, mask)	(((val) & (mask)) ? "true" : "false")
+
+#define XCSI_CLK_PROP BIT(0)
+#define XCSI_DPHY_PROP BIT(1)
+#define XCSI_DPHY_ADDR_PROP BIT(2)
+
+/**
+ * struct xcsi2rxss_feature - dt or IP property structure
+ * @flags: Bitmask of properties enabled in IP or dt
+ */
+struct xcsi2rxss_feature {
+ u32 flags;
+};
+
+/*
+ * CSI_DataTypes - MIPI CSI-2 6-bit packet data type codes.
+ * Values track the data type table of the MIPI CSI-2 specification
+ * (enumerators are consecutive from 0x00 to 0x3F).
+ */
+enum CSI_DataTypes {
+	/* 0x00 - 0x07: frame/line synchronization short packets */
+	MIPI_CSI_DT_FRAME_START_CODE = 0x00,
+	MIPI_CSI_DT_FRAME_END_CODE,
+	MIPI_CSI_DT_LINE_START_CODE,
+	MIPI_CSI_DT_LINE_END_CODE,
+	MIPI_CSI_DT_SYNC_RSVD_04,
+	MIPI_CSI_DT_SYNC_RSVD_05,
+	MIPI_CSI_DT_SYNC_RSVD_06,
+	MIPI_CSI_DT_SYNC_RSVD_07,
+	/* 0x08 - 0x0F: generic short packets */
+	MIPI_CSI_DT_GSPKT_08,
+	MIPI_CSI_DT_GSPKT_09,
+	MIPI_CSI_DT_GSPKT_0A,
+	MIPI_CSI_DT_GSPKT_0B,
+	MIPI_CSI_DT_GSPKT_0C,
+	MIPI_CSI_DT_GSPKT_0D,
+	MIPI_CSI_DT_GSPKT_0E,
+	MIPI_CSI_DT_GSPKT_0F,
+	/* 0x10 - 0x17: generic long packets */
+	MIPI_CSI_DT_GLPKT_10,
+	MIPI_CSI_DT_GLPKT_11,
+	MIPI_CSI_DT_GLPKT_12,
+	MIPI_CSI_DT_GLPKT_13,
+	MIPI_CSI_DT_GLPKT_14,
+	MIPI_CSI_DT_GLPKT_15,
+	MIPI_CSI_DT_GLPKT_16,
+	MIPI_CSI_DT_GLPKT_17,
+	/* 0x18 - 0x1F: YUV image data */
+	MIPI_CSI_DT_YUV_420_8B,
+	MIPI_CSI_DT_YUV_420_10B,
+	MIPI_CSI_DT_YUV_420_8B_LEGACY,
+	MIPI_CSI_DT_YUV_RSVD,
+	MIPI_CSI_DT_YUV_420_8B_CSPS,
+	MIPI_CSI_DT_YUV_420_10B_CSPS,
+	MIPI_CSI_DT_YUV_422_8B,
+	MIPI_CSI_DT_YUV_422_10B,
+	/* 0x20 - 0x27: RGB image data */
+	MIPI_CSI_DT_RGB_444,
+	MIPI_CSI_DT_RGB_555,
+	MIPI_CSI_DT_RGB_565,
+	MIPI_CSI_DT_RGB_666,
+	MIPI_CSI_DT_RGB_888,
+	MIPI_CSI_DT_RGB_RSVD_25,
+	MIPI_CSI_DT_RGB_RSVD_26,
+	MIPI_CSI_DT_RGB_RSVD_27,
+	/* 0x28 - 0x2F: RAW image data */
+	MIPI_CSI_DT_RAW_6,
+	MIPI_CSI_DT_RAW_7,
+	MIPI_CSI_DT_RAW_8,
+	MIPI_CSI_DT_RAW_10,
+	MIPI_CSI_DT_RAW_12,
+	MIPI_CSI_DT_RAW_14,
+	MIPI_CSI_DT_RAW_16,
+	MIPI_CSI_DT_RAW_20,
+	/* 0x30 - 0x37: user defined */
+	MIPI_CSI_DT_USER_30,
+	MIPI_CSI_DT_USER_31,
+	MIPI_CSI_DT_USER_32,
+	MIPI_CSI_DT_USER_33,
+	MIPI_CSI_DT_USER_34,
+	MIPI_CSI_DT_USER_35,
+	MIPI_CSI_DT_USER_36,
+	MIPI_CSI_DT_USER_37,
+	/* 0x38 - 0x3F: reserved */
+	MIPI_CSI_DT_RSVD_38,
+	MIPI_CSI_DT_RSVD_39,
+	MIPI_CSI_DT_RSVD_3A,
+	MIPI_CSI_DT_RSVD_3B,
+	MIPI_CSI_DT_RSVD_3C,
+	MIPI_CSI_DT_RSVD_3D,
+	MIPI_CSI_DT_RSVD_3E,
+	MIPI_CSI_DT_RSVD_3F
+};
+
+/**
+ * struct pixel_format - Data Type to string name structure
+ * @PixelFormat: MIPI CSI2 Data type
+ * @PixelFormatStr: String name of Data Type
+ */
+struct pixel_format {
+ enum CSI_DataTypes PixelFormat;
+ char PixelFormatStr[MAX_XIL_CSIDT_STR_LENGTH];
+};
+
+/**
+ * struct xcsi2rxss_event - Event log structure
+ * @mask: Event mask
+ * @name: Name of the event
+ * @counter: Count number of events
+ */
+struct xcsi2rxss_event {
+ u32 mask;
+ const char * const name;
+ unsigned int counter;
+};
+
+/**
+ * struct xcsi2rxss_core - Core configuration CSI2 Rx Subsystem device structure
+ * @dev: Platform structure
+ * @iomem: Base address of subsystem
+ * @irq: requested irq number
+ * @dphy_offset: DPHY registers offset
+ * @dphy_present: Flag for DPHY register interface presence
+ * @enable_active_lanes: If number of active lanes can be modified
+ * @max_num_lanes: Maximum number of lanes present
+ * @vfb: Video Format Bridge enabled or not
+ * @ppc: pixels per clock
+ * @vc: Virtual Channel
+ * @axis_tdata_width: AXI Stream data width
+ * @datatype: Data type filter
+ * @pxlformat: String with CSI pixel format from IP
+ * @num_lanes: Number of lanes requested from application
+ * @events: Structure to maintain event logs
+ * @vcx_events: Structure to maintain VCX event logs
+ * @en_vcx: If more than 4 VC are enabled.
+ * @cfg: Pointer to csi2rxss config structure
+ * @lite_aclk: AXI4-Lite interface clock
+ * @video_aclk: Video clock
+ * @dphy_clk_200M: 200MHz DPHY clock
+ * @rst_gpio: video_aresetn
+ */
+struct xcsi2rxss_core {
+	struct device *dev;
+	void __iomem *iomem;
+	int irq;
+	u32 dphy_offset;
+	bool dphy_present;
+	bool enable_active_lanes;
+	u32 max_num_lanes;
+	bool vfb;
+	u32 ppc;
+	u32 vc;
+	u32 axis_tdata_width;
+	u32 datatype;
+	const char *pxlformat;
+	u32 num_lanes;
+	struct xcsi2rxss_event *events;
+	struct xcsi2rxss_event *vcx_events;
+	bool en_vcx;
+	const struct xcsi2rxss_feature *cfg;
+	struct clk *lite_aclk;
+	struct clk *video_aclk;
+	struct clk *dphy_clk_200M;
+	struct gpio_desc *rst_gpio;
+};
+
+/**
+ * struct xcsi2rxss_state - CSI2 Rx Subsystem device structure
+ * @core: Core structure for MIPI CSI2 Rx Subsystem
+ * @subdev: The v4l2 subdev structure
+ * @ctrl_handler: control handler
+ * @formats: Active V4L2 formats on each pad
+ * @default_format: default V4L2 media bus format
+ * @vip_format: format information corresponding to the active format
+ * @event: Holds the short packet event
+ * @lock: mutex for serializing operations
+ * @pads: media pads
+ * @npads: number of pads
+ * @streaming: Flag for storing streaming state
+ * @suspended: Flag for storing suspended state
+ *
+ * This structure contains the device driver related parameters
+ */
+struct xcsi2rxss_state {
+ struct xcsi2rxss_core core;
+ struct v4l2_subdev subdev;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_mbus_framefmt formats;
+ struct v4l2_mbus_framefmt default_format;
+ const struct xvip_video_format *vip_format;
+ struct v4l2_event event;
+ struct mutex lock;
+ struct media_pad pads[XILINX_CSI_MEDIA_PADS];
+ unsigned int npads;
+ bool streaming;
+ bool suspended;
+};
+
+/* Per-IP-version capability flags (see the XCSI_*_PROP bit definitions) */
+static const struct xcsi2rxss_feature xlnx_csi2rxss_v5_0 = {
+	.flags = XCSI_CLK_PROP | XCSI_DPHY_PROP | XCSI_DPHY_ADDR_PROP,
+};
+
+static const struct xcsi2rxss_feature xlnx_csi2rxss_v4_1 = {
+	.flags = XCSI_CLK_PROP | XCSI_DPHY_PROP,
+};
+
+static const struct xcsi2rxss_feature xlnx_csi2rxss_v4_0 = {
+	.flags = XCSI_CLK_PROP,
+};
+
+static const struct xcsi2rxss_feature xlnx_csi2rxss_v2_0 = {
+	.flags = 0,
+};
+
+/* DT match table; note v2.0 and v3.0 IPs share the same feature set */
+static const struct of_device_id xcsi2rxss_of_id_table[] = {
+	{ .compatible = "xlnx,mipi-csi2-rx-subsystem-2.0",
+	  .data = &xlnx_csi2rxss_v2_0 },
+	{ .compatible = "xlnx,mipi-csi2-rx-subsystem-3.0",
+	  .data = &xlnx_csi2rxss_v2_0 },
+	{ .compatible = "xlnx,mipi-csi2-rx-subsystem-4.0",
+	  .data = &xlnx_csi2rxss_v4_0 },
+	{ .compatible = "xlnx,mipi-csi2-rx-subsystem-4.1",
+	  .data = &xlnx_csi2rxss_v4_1 },
+	{ .compatible = "xlnx,mipi-csi2-rx-subsystem-5.0",
+	  .data = &xlnx_csi2rxss_v5_0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, xcsi2rxss_of_id_table);
+
+/* Translate an embedded v4l2_subdev pointer back to its device state */
+static inline struct xcsi2rxss_state *
+to_xcsi2rxssstate(struct v4l2_subdev *subdev)
+{
+	return container_of(subdev, struct xcsi2rxss_state, subdev);
+}
+
+/*
+ * Register related operations
+ */
+/* MMIO read of a subsystem register at byte offset @addr */
+static inline u32 xcsi2rxss_read(struct xcsi2rxss_core *xcsi2rxss, u32 addr)
+{
+	return ioread32(xcsi2rxss->iomem + addr);
+}
+
+/* MMIO write of @value to the subsystem register at byte offset @addr */
+static inline void xcsi2rxss_write(struct xcsi2rxss_core *xcsi2rxss, u32 addr,
+				   u32 value)
+{
+	iowrite32(value, xcsi2rxss->iomem + addr);
+}
+
+/* Read-modify-write: clear the bits in @clr */
+static inline void xcsi2rxss_clr(struct xcsi2rxss_core *xcsi2rxss, u32 addr,
+				 u32 clr)
+{
+	u32 reg = xcsi2rxss_read(xcsi2rxss, addr);
+
+	xcsi2rxss_write(xcsi2rxss, addr, reg & ~clr);
+}
+
+/* Read-modify-write: set the bits in @set */
+static inline void xcsi2rxss_set(struct xcsi2rxss_core *xcsi2rxss, u32 addr,
+				 u32 set)
+{
+	u32 reg = xcsi2rxss_read(xcsi2rxss, addr);
+
+	xcsi2rxss_write(xcsi2rxss, addr, reg | set);
+}
+
+/*
+ * Lookup table mapping MIPI CSI-2 data type codes to the string names
+ * used in the device tree "xlnx,csi-pxl-format" style property; used by
+ * xcsi2rxss_pxlfmtstrtodt()/xcsi2rxss_pxlfmtdttostr() below.
+ */
+static const struct pixel_format pixel_formats[] = {
+	{ MIPI_CSI_DT_YUV_420_8B, "YUV420_8bit" },
+	{ MIPI_CSI_DT_YUV_420_10B, "YUV420_10bit" },
+	{ MIPI_CSI_DT_YUV_420_8B_LEGACY, "Legacy_YUV420_8bit" },
+	{ MIPI_CSI_DT_YUV_420_8B_CSPS, "YUV420_8bit_CSPS" },
+	{ MIPI_CSI_DT_YUV_420_10B_CSPS, "YUV420_10bit_CSPS" },
+	{ MIPI_CSI_DT_YUV_422_8B, "YUV422_8bit" },
+	{ MIPI_CSI_DT_YUV_422_10B, "YUV422_10bit" },
+	{ MIPI_CSI_DT_RGB_444, "RGB444" },
+	{ MIPI_CSI_DT_RGB_555, "RGB555" },
+	{ MIPI_CSI_DT_RGB_565, "RGB565" },
+	{ MIPI_CSI_DT_RGB_666, "RGB666" },
+	{ MIPI_CSI_DT_RGB_888, "RGB888" },
+	{ MIPI_CSI_DT_RAW_6, "RAW6" },
+	{ MIPI_CSI_DT_RAW_7, "RAW7" },
+	{ MIPI_CSI_DT_RAW_8, "RAW8" },
+	{ MIPI_CSI_DT_RAW_10, "RAW10" },
+	{ MIPI_CSI_DT_RAW_12, "RAW12" },
+	{ MIPI_CSI_DT_RAW_14, "RAW14"},
+	{ MIPI_CSI_DT_RAW_16, "RAW16"},
+	{ MIPI_CSI_DT_RAW_20, "RAW20"}
+};
+
+/*
+ * Interrupt events tracked for debug/status; counters are bumped in the
+ * interrupt handler through core->events.
+ * NOTE(review): if core->events is pointed at this static table (rather
+ * than a per-device copy made at probe time), the counters would be
+ * shared across instances - confirm against the probe code.
+ */
+static struct xcsi2rxss_event xcsi2rxss_events[] = {
+	{ XCSI_ISR_FR_MASK, "Frame Received", 0 },
+	{ XCSI_ISR_VCX_MASK, "VCX Frame Errors", 0 },
+	{ XCSI_ISR_ILC_MASK, "Invalid Lane Count Error", 0 },
+	{ XCSI_ISR_SPFIFOF_MASK, "Short Packet FIFO OverFlow Error", 0 },
+	{ XCSI_ISR_SPFIFONE_MASK, "Short Packet FIFO Not Empty", 0 },
+	{ XCSI_ISR_SLBF_MASK, "Streamline Buffer Full Error", 0 },
+	{ XCSI_ISR_STOP_MASK, "Lane Stop State", 0 },
+	{ XCSI_ISR_SOTERR_MASK, "SOT Error", 0 },
+	{ XCSI_ISR_SOTSYNCERR_MASK, "SOT Sync Error", 0 },
+	{ XCSI_ISR_ECC2BERR_MASK, "2 Bit ECC Unrecoverable Error", 0 },
+	{ XCSI_ISR_ECC1BERR_MASK, "1 Bit ECC Recoverable Error", 0 },
+	{ XCSI_ISR_CRCERR_MASK, "CRC Error", 0 },
+	{ XCSI_ISR_DATAIDERR_MASK, "Data Id Error", 0 },
+	{ XCSI_ISR_VC3FSYNCERR_MASK, "Virtual Channel 3 Frame Sync Error", 0 },
+	{ XCSI_ISR_VC3FLVLERR_MASK, "Virtual Channel 3 Frame Level Error", 0 },
+	{ XCSI_ISR_VC2FSYNCERR_MASK, "Virtual Channel 2 Frame Sync Error", 0 },
+	{ XCSI_ISR_VC2FLVLERR_MASK, "Virtual Channel 2 Frame Level Error", 0 },
+	{ XCSI_ISR_VC1FSYNCERR_MASK, "Virtual Channel 1 Frame Sync Error", 0 },
+	{ XCSI_ISR_VC1FLVLERR_MASK, "Virtual Channel 1 Frame Level Error", 0 },
+	{ XCSI_ISR_VC0FSYNCERR_MASK, "Virtual Channel 0 Frame Sync Error", 0 },
+	{ XCSI_ISR_VC0FLVLERR_MASK, "Virtual Channel 0 Frame Level Error", 0 }
+};
+
+#define XMIPICSISS_NUM_EVENTS ARRAY_SIZE(xcsi2rxss_events)
+
+#define XMIPICSISS_VCX_START (4)
+#define XMIPICSISS_MAX_VC (4)
+#define XMIPICSISS_MAX_VCX (16)
+
+/* There are 2 events frame sync and frame level error per VC */
+#define XMIPICSISS_VCX_NUM_EVENTS ((XMIPICSISS_MAX_VCX -\
+ XMIPICSISS_MAX_VC) * 2)
+
+/**
+ * xcsi2rxss_clr_and_set - Update a register, clearing then setting bits
+ * @xcsi2rxss: Xilinx MIPI CSI2 Rx Subsystem subdev core struct
+ * @addr: address of register
+ * @clr: bitmask to be cleared
+ * @set: bitmask to be set
+ *
+ * Read the register at @addr, drop the bits in @clr, raise the bits in
+ * @set and write the result back with a single store.
+ */
+static void xcsi2rxss_clr_and_set(struct xcsi2rxss_core *xcsi2rxss,
+				  u32 addr, u32 clr, u32 set)
+{
+	u32 val = xcsi2rxss_read(xcsi2rxss, addr);
+
+	xcsi2rxss_write(xcsi2rxss, addr, (val & ~clr) | set);
+}
+
+/**
+ * xcsi2rxss_pxlfmtstrtodt - Convert pixel format string got from dts
+ * to data type.
+ * @pxlfmtstr: String obtained while parsing device node
+ *
+ * Look up a CSI pixel format string from the device tree in the
+ * pixel_formats table and return the matching MIPI CSI-2 data type.
+ * Eg. "RAW8" maps to 0x2A; refer to the MIPI CSI2 specification.
+ *
+ * Return: Equivalent pixel format value from table, or -EINVAL (which
+ * wraps to a large u32 given the return type) when no entry matches.
+ */
+static u32 xcsi2rxss_pxlfmtstrtodt(const char *pxlfmtstr)
+{
+	u32 i;
+
+	for (i = 0; i < ARRAY_SIZE(pixel_formats); i++)
+		if (!strncmp(pixel_formats[i].PixelFormatStr, pxlfmtstr,
+			     MAX_XIL_CSIDT_STR_LENGTH))
+			return pixel_formats[i].PixelFormat;
+
+	return -EINVAL;
+}
+
+/**
+ * xcsi2rxss_pxlfmtdttostr - Convert pixel format data type to string.
+ * @datatype: MIPI CSI-2 Data Type
+ *
+ * Look up a CSI pixel format data type in the pixel_formats table and
+ * return its string name. Eg. 0x2A yields "RAW8"; refer to the MIPI
+ * CSI2 specification for the code points.
+ *
+ * Return: Equivalent pixel format string from table, or NULL when the
+ * data type has no entry.
+ */
+static const char *xcsi2rxss_pxlfmtdttostr(u32 datatype)
+{
+	u32 i;
+
+	for (i = 0; i < ARRAY_SIZE(pixel_formats); i++)
+		if (pixel_formats[i].PixelFormat == datatype)
+			return pixel_formats[i].PixelFormatStr;
+
+	return NULL;
+}
+
+/**
+ * xcsi2rxss_enable - Enable or disable the CSI Core
+ * @core: Core Xilinx CSI2 Rx Subsystem structure pointer
+ * @flag: true for enabling, false for disabling
+ *
+ * Write the core enable bit of the CSI2 Rx Subsystem control register,
+ * then, when the DPHY register interface is present, the DPHY enable
+ * bit at its control register offset.
+ */
+static void xcsi2rxss_enable(struct xcsi2rxss_core *core, bool flag)
+{
+	u32 dphy_ctrl_off = core->dphy_offset + XDPHY_CTRLREG_OFFSET;
+	u32 ccr_val = flag ? XCSI_CCR_COREENB_MASK : 0;
+	u32 dphy_val = flag ? XDPHY_CTRLREG_DPHYEN_MASK : 0;
+
+	xcsi2rxss_write(core, XCSI_CCR_OFFSET, ccr_val);
+	if (core->dphy_present)
+		xcsi2rxss_write(core, dphy_ctrl_off, dphy_val);
+}
+
+/**
+ * xcsi2rxss_interrupts_enable - Enable or disable CSI interrupts
+ * @core: Core Xilinx CSI2 Rx Subsystem structure pointer
+ * @flag: true for enabling, false for disabling
+ *
+ * Program the IER with XCSI_INTR_MASK and gate the result through the
+ * global interrupt enable (GIER) bit.
+ */
+static void xcsi2rxss_interrupts_enable(struct xcsi2rxss_core *core, bool flag)
+{
+	if (!flag) {
+		xcsi2rxss_clr(core, XCSI_IER_OFFSET, XCSI_INTR_MASK);
+		xcsi2rxss_clr(core, XCSI_GIER_OFFSET, XCSI_GIER_GIE_MASK);
+		return;
+	}
+
+	/* Mask globally while updating IER, then unmask */
+	xcsi2rxss_clr(core, XCSI_GIER_OFFSET, XCSI_GIER_GIE_MASK);
+	xcsi2rxss_write(core, XCSI_IER_OFFSET, XCSI_INTR_MASK);
+	xcsi2rxss_set(core, XCSI_GIER_OFFSET, XCSI_GIER_GIE_MASK);
+}
+
+/**
+ * xcsi2rxss_reset - Does a soft reset of the MIPI CSI2 Rx Subsystem
+ * @core: Core Xilinx CSI2 Rx Subsystem structure pointer
+ *
+ * Assert the soft reset bit, poll (up to XCSI_TIMEOUT_VAL microseconds)
+ * until the core no longer reports reset in progress, then deassert it.
+ *
+ * Return: 0 - on success OR -ETIME if reset times out
+ */
+static int xcsi2rxss_reset(struct xcsi2rxss_core *core)
+{
+	u32 retries = XCSI_TIMEOUT_VAL;
+
+	xcsi2rxss_set(core, XCSI_CCR_OFFSET, XCSI_CCR_SOFTRESET_MASK);
+
+	while (xcsi2rxss_read(core, XCSI_CSR_OFFSET) & XCSI_CSR_RIPCD_MASK) {
+		if (!retries--) {
+			dev_err(core->dev, "Xilinx CSI2 Rx Subsystem Soft Reset Timeout!\n");
+			return -ETIME;
+		}
+
+		udelay(1);
+	}
+
+	xcsi2rxss_clr(core, XCSI_CCR_OFFSET, XCSI_CCR_SOFTRESET_MASK);
+	return 0;
+}
+
+/* Disable subsystem interrupts, then the core (and DPHY if present) */
+static void xcsi2rxss_stop_stream(struct xcsi2rxss_state *xcsi2rxss)
+{
+	xcsi2rxss_interrupts_enable(&xcsi2rxss->core, false);
+	xcsi2rxss_enable(&xcsi2rxss->core, false);
+}
+
+/**
+ * xcsi2rxss_irq_handler - Interrupt handler for CSI-2
+ * @irq: IRQ number
+ * @dev_id: Pointer to device state
+ *
+ * In the interrupt handler, a list of event counters are updated for
+ * corresponding interrupts. This is useful to get status / debug.
+ * If the short packet FIFO not empty or overflow interrupt is received
+ * capture the short packet and notify of event occurrence
+ *
+ * Return: IRQ_HANDLED after handling interrupts
+ */
+static irqreturn_t xcsi2rxss_irq_handler(int irq, void *dev_id)
+{
+	struct xcsi2rxss_state *state = (struct xcsi2rxss_state *)dev_id;
+	struct xcsi2rxss_core *core = &state->core;
+	u32 status;
+
+	/* Consider only the interrupts this driver enabled */
+	status = xcsi2rxss_read(core, XCSI_ISR_OFFSET) & XCSI_INTR_MASK;
+	dev_dbg(core->dev, "interrupt status = 0x%08x\n", status);
+
+	if (!status)
+		return IRQ_NONE;
+
+	/* Short packet available: deliver its contents as a V4L2 event */
+	if (status & XCSI_ISR_SPFIFONE_MASK) {
+
+		memset(&state->event, 0, sizeof(state->event));
+
+		state->event.type = V4L2_EVENT_XLNXCSIRX_SPKT;
+
+		*((u32 *)(&state->event.u.data)) =
+			xcsi2rxss_read(core, XCSI_SPKTR_OFFSET);
+
+		v4l2_subdev_notify_event(&state->subdev, &state->event);
+	}
+
+	/* Short packet FIFO overflow: notify user space (data was lost) */
+	if (status & XCSI_ISR_SPFIFOF_MASK) {
+		dev_alert(core->dev, "Short packet FIFO overflowed\n");
+
+		memset(&state->event, 0, sizeof(state->event));
+
+		state->event.type = V4L2_EVENT_XLNXCSIRX_SPKT_OVF;
+
+		v4l2_subdev_notify_event(&state->subdev, &state->event);
+	}
+
+	/*
+	 * Stream line buffer full: pulse the external video reset (when
+	 * wired), stop the stream and notify user space.
+	 */
+	if (status & XCSI_ISR_SLBF_MASK) {
+		dev_alert(core->dev, "Stream Line Buffer Full!\n");
+		if (core->rst_gpio) {
+			gpiod_set_value(core->rst_gpio, 1);
+			/* minimum 40 dphy_clk_200M cycles */
+			ndelay(250);
+			gpiod_set_value(core->rst_gpio, 0);
+		}
+
+		xcsi2rxss_stop_stream(state);
+
+		memset(&state->event, 0, sizeof(state->event));
+
+		state->event.type = V4L2_EVENT_XLNXCSIRX_SLBF;
+
+		v4l2_subdev_notify_event(&state->subdev, &state->event);
+	}
+
+	/* Bump the per-event debug counters for everything that fired */
+	if (status & XCSI_ISR_ALLINTR_MASK) {
+		unsigned int i;
+
+		for (i = 0; i < XMIPICSISS_NUM_EVENTS; i++) {
+			if (!(status & core->events[i].mask))
+				continue;
+			core->events[i].counter++;
+			dev_dbg(core->dev, "%s: %d\n", core->events[i].name,
+				core->events[i].counter);
+		}
+
+		/* Errors for VC4..VC15 are reported in a separate register */
+		if (status & XCSI_ISR_VCX_MASK && core->en_vcx) {
+			u32 vcxstatus;
+
+			vcxstatus = xcsi2rxss_read(core, XCSI_VCXR_OFFSET);
+			vcxstatus &= XCSI_VCXR_MASK;
+			for (i = 0; i < XMIPICSISS_VCX_NUM_EVENTS; i++) {
+				if (!(vcxstatus & core->vcx_events[i].mask))
+					continue;
+				core->vcx_events[i].counter++;
+			}
+			xcsi2rxss_write(core, XCSI_VCXR_OFFSET, vcxstatus);
+		}
+	}
+
+	/* Write the handled bits back to ISR (acknowledge - W1C assumed) */
+	xcsi2rxss_write(core, XCSI_ISR_OFFSET, status);
+
+	return IRQ_HANDLED;
+}
+
+/* Zero all event counters (including the VCX set when enabled) */
+static void xcsi2rxss_reset_event_counters(struct xcsi2rxss_state *state)
+{
+	unsigned int i;
+
+	for (i = 0; i < XMIPICSISS_NUM_EVENTS; i++)
+		state->core.events[i].counter = 0;
+
+	if (state->core.en_vcx)
+		for (i = 0; i < XMIPICSISS_VCX_NUM_EVENTS; i++)
+			state->core.vcx_events[i].counter = 0;
+}
+
+/**
+ * xcsi2rxss_log_counters - Print out the event counters.
+ * @state: Pointer to device state
+ *
+ * Only counters that have fired at least once are printed. For VCX
+ * events (VC4..VC15), each virtual channel has a Level and a Sync
+ * counter pair, interleaved in the vcx_events array.
+ */
+static void xcsi2rxss_log_counters(struct xcsi2rxss_state *state)
+{
+	int i;
+
+	for (i = 0; i < XMIPICSISS_NUM_EVENTS; i++) {
+		if (state->core.events[i].counter > 0)
+			v4l2_info(&state->subdev, "%s events: %d\n",
+				  state->core.events[i].name,
+				  state->core.events[i].counter);
+	}
+
+	if (!state->core.en_vcx)
+		return;
+
+	for (i = 0; i < XMIPICSISS_VCX_NUM_EVENTS; i++) {
+		if (state->core.vcx_events[i].counter > 0)
+			v4l2_info(&state->subdev,
+				  "VC %d Frame %s error vcx events: %d\n",
+				  (i / 2) + XMIPICSISS_VCX_START,
+				  i & 1 ? "Sync" : "Level",
+				  state->core.vcx_events[i].counter);
+	}
+}
+
+/**
+ * xcsi2rxss_log_status - Logs the status of the CSI-2 Receiver
+ * @sd: Pointer to V4L2 subdevice structure
+ *
+ * This function prints the current status of Xilinx MIPI CSI-2
+ *
+ * Return: 0 on success
+ */
+static int xcsi2rxss_log_status(struct v4l2_subdev *sd)
+{
+	struct xcsi2rxss_state *xcsi2rxss = to_xcsi2rxssstate(sd);
+	struct xcsi2rxss_core *core = &xcsi2rxss->core;
+	u32 reg, data, i, max_vc;
+
+	mutex_lock(&xcsi2rxss->lock);
+
+	xcsi2rxss_log_counters(xcsi2rxss);
+
+	v4l2_info(sd, "***** Core Status *****\n");
+	data = xcsi2rxss_read(core, XCSI_CSR_OFFSET);
+	v4l2_info(sd, "Short Packet FIFO Full = %s\n",
+		  XCSI_GET_BITSET_STR(data, XCSI_CSR_SPFIFOFULL_MASK));
+	v4l2_info(sd, "Short Packet FIFO Not Empty = %s\n",
+		  XCSI_GET_BITSET_STR(data, XCSI_CSR_SPFIFONE_MASK));
+	v4l2_info(sd, "Stream line buffer full = %s\n",
+		  XCSI_GET_BITSET_STR(data, XCSI_CSR_SLBF_MASK));
+	v4l2_info(sd, "Soft reset/Core disable in progress = %s\n",
+		  XCSI_GET_BITSET_STR(data, XCSI_CSR_RIPCD_MASK));
+
+	/* Clk & Lane Info */
+	v4l2_info(sd, "******** Clock Lane Info *********\n");
+	data = xcsi2rxss_read(core, XCSI_CLKINFR_OFFSET);
+	v4l2_info(sd, "Clock Lane in Stop State = %s\n",
+		  XCSI_GET_BITSET_STR(data, XCSI_CLKINFR_STOP_MASK));
+
+	v4l2_info(sd, "******** Data Lane Info *********\n");
+	v4l2_info(sd, "Lane\tSoT Error\tSoT Sync Error\tStop State\n");
+	/* Four consecutive lane info registers, 4 bytes apart (L0..L3) */
+	reg = XCSI_L0INFR_OFFSET;
+	for (i = 0; i < 4; i++) {
+		data = xcsi2rxss_read(core, reg);
+
+		v4l2_info(sd, "%d\t%s\t\t%s\t\t%s\n",
+			  i,
+			  XCSI_GET_BITSET_STR(data, XCSI_LXINFR_SOTERR_MASK),
+			  XCSI_GET_BITSET_STR(data, XCSI_LXINFR_SOTSYNCERR_MASK),
+			  XCSI_GET_BITSET_STR(data, XCSI_LXINFR_STOP_MASK));
+
+		reg += 4;
+	}
+
+	/* Virtual Channel Image Information */
+	v4l2_info(sd, "********** Virtual Channel Info ************\n");
+	v4l2_info(sd, "VC\tLine Count\tByte Count\tData Type\n");
+	if (core->en_vcx)
+		max_vc = XMIPICSISS_MAX_VCX;
+	else
+		max_vc = XMIPICSISS_MAX_VC;
+
+	/* Each VC has an INF1R/INF2R register pair at consecutive offsets */
+	reg = XCSI_VC0INF1R_OFFSET;
+	for (i = 0; i < max_vc; i++) {
+		u32 line_count, byte_count, data_type;
+		char *datatypestr;
+
+		/* Get line and byte count from VCXINFR1 Register */
+		data = xcsi2rxss_read(core, reg);
+		byte_count = (data & XCSI_VCXINF1R_BYTECOUNT_MASK) >>
+				XCSI_VCXINF1R_BYTECOUNT_SHIFT;
+		line_count = (data & XCSI_VCXINF1R_LINECOUNT_MASK) >>
+				XCSI_VCXINF1R_LINECOUNT_SHIFT;
+
+		/* Get data type from VCXINFR2 Register */
+		reg += 4;
+		data = xcsi2rxss_read(core, reg);
+		data_type = (data & XCSI_VCXINF2R_DATATYPE_MASK) >>
+				XCSI_VCXINF2R_DATATYPE_SHIFT;
+		/* May be NULL for reserved data types; printk renders "(null)" */
+		datatypestr = (char *)xcsi2rxss_pxlfmtdttostr(data_type);
+
+		v4l2_info(sd, "%d\t%d\t\t%d\t\t%s\n",
+			  i, line_count, byte_count, datatypestr);
+
+		/* Move to next pair of VC Info registers */
+		reg += 4;
+	}
+
+	mutex_unlock(&xcsi2rxss->lock);
+
+	return 0;
+}
+
+/**
+ * xcsi2rxss_subscribe_event - Subscribe to the custom short packet
+ * receive event.
+ * @sd: V4L2 Sub device
+ * @fh: V4L2 File Handle
+ * @sub: Subscribe event structure
+ *
+ * There are three types of events to be subscribed.
+ *
+ * First is to register for receiving a short packet.
+ * The short packets received are queued up in a FIFO.
+ * On reception of a short packet, an event will be generated
+ * with the short packet contents copied to its data area.
+ * Application subscribed to this event will poll for POLLPRI.
+ * On getting the event, the app dequeues the event to get the short packet
+ * data.
+ *
+ * Second is to register for Short packet FIFO overflow
+ * In case the rate of receiving short packets is high and
+ * the short packet FIFO overflows, this event will be triggered.
+ *
+ * Third is to register for the Stream Line Buffer Full (SLBF) condition.
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int xcsi2rxss_subscribe_event(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ int ret;
+ struct xcsi2rxss_state *xcsi2rxss = to_xcsi2rxssstate(sd);
+
+ mutex_lock(&xcsi2rxss->lock);
+
+ switch (sub->type) {
+ /*
+ * Only this driver's private CSI-2 Rx events may be subscribed
+ * to; up to XCSI_MAX_SPKT events can be queued per subscriber.
+ */
+ case V4L2_EVENT_XLNXCSIRX_SPKT:
+ case V4L2_EVENT_XLNXCSIRX_SPKT_OVF:
+ case V4L2_EVENT_XLNXCSIRX_SLBF:
+ ret = v4l2_event_subscribe(fh, sub, XCSI_MAX_SPKT, NULL);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&xcsi2rxss->lock);
+
+ return ret;
+}
+
+/**
+ * xcsi2rxss_unsubscribe_event - Unsubscribe from all events registered
+ * @sd: V4L2 Sub device
+ * @fh: V4L2 file handle
+ * @sub: pointer to Event unsubscription structure
+ *
+ * Return: zero on success, else a negative error code.
+ */
+static int xcsi2rxss_unsubscribe_event(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ int ret = 0;
+ struct xcsi2rxss_state *xcsi2rxss = to_xcsi2rxssstate(sd);
+
+ /* Serialize against subscribe/IRQ-driven event queueing. */
+ mutex_lock(&xcsi2rxss->lock);
+ ret = v4l2_event_unsubscribe(fh, sub);
+ mutex_unlock(&xcsi2rxss->lock);
+
+ return ret;
+}
+
+/**
+ * xcsi2rxss_s_ctrl - This is used to set the Xilinx MIPI CSI-2 V4L2 controls
+ * @ctrl: V4L2 control to be set
+ *
+ * This function is used to set the V4L2 controls for the Xilinx MIPI
+ * CSI-2 Rx Subsystem. It is used to set the active lanes in the system.
+ * The event counters can be reset.
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int xcsi2rxss_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ int ret = 0;
+ u32 Timeout = XCSI_TIMEOUT_VAL;
+ u32 active_lanes = 1;
+
+ struct xcsi2rxss_state *xcsi2rxss =
+ container_of(ctrl->handler,
+ struct xcsi2rxss_state, ctrl_handler);
+ struct xcsi2rxss_core *core = &xcsi2rxss->core;
+
+ mutex_lock(&xcsi2rxss->lock);
+
+ switch (ctrl->id) {
+ case V4L2_CID_XILINX_MIPICSISS_ACT_LANES:
+ /*
+ * This will be called only when "Enable Active Lanes" parameter
+ * is set in design
+ */
+ /* The register field encodes lane count minus one. */
+ xcsi2rxss_clr_and_set(core, XCSI_PCR_OFFSET,
+ XCSI_PCR_ACTLANES_MASK, ctrl->val - 1);
+
+ /*
+ * If the core is enabled, wait for active lanes to be
+ * set.
+ *
+ * If core is disabled or there is no clock from DPHY Tx
+ * then the read back won't reflect the updated value
+ * as the PPI clock will not be present.
+ */
+
+ if (core->dphy_present) {
+ u32 dphyclkstatregoffset = core->dphy_offset +
+ XDPHY_CLKSTATREG_OFFSET;
+
+ u32 dphyclkstat =
+ xcsi2rxss_read(core, dphyclkstatregoffset) &
+ XDPHY_CLKSTATREG_MODE_MASK;
+
+ u32 coreenable =
+ xcsi2rxss_read(core, XCSI_CCR_OFFSET) &
+ XCSI_CCR_COREENB_MASK;
+
+ char lpmstr[] = "Low Power";
+ char hsmstr[] = "High Speed";
+ char esmstr[] = "Escape";
+ char *modestr;
+
+ /* Map DPHY clock lane status field to a name for logging. */
+ switch (dphyclkstat) {
+ case 0:
+ modestr = lpmstr;
+ break;
+ case 1:
+ modestr = hsmstr;
+ break;
+ case 2:
+ modestr = esmstr;
+ break;
+ default:
+ /*
+ * NOTE(review): modestr stays NULL for unknown status
+ * values; printk renders a NULL %s as "(null)", so the
+ * dev_dbg below is safe but uninformative.
+ */
+ modestr = NULL;
+ break;
+ }
+
+ dev_dbg(core->dev, "DPHY Clock Lane in %s mode\n",
+ modestr);
+
+ if ((dphyclkstat == XDPHY_HI_SPEED_MODE) &&
+ coreenable) {
+
+ /* Wait for core to apply new active lanes */
+ /*
+ * Busy-wait of XCSI_TIMEOUT_VAL microseconds (1 us
+ * per loop iteration) before reading back.
+ */
+ while (Timeout--)
+ udelay(1);
+
+ /* Read back lanes-minus-one field and re-add one. */
+ active_lanes =
+ xcsi2rxss_read(core, XCSI_PCR_OFFSET);
+ active_lanes &= XCSI_PCR_ACTLANES_MASK;
+ active_lanes++;
+
+ if (active_lanes != ctrl->val) {
+ dev_err(core->dev, "Failed to set active lanes!\n");
+ ret = -EAGAIN;
+ }
+ }
+ } else {
+ dev_dbg(core->dev, "No read back as no DPHY present.\n");
+ }
+
+ dev_dbg(core->dev, "Set active lanes: requested = %d, active = %d\n",
+ ctrl->val, active_lanes);
+ break;
+ case V4L2_CID_XILINX_MIPICSISS_RESET_COUNTERS:
+ /* Button control: clear all event/frame counters. */
+ xcsi2rxss_reset_event_counters(xcsi2rxss);
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&xcsi2rxss->lock);
+
+ return ret;
+}
+
+/**
+ * xcsi2rxss_g_volatile_ctrl - get the Xilinx MIPI CSI-2 Rx controls
+ * @ctrl: Pointer to V4L2 control
+ *
+ * This is used to get the number of frames received by the Xilinx
+ * MIPI CSI-2 Rx.
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int xcsi2rxss_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ int ret = 0;
+ struct xcsi2rxss_state *xcsi2rxss =
+ container_of(ctrl->handler,
+ struct xcsi2rxss_state, ctrl_handler);
+
+ mutex_lock(&xcsi2rxss->lock);
+
+ switch (ctrl->id) {
+ case V4L2_CID_XILINX_MIPICSISS_FRAME_COUNTER:
+ /*
+ * events[0] is assumed to hold the "Frame Received" counter —
+ * TODO confirm against the xcsi2rxss_events table (not visible
+ * in this chunk).
+ */
+ ctrl->val = xcsi2rxss->core.events[0].counter;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&xcsi2rxss->lock);
+
+ return ret;
+}
+
+/**
+ * xcsi2rxss_start_stream - Enable the core and start streaming
+ * @xcsi2rxss: Pointer to the CSI-2 Rx Subsystem state
+ *
+ * Enables the core, issues a soft reset and, if the reset succeeds,
+ * unmasks the subsystem interrupts.
+ *
+ * Return: 0 on success, negative error code from the soft reset otherwise
+ */
+static int xcsi2rxss_start_stream(struct xcsi2rxss_state *xcsi2rxss)
+{
+ int ret;
+
+ xcsi2rxss_enable(&xcsi2rxss->core, true);
+
+ ret = xcsi2rxss_reset(&xcsi2rxss->core);
+ if (ret < 0)
+ return ret;
+
+ xcsi2rxss_interrupts_enable(&xcsi2rxss->core, true);
+
+ return 0;
+}
+
+
+/**
+ * xcsi2rxss_s_stream - It is used to start/stop the streaming.
+ * @sd: V4L2 Sub device
+ * @enable: Flag (True / False)
+ *
+ * This function controls the start or stop of streaming for the
+ * Xilinx MIPI CSI-2 Rx Subsystem provided the device isn't in
+ * suspended state.
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int xcsi2rxss_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ int ret = 0;
+ struct xcsi2rxss_state *xcsi2rxss = to_xcsi2rxssstate(sd);
+
+ mutex_lock(&xcsi2rxss->lock);
+
+ /* Refuse stream state changes while power-suspended. */
+ if (xcsi2rxss->suspended) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ if (enable) {
+ if (!xcsi2rxss->streaming) {
+ /* reset the event counters */
+ xcsi2rxss_reset_event_counters(xcsi2rxss);
+
+ ret = xcsi2rxss_start_stream(xcsi2rxss);
+ if (ret == 0)
+ xcsi2rxss->streaming = true;
+ }
+ } else {
+ if (xcsi2rxss->streaming) {
+ struct gpio_desc *rst = xcsi2rxss->core.rst_gpio;
+
+ /*
+ * Pulse the optional external video reset before
+ * stopping. The 1-2 us pulse presumably satisfies the
+ * 40 dphy_clk_200M cycle minimum noted at probe time —
+ * TODO confirm against IP documentation.
+ */
+ if (rst) {
+ gpiod_set_value_cansleep(rst, 1);
+ usleep_range(1, 2);
+ gpiod_set_value_cansleep(rst, 0);
+ }
+
+ xcsi2rxss_stop_stream(xcsi2rxss);
+ xcsi2rxss->streaming = false;
+ }
+ }
+unlock:
+ mutex_unlock(&xcsi2rxss->lock);
+ return ret;
+}
+
+/**
+ * __xcsi2rxss_get_pad_format - Get the pad format storage for a pad
+ * @xcsi2rxss: Pointer to the CSI-2 Rx Subsystem state
+ * @cfg: Pad configuration used for TRY formats
+ * @pad: Pad index
+ * @which: V4L2_SUBDEV_FORMAT_TRY or V4L2_SUBDEV_FORMAT_ACTIVE
+ *
+ * Return: pointer to the TRY or ACTIVE format, or NULL for an invalid
+ * @which value. NOTE(review): callers in this file dereference the result
+ * without a NULL check; they rely on the V4L2 core validating @which.
+ */
+static struct v4l2_mbus_framefmt *
+__xcsi2rxss_get_pad_format(struct xcsi2rxss_state *xcsi2rxss,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xcsi2rxss->subdev, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xcsi2rxss->formats;
+ default:
+ return NULL;
+ }
+}
+
+/**
+ * xcsi2rxss_get_format - Get the pad format
+ * @sd: Pointer to V4L2 Sub device structure
+ * @cfg: Pointer to sub device pad information structure
+ * @fmt: Pointer to pad level media bus format
+ *
+ * This function is used to get the pad format information.
+ *
+ * Return: 0 on success
+ */
+static int xcsi2rxss_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+
+ struct xcsi2rxss_state *xcsi2rxss = to_xcsi2rxssstate(sd);
+
+ /*
+ * NOTE(review): __xcsi2rxss_get_pad_format() can return NULL for an
+ * invalid fmt->which; this dereference assumes the V4L2 core has
+ * already validated it.
+ */
+ mutex_lock(&xcsi2rxss->lock);
+ fmt->format = *__xcsi2rxss_get_pad_format(xcsi2rxss, cfg,
+ fmt->pad, fmt->which);
+ mutex_unlock(&xcsi2rxss->lock);
+
+ return 0;
+}
+
+/**
+ * xcsi2rxss_set_format - This is used to set the pad format
+ * @sd: Pointer to V4L2 Sub device structure
+ * @cfg: Pointer to sub device pad information structure
+ * @fmt: Pointer to pad level media bus format
+ *
+ * This function is used to set the pad format.
+ * Since the pad format is fixed in hardware, it can't be
+ * modified on run time. So when a format set is requested by
+ * application, all parameters except the format type is
+ * saved for the pad and the original pad format is sent
+ * back to the application.
+ *
+ * Return: 0 on success
+ */
+static int xcsi2rxss_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *__format;
+ struct xcsi2rxss_state *xcsi2rxss = to_xcsi2rxssstate(sd);
+ struct xcsi2rxss_core *core = &xcsi2rxss->core;
+ u32 code;
+
+ mutex_lock(&xcsi2rxss->lock);
+
+ /*
+ * Only the format->code parameter matters for CSI as the
+ * CSI format cannot be changed at runtime.
+ * Ensure that format to set is copied to over to CSI pad format
+ */
+ __format = __xcsi2rxss_get_pad_format(xcsi2rxss, cfg,
+ fmt->pad, fmt->which);
+
+ /*
+ * If trying to set format on source pad, then
+ * return the format set on sink pad
+ */
+ /* Pad 0 is the source pad (see the pad flags set in probe). */
+ if (fmt->pad == 0) {
+ fmt->format = *__format;
+ goto unlock_set_fmt;
+ }
+
+ /* Save the pad format code */
+ code = __format->code;
+
+ /* If the bayer pattern to be set is SXXXX8 then only 1x8 type
+ * is supported and core's data type doesn't matter.
+ * In case the bayer pattern being set is SXXX10 then only
+ * 1x10 type are supported and core should be configured for RAW10.
+ * In case the bayer pattern being set is SXXX12 then only
+ * 1x12 type are supported and core should be configured for RAW12.
+ *
+ * Otherwise don't allow change.
+ */
+ /*
+ * NOTE(review): the true branch of this if is a single braceless
+ * statement separated from the condition by a comment — fragile but
+ * valid C; kept byte-identical here as it is part of a patch hunk.
+ */
+ if (((fmt->format.code == MEDIA_BUS_FMT_SBGGR8_1X8) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SGBRG8_1X8) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SGRBG8_1X8) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SRGGB8_1X8))
+ || ((core->datatype == MIPI_CSI_DT_RAW_10) &&
+ ((fmt->format.code == MEDIA_BUS_FMT_SBGGR10_1X10) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SGBRG10_1X10) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SGRBG10_1X10) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SRGGB10_1X10)))
+ || ((core->datatype == MIPI_CSI_DT_RAW_12) &&
+ ((fmt->format.code == MEDIA_BUS_FMT_SBGGR12_1X12) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SGBRG12_1X12) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SGRBG12_1X12) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SRGGB12_1X12))) ||
+ ((core->datatype == MIPI_CSI_DT_RAW_16) &&
+ ((fmt->format.code == MEDIA_BUS_FMT_SBGGR16_1X16) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SGBRG16_1X16) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SGRBG16_1X16) ||
+ (fmt->format.code == MEDIA_BUS_FMT_SRGGB16_1X16))))
+
+ /* Copy over the format to be set */
+ *__format = fmt->format;
+ else {
+ /* Restore the original pad format code */
+ fmt->format.code = code;
+ __format->code = code;
+ __format->width = fmt->format.width;
+ __format->height = fmt->format.height;
+ }
+
+unlock_set_fmt:
+ mutex_unlock(&xcsi2rxss->lock);
+
+ return 0;
+}
+
+/**
+ * xcsi2rxss_open - Called on v4l2_open()
+ * @sd: Pointer to V4L2 sub device structure
+ * @fh: Pointer to V4L2 File handle
+ *
+ * This function is called on v4l2_open(). It sets the default format
+ * for both pads.
+ *
+ * Return: 0 on success
+ */
+static int xcsi2rxss_open(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_mbus_framefmt *format;
+ struct xcsi2rxss_state *xcsi2rxss = to_xcsi2rxssstate(sd);
+
+ /* Seed both TRY formats (pad 0 and pad 1) with the default format. */
+ format = v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ *format = xcsi2rxss->default_format;
+
+ format = v4l2_subdev_get_try_format(sd, fh->pad, 1);
+ *format = xcsi2rxss->default_format;
+
+ return 0;
+}
+
+/**
+ * xcsi2rxss_close - Called on v4l2_close(); nothing to release
+ * @sd: Pointer to V4L2 sub device structure
+ * @fh: Pointer to V4L2 File handle
+ *
+ * Return: 0 always
+ */
+static int xcsi2rxss_close(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Media Operations
+ */
+
+/* Media entity ops: only link validation, using the generic helper. */
+static const struct media_entity_operations xcsi2rxss_media_ops = {
+ .link_validate = v4l2_subdev_link_validate
+};
+
+/* Control ops backing the custom controls declared below. */
+static const struct v4l2_ctrl_ops xcsi2rxss_ctrl_ops = {
+ .g_volatile_ctrl = xcsi2rxss_g_volatile_ctrl,
+ .s_ctrl = xcsi2rxss_s_ctrl
+};
+
+/*
+ * Custom control definitions. Not const: probe() patches the Active Lanes
+ * entry's .max/.def to the design's maximum lane count before registering,
+ * and skips that entry entirely when active-lane switching is disabled.
+ */
+static struct v4l2_ctrl_config xcsi2rxss_ctrls[] = {
+ {
+ .ops = &xcsi2rxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_MIPICSISS_ACT_LANES,
+ .name = "MIPI CSI2 Rx Subsystem: Active Lanes",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 4,
+ .step = 1,
+ .def = 1,
+ }, {
+ /* Read-only, volatile: value is fetched from the event counter. */
+ .ops = &xcsi2rxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_MIPICSISS_FRAME_COUNTER,
+ .name = "MIPI CSI2 Rx Subsystem: Frames Received Counter",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 0xFFFFFFFF,
+ .step = 1,
+ .def = 0,
+ .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+ }, {
+ /* Write-only button: resets all event counters. */
+ .ops = &xcsi2rxss_ctrl_ops,
+ .id = V4L2_CID_XILINX_MIPICSISS_RESET_COUNTERS,
+ .name = "MIPI CSI2 Rx Subsystem: Reset Counters",
+ .type = V4L2_CTRL_TYPE_BUTTON,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+ .flags = V4L2_CTRL_FLAG_WRITE_ONLY,
+ }
+};
+
+/* Core ops: status logging plus short-packet event (un)subscription. */
+static const struct v4l2_subdev_core_ops xcsi2rxss_core_ops = {
+ .log_status = xcsi2rxss_log_status,
+ .subscribe_event = xcsi2rxss_subscribe_event,
+ .unsubscribe_event = xcsi2rxss_unsubscribe_event
+};
+
+static struct v4l2_subdev_video_ops xcsi2rxss_video_ops = {
+ .s_stream = xcsi2rxss_s_stream
+};
+
+static struct v4l2_subdev_pad_ops xcsi2rxss_pad_ops = {
+ .get_fmt = xcsi2rxss_get_format,
+ .set_fmt = xcsi2rxss_set_format,
+};
+
+/* Aggregate subdev ops table registered in probe(). */
+static struct v4l2_subdev_ops xcsi2rxss_ops = {
+ .core = &xcsi2rxss_core_ops,
+ .video = &xcsi2rxss_video_ops,
+ .pad = &xcsi2rxss_pad_ops
+};
+
+/* open/close hooks invoked by the V4L2 subdev core on device node access. */
+static const struct v4l2_subdev_internal_ops xcsi2rxss_internal_ops = {
+ .open = xcsi2rxss_open,
+ .close = xcsi2rxss_close
+};
+
+/* -----------------------------------------------------------------------------
+ * Power Management
+ */
+
+/**
+ * xcsi2rxss_pm_suspend - Function called on Power Suspend
+ * @dev: Pointer to device structure
+ *
+ * On power suspend the CSI-2 Core is disabled if the device isn't
+ * in suspended state and is streaming.
+ *
+ * Return: 0 on success
+ */
+static int __maybe_unused xcsi2rxss_pm_suspend(struct device *dev)
+{
+ struct xcsi2rxss_state *xcsi2rxss = dev_get_drvdata(dev);
+
+ mutex_lock(&xcsi2rxss->lock);
+
+ /* Only gate the core if it was actively streaming. */
+ if (!xcsi2rxss->suspended && xcsi2rxss->streaming)
+ xcsi2rxss_clr(&xcsi2rxss->core,
+ XCSI_CCR_OFFSET, XCSI_CCR_COREENB_MASK);
+
+ xcsi2rxss->suspended = true;
+
+ mutex_unlock(&xcsi2rxss->lock);
+
+ return 0;
+}
+
+/**
+ * xcsi2rxss_pm_resume - Function called on Power Resume
+ * @dev: Pointer to device structure
+ *
+ * On power resume the CSI-2 Core is enabled when it is in suspended state
+ * and prior to entering suspended state it was streaming.
+ *
+ * Return: 0 on success
+ */
+static int __maybe_unused xcsi2rxss_pm_resume(struct device *dev)
+{
+ struct xcsi2rxss_state *xcsi2rxss = dev_get_drvdata(dev);
+
+ mutex_lock(&xcsi2rxss->lock);
+
+ /* Re-enable the core only if suspend had gated an active stream. */
+ if ((xcsi2rxss->suspended) && (xcsi2rxss->streaming))
+ xcsi2rxss_set(&xcsi2rxss->core,
+ XCSI_CCR_OFFSET, XCSI_CCR_COREENB_MASK);
+
+ xcsi2rxss->suspended = false;
+
+ mutex_unlock(&xcsi2rxss->lock);
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+/**
+ * xcsi2rxss_parse_of - Parse device tree properties and acquire resources
+ * @xcsi2rxss: Pointer to the CSI-2 Rx Subsystem state
+ *
+ * Reads clocks (when the matched config has XCSI_CLK_PROP), DPHY/IIC
+ * presence, lane count, virtual channel, pixel format, VFB and ppc
+ * properties, validates the two port nodes, registers the IRQ handler and
+ * obtains the optional reset GPIO.
+ *
+ * Return: 0 on success, negative error code otherwise
+ */
+static int xcsi2rxss_parse_of(struct xcsi2rxss_state *xcsi2rxss)
+{
+ struct device_node *node = xcsi2rxss->core.dev->of_node;
+ struct device_node *ports = NULL;
+ struct device_node *port = NULL;
+ unsigned int nports = 0;
+ struct xcsi2rxss_core *core = &xcsi2rxss->core;
+ int ret;
+ bool iic_present;
+
+ if (core->cfg->flags & XCSI_CLK_PROP) {
+ core->lite_aclk = devm_clk_get(core->dev, "lite_aclk");
+ if (IS_ERR(core->lite_aclk)) {
+ ret = PTR_ERR(core->lite_aclk);
+ dev_err(core->dev, "failed to get lite_aclk (%d)\n",
+ ret);
+ return ret;
+ }
+
+ core->video_aclk = devm_clk_get(core->dev, "video_aclk");
+ if (IS_ERR(core->video_aclk)) {
+ ret = PTR_ERR(core->video_aclk);
+ dev_err(core->dev, "failed to get video_aclk (%d)\n",
+ ret);
+ return ret;
+ }
+
+ core->dphy_clk_200M = devm_clk_get(core->dev, "dphy_clk_200M");
+ if (IS_ERR(core->dphy_clk_200M)) {
+ ret = PTR_ERR(core->dphy_clk_200M);
+ dev_err(core->dev, "failed to get dphy_clk_200M (%d)\n",
+ ret);
+ return ret;
+ }
+ } else {
+ /* Older IP configs don't model clocks in DT. */
+ dev_info(core->dev, "assuming all required clocks are enabled!\n");
+ }
+
+ core->dphy_present = of_property_read_bool(node, "xlnx,dphy-present");
+ dev_dbg(core->dev, "DPHY present property = %s\n",
+ core->dphy_present ? "Present" : "Absent");
+
+ iic_present = of_property_read_bool(node, "xlnx,iic-present");
+ dev_dbg(core->dev, "IIC present property = %s\n",
+ iic_present ? "Present" : "Absent");
+
+ if (iic_present && (core->cfg->flags & XCSI_DPHY_PROP)) {
+ /*
+ * In IP v4.1 the DPHY offset is 0x10000, if present,
+ * and the iic is removed from subsystem.
+ */
+ dev_err(core->dev, "Invalid case - IIC present!");
+ return -EINVAL;
+ }
+
+ /* DPHY register window offset depends on the IP configuration. */
+ if (core->dphy_present) {
+ if (iic_present) {
+ core->dphy_offset = 0x20000;
+ } else {
+ if (core->cfg->flags & XCSI_DPHY_ADDR_PROP)
+ core->dphy_offset = 0x1000;
+ else
+ core->dphy_offset = 0x10000;
+ }
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-lanes",
+ &core->max_num_lanes);
+ if (ret < 0) {
+ dev_err(core->dev, "missing xlnx,max-lanes property\n");
+ return ret;
+ }
+
+ if ((core->max_num_lanes > 4) || (core->max_num_lanes < 1)) {
+ dev_err(core->dev, "%d max lanes : invalid xlnx,max-lanes property\n",
+ core->max_num_lanes);
+ return -EINVAL;
+ }
+
+ core->en_vcx = of_property_read_bool(node, "xlnx,en-vcx");
+
+ ret = of_property_read_u32(node, "xlnx,vc", &core->vc);
+ if (ret < 0) {
+ dev_err(core->dev, "missing xlnx,vc property\n");
+ return ret;
+ }
+ /* VC limit depends on whether extended virtual channels are enabled. */
+ if ((core->vc > XMIPICSISS_MAX_VC && !core->en_vcx) ||
+ (core->vc > XMIPICSISS_MAX_VCX && core->en_vcx)) {
+ dev_err(core->dev, "invalid virtual channel property value.\n");
+ return -EINVAL;
+ }
+
+ core->enable_active_lanes =
+ of_property_read_bool(node, "xlnx,en-active-lanes");
+ dev_dbg(core->dev, "Enable active lanes property = %s\n",
+ core->enable_active_lanes ? "Present" : "Absent");
+
+ ret = of_property_read_string(node, "xlnx,csi-pxl-format",
+ &core->pxlformat);
+ if (ret < 0) {
+ dev_err(core->dev, "missing xlnx,csi-pxl-format property\n");
+ return ret;
+ }
+
+ core->datatype = xcsi2rxss_pxlfmtstrtodt(core->pxlformat);
+ if ((core->datatype < MIPI_CSI_DT_YUV_420_8B) ||
+ (core->datatype > MIPI_CSI_DT_RAW_20)) {
+ dev_err(core->dev, "Invalid xlnx,csi-pxl-format string\n");
+ return -EINVAL;
+ }
+
+ core->vfb = of_property_read_bool(node, "xlnx,vfb");
+ dev_dbg(core->dev, "Video Format Bridge property = %s\n",
+ core->vfb ? "Present" : "Absent");
+
+ if (core->vfb) {
+ /* With the Video Format Bridge, only 1/2/4 pixels per clock. */
+ ret = of_property_read_u32(node, "xlnx,ppc", &core->ppc);
+ if ((ret < 0) || !((core->ppc == 1) ||
+ (core->ppc == 2) || (core->ppc == 4))) {
+ dev_err(core->dev, "Invalid xlnx,ppc property ret = %d ppc = %d\n",
+ ret, core->ppc);
+ return -EINVAL;
+ }
+ }
+
+ ports = of_get_child_by_name(node, "ports");
+ if (ports == NULL)
+ ports = node;
+
+ /*
+ * NOTE(review): the early returns inside this loop do not call
+ * of_node_put() on 'port' (or on 'ports' when it came from
+ * of_get_child_by_name()), leaking node refcounts on error.
+ */
+ for_each_child_of_node(ports, port) {
+ int ret;
+ const struct xvip_video_format *format;
+ struct device_node *endpoint;
+ struct v4l2_fwnode_endpoint v4lendpoint = { 0 };
+
+ if (!port->name || of_node_cmp(port->name, "port"))
+ continue;
+
+ /*
+ * Currently only a subset of VFB enabled formats present in
+ * xvip are supported in the driver.
+ *
+ * If the VFB is disabled, the pixels per clock don't matter.
+ * The data width is either 32 or 64 bit as selected in design.
+ *
+ * For e.g. If Data Type is RGB888, VFB is disabled and
+ * data width is 32 bits.
+ *
+ * Clk Cycle | Byte 0 | Byte 1 | Byte 2 | Byte 3
+ * -----------+----------+----------+----------+----------
+ * 1 | B0 | G0 | R0 | B1
+ * 2 | G1 | R1 | B2 | G2
+ * 3 | R2 | B3 | G3 | R3
+ */
+ format = xvip_of_get_format(port);
+ if (IS_ERR(format)) {
+ dev_err(core->dev, "invalid format in DT");
+ return PTR_ERR(format);
+ }
+
+ if (core->vfb &&
+ (format->vf_code != XVIP_VF_YUV_422) &&
+ (format->vf_code != XVIP_VF_RBG) &&
+ (format->vf_code != XVIP_VF_MONO_SENSOR)) {
+ dev_err(core->dev, "Invalid UG934 video format set.\n");
+ return -EINVAL;
+ }
+
+ /* Get and check the format description */
+ if (!xcsi2rxss->vip_format) {
+ xcsi2rxss->vip_format = format;
+ } else if (xcsi2rxss->vip_format != format) {
+ dev_err(core->dev, "in/out format mismatch in DT");
+ return -EINVAL;
+ }
+
+ endpoint = of_get_next_child(port, NULL);
+ if (!endpoint) {
+ dev_err(core->dev, "No port at\n");
+ return -EINVAL;
+ }
+
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(endpoint),
+ &v4lendpoint);
+ if (ret) {
+ of_node_put(endpoint);
+ return ret;
+ }
+
+ of_node_put(endpoint);
+ dev_dbg(core->dev, "%s : port %d bus type = %d\n",
+ __func__, nports, v4lendpoint.bus_type);
+
+ if (v4lendpoint.bus_type == V4L2_MBUS_CSI2_DPHY) {
+ dev_dbg(core->dev, "%s : base.port = %d base.id = %d\n",
+ __func__,
+ v4lendpoint.base.port,
+ v4lendpoint.base.id);
+
+ dev_dbg(core->dev, "%s : mipi number lanes = %d\n",
+ __func__,
+ v4lendpoint.bus.mipi_csi2.num_data_lanes);
+ } else {
+ dev_dbg(core->dev, "%s : Not a CSI2 bus\n", __func__);
+ }
+
+ /* Count the number of ports. */
+ nports++;
+ }
+
+ /* Exactly one sink and one source port are expected. */
+ if (nports != 2) {
+ dev_err(core->dev, "invalid number of ports %u\n", nports);
+ return -EINVAL;
+ }
+ xcsi2rxss->npads = nports;
+
+ /*Register interrupt handler */
+ /*
+ * NOTE(review): irq_of_parse_and_map() returns 0 on failure; that
+ * value is not checked before devm_request_irq(), so a missing DT
+ * interrupt would be reported only by the request failing.
+ */
+ core->irq = irq_of_parse_and_map(node, 0);
+
+ ret = devm_request_irq(core->dev, core->irq, xcsi2rxss_irq_handler,
+ IRQF_SHARED, "xilinx-csi2rxss", xcsi2rxss);
+ if (ret) {
+ dev_err(core->dev, "Err = %d Interrupt handler reg failed!\n",
+ ret);
+ return ret;
+ }
+
+ /* Reset GPIO */
+ core->rst_gpio = devm_gpiod_get_optional(core->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(core->rst_gpio)) {
+ if (PTR_ERR(core->rst_gpio) != -EPROBE_DEFER)
+ dev_err(core->dev, "Reset GPIO not setup in DT");
+ return PTR_ERR(core->rst_gpio);
+ }
+
+ return 0;
+}
+
+/**
+ * xcsi2rxss_probe - Platform probe for the MIPI CSI-2 Rx Subsystem
+ * @pdev: Pointer to the platform device
+ *
+ * Parses DT, maps registers, enables clocks, resets the core, registers
+ * custom controls and the async V4L2 subdevice.
+ *
+ * Fixes over the previous revision:
+ *  - the DPHY clock range check used '&&' between 'rate < MIN' and
+ *    'rate > MAX', a condition that can never be true; it is now '||';
+ *  - a v4l2_ctrl_new_custom() failure jumped to the error path with
+ *    ret still 0, so probe reported success; the handler error is now
+ *    propagated;
+ *  - the vcx_events allocation failure returned directly, leaking the
+ *    enabled clocks; it now unwinds through the clock error path;
+ *  - every error path now destroys the mutex.
+ *
+ * Return: 0 on success, negative error code otherwise
+ */
+static int xcsi2rxss_probe(struct platform_device *pdev)
+{
+ struct v4l2_subdev *subdev;
+ struct xcsi2rxss_state *xcsi2rxss;
+ struct resource *res;
+ const struct of_device_id *match;
+ struct device_node *node = pdev->dev.of_node;
+ u32 i;
+ int ret;
+ int num_ctrls;
+
+ xcsi2rxss = devm_kzalloc(&pdev->dev, sizeof(*xcsi2rxss), GFP_KERNEL);
+ if (!xcsi2rxss)
+ return -ENOMEM;
+
+ mutex_init(&xcsi2rxss->lock);
+
+ xcsi2rxss->core.dev = &pdev->dev;
+
+ match = of_match_node(xcsi2rxss_of_id_table, node);
+ if (!match) {
+ ret = -ENODEV;
+ goto clk_err;
+ }
+
+ xcsi2rxss->core.cfg = match->data;
+
+ ret = xcsi2rxss_parse_of(xcsi2rxss);
+ if (ret < 0)
+ goto clk_err;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xcsi2rxss->core.iomem = devm_ioremap_resource(xcsi2rxss->core.dev, res);
+ if (IS_ERR(xcsi2rxss->core.iomem)) {
+ ret = PTR_ERR(xcsi2rxss->core.iomem);
+ goto clk_err;
+ }
+
+ if (xcsi2rxss->core.cfg->flags & XCSI_CLK_PROP) {
+ unsigned long rate;
+
+ ret = clk_prepare_enable(xcsi2rxss->core.lite_aclk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable lite_aclk (%d)\n",
+ ret);
+ goto clk_err;
+ }
+
+ ret = clk_prepare_enable(xcsi2rxss->core.video_aclk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable video_aclk (%d)\n",
+ ret);
+ goto video_aclk_err;
+ }
+
+ ret = clk_prepare_enable(xcsi2rxss->core.dphy_clk_200M);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable dphy clk (%d)\n",
+ ret);
+ goto dphy_clk_err;
+ }
+
+ ret = clk_set_rate(xcsi2rxss->core.dphy_clk_200M,
+ XCSI_DPHY_CLK_REQ);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set dphy clk rate (%d)\n",
+ ret);
+
+ goto all_clk_err;
+ }
+
+ /*
+ * Reject a rate outside [MIN, MAX]. The previous '&&' form
+ * could never be true since MIN < MAX.
+ */
+ rate = clk_get_rate(xcsi2rxss->core.dphy_clk_200M);
+ if (rate < XCSI_DPHY_CLK_MIN || rate > XCSI_DPHY_CLK_MAX) {
+ dev_err(&pdev->dev, "Err DPHY Clock = %lu\n",
+ rate);
+ ret = -EINVAL;
+ goto all_clk_err;
+ }
+ }
+
+ /*
+ * Reset and initialize the core.
+ */
+
+ if (xcsi2rxss->core.rst_gpio) {
+ gpiod_set_value_cansleep(xcsi2rxss->core.rst_gpio, 1);
+ /* minimum of 40 dphy_clk_200M cycles */
+ usleep_range(1, 2);
+ gpiod_set_value_cansleep(xcsi2rxss->core.rst_gpio, 0);
+ }
+
+ xcsi2rxss_reset(&xcsi2rxss->core);
+
+ xcsi2rxss->core.events = (struct xcsi2rxss_event *)&xcsi2rxss_events;
+
+ if (xcsi2rxss->core.en_vcx) {
+ u32 alloc_size;
+
+ /* Extra per-VC event slots for extended virtual channels. */
+ alloc_size = sizeof(struct xcsi2rxss_event) *
+ XMIPICSISS_VCX_NUM_EVENTS;
+ xcsi2rxss->core.vcx_events = devm_kzalloc(&pdev->dev,
+ alloc_size,
+ GFP_KERNEL);
+ if (!xcsi2rxss->core.vcx_events) {
+ /*
+ * Unwind through the clock path; previously this
+ * returned directly and leaked the enabled clocks.
+ */
+ ret = -ENOMEM;
+ goto all_clk_err;
+ }
+
+ for (i = 0; i < XMIPICSISS_VCX_NUM_EVENTS; i++)
+ xcsi2rxss->core.vcx_events[i].mask = 1 << i;
+ }
+
+ /* Initialize V4L2 subdevice and media entity */
+ xcsi2rxss->pads[0].flags = MEDIA_PAD_FL_SOURCE;
+ xcsi2rxss->pads[1].flags = MEDIA_PAD_FL_SINK;
+
+ /* Initialize the default format */
+ memset(&xcsi2rxss->default_format, 0,
+ sizeof(xcsi2rxss->default_format));
+ xcsi2rxss->default_format.code = xcsi2rxss->vip_format->code;
+ xcsi2rxss->default_format.field = V4L2_FIELD_NONE;
+ xcsi2rxss->default_format.colorspace = V4L2_COLORSPACE_SRGB;
+ xcsi2rxss->default_format.width = XCSI_DEFAULT_WIDTH;
+ xcsi2rxss->default_format.height = XCSI_DEFAULT_HEIGHT;
+
+ xcsi2rxss->formats = xcsi2rxss->default_format;
+
+ /* Initialize V4L2 subdevice and media entity */
+ subdev = &xcsi2rxss->subdev;
+ v4l2_subdev_init(subdev, &xcsi2rxss_ops);
+
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xcsi2rxss_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ subdev->entity.ops = &xcsi2rxss_media_ops;
+
+ v4l2_set_subdevdata(subdev, xcsi2rxss);
+
+ ret = media_entity_pads_init(&subdev->entity, 2, xcsi2rxss->pads);
+ if (ret < 0)
+ goto error;
+
+ /*
+ * In case the Enable Active Lanes config parameter is not set,
+ * dynamic lane reconfiguration is not allowed.
+ * So V4L2_CID_XILINX_MIPICSISS_ACT_LANES ctrl will not be registered.
+ * Accordingly allocate the number of controls
+ */
+ num_ctrls = ARRAY_SIZE(xcsi2rxss_ctrls);
+
+ if (!xcsi2rxss->core.enable_active_lanes)
+ num_ctrls--;
+
+ dev_dbg(xcsi2rxss->core.dev, "# of ctrls = %d\n", num_ctrls);
+
+ v4l2_ctrl_handler_init(&xcsi2rxss->ctrl_handler, num_ctrls);
+
+ for (i = 0; i < ARRAY_SIZE(xcsi2rxss_ctrls); i++) {
+ struct v4l2_ctrl *ctrl;
+
+ if (xcsi2rxss_ctrls[i].id ==
+ V4L2_CID_XILINX_MIPICSISS_ACT_LANES) {
+
+ if (xcsi2rxss->core.enable_active_lanes) {
+ /* Bound the control by the design's lane count. */
+ xcsi2rxss_ctrls[i].max =
+ xcsi2rxss->core.max_num_lanes;
+ xcsi2rxss_ctrls[i].def =
+ xcsi2rxss->core.max_num_lanes;
+ } else {
+ /* Don't register control */
+ dev_dbg(xcsi2rxss->core.dev,
+ "Skip active lane control\n");
+ continue;
+ }
+ }
+
+ dev_dbg(xcsi2rxss->core.dev, "%d ctrl = 0x%x\n",
+ i, xcsi2rxss_ctrls[i].id);
+ ctrl = v4l2_ctrl_new_custom(&xcsi2rxss->ctrl_handler,
+ &xcsi2rxss_ctrls[i], NULL);
+ if (!ctrl) {
+ dev_err(xcsi2rxss->core.dev, "Failed for %s ctrl\n",
+ xcsi2rxss_ctrls[i].name);
+ /*
+ * Propagate the handler error; previously ret was
+ * still 0 here and probe reported success.
+ */
+ ret = xcsi2rxss->ctrl_handler.error ?: -EINVAL;
+ goto error;
+ }
+ }
+
+ dev_dbg(xcsi2rxss->core.dev, "# v4l2 ctrls registered = %d\n",
+ num_ctrls);
+
+ if (xcsi2rxss->ctrl_handler.error) {
+ dev_err(&pdev->dev, "failed to add controls\n");
+ ret = xcsi2rxss->ctrl_handler.error;
+ goto error;
+ }
+
+ subdev->ctrl_handler = &xcsi2rxss->ctrl_handler;
+
+ ret = v4l2_ctrl_handler_setup(&xcsi2rxss->ctrl_handler);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to set controls\n");
+ goto error;
+ }
+
+ platform_set_drvdata(pdev, xcsi2rxss);
+
+ dev_info(xcsi2rxss->core.dev, "Xilinx CSI2 Rx Subsystem device found!\n");
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto error;
+ }
+
+ /* default states for streaming and suspend */
+ xcsi2rxss->streaming = false;
+ xcsi2rxss->suspended = false;
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(&xcsi2rxss->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+
+all_clk_err:
+ /* clk_disable_unprepare() is a no-op for NULL clocks. */
+ clk_disable_unprepare(xcsi2rxss->core.dphy_clk_200M);
+dphy_clk_err:
+ clk_disable_unprepare(xcsi2rxss->core.video_aclk);
+video_aclk_err:
+ clk_disable_unprepare(xcsi2rxss->core.lite_aclk);
+clk_err:
+ mutex_destroy(&xcsi2rxss->lock);
+ return ret;
+}
+
+/**
+ * xcsi2rxss_remove - Platform remove; undo probe in reverse order
+ * @pdev: Pointer to the platform device
+ *
+ * Unregisters the async subdev, frees controls, cleans up the media
+ * entity, destroys the lock and disables the clocks.
+ *
+ * Return: 0 always
+ */
+static int xcsi2rxss_remove(struct platform_device *pdev)
+{
+ struct xcsi2rxss_state *xcsi2rxss = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xcsi2rxss->subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ v4l2_ctrl_handler_free(&xcsi2rxss->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+ mutex_destroy(&xcsi2rxss->lock);
+ clk_disable_unprepare(xcsi2rxss->core.dphy_clk_200M);
+ clk_disable_unprepare(xcsi2rxss->core.video_aclk);
+ clk_disable_unprepare(xcsi2rxss->core.lite_aclk);
+
+ return 0;
+}
+
+/* System sleep PM hooks (suspend/resume defined above). */
+static SIMPLE_DEV_PM_OPS(xcsi2rxss_pm_ops,
+ xcsi2rxss_pm_suspend, xcsi2rxss_pm_resume);
+
+static struct platform_driver xcsi2rxss_driver = {
+ .driver = {
+ .name = "xilinx-csi2rxss",
+ .pm = &xcsi2rxss_pm_ops,
+ .of_match_table = xcsi2rxss_of_id_table,
+ },
+ .probe = xcsi2rxss_probe,
+ .remove = xcsi2rxss_remove,
+};
+
+module_platform_driver(xcsi2rxss_driver);
+
+MODULE_AUTHOR("Vishal Sagar <vsagar@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx MIPI CSI2 Rx Subsystem Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-demosaic.c b/drivers/media/platform/xilinx/xilinx-demosaic.c
new file mode 100644
index 000000000000..a519c2c9719b
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-demosaic.c
@@ -0,0 +1,418 @@
+/*
+ * Xilinx Video Demosaic IP
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-vip.h"
+
+/* Register offsets within the Demosaic IP's AXI-Lite control space */
+#define XDEMOSAIC_AP_CTRL (0x00)
+#define XDEMOSAIC_WIDTH (0x10)
+#define XDEMOSAIC_HEIGHT (0x18)
+#define XDEMOSAIC_INPUT_BAYER_FORMAT (0x28)
+
+/* Supported frame-size limits and probe-time defaults, in pixels */
+#define XDEMOSAIC_MIN_HEIGHT (64)
+#define XDEMOSAIC_MAX_HEIGHT (4320)
+#define XDEMOSAIC_DEF_HEIGHT (720)
+#define XDEMOSAIC_MIN_WIDTH (64)
+#define XDEMOSAIC_MAX_WIDTH (8192)
+#define XDEMOSAIC_DEF_WIDTH (1280)
+
+/* Logical GPIO values written with gpiod_set_value_cansleep() */
+#define XDEMOSAIC_RESET_DEASSERT (0)
+#define XDEMOSAIC_RESET_ASSERT (1)
+/* AP_CTRL bits: start the core and have it restart automatically */
+#define XDEMOSAIC_START BIT(0)
+#define XDEMOSAIC_AUTO_RESTART BIT(7)
+#define XDEMOSAIC_STREAM_ON (XDEMOSAIC_AUTO_RESTART | XDEMOSAIC_START)
+
+/*
+ * Bayer phase codes written to XDEMOSAIC_INPUT_BAYER_FORMAT; the
+ * numeric values are the IP's encoding (RGGB = 0, then GRBG, GBRG,
+ * BGGR in declaration order).
+ */
+enum xdmsc_bayer_format {
+ XDEMOSAIC_RGGB = 0,
+ XDEMOSAIC_GRBG,
+ XDEMOSAIC_GBRG,
+ XDEMOSAIC_BGGR,
+};
+
+/**
+ * struct xdmsc_dev - Xilinx Video Demosaic device state
+ * @xvip: Xilinx Video IP core data (device, register base, subdev)
+ * @pads: media pads, indexed by XVIP_PAD_SINK / XVIP_PAD_SOURCE
+ * @formats: active format on each pad
+ * @default_formats: per-pad defaults restored for new TRY handles
+ * @bayer_fmt: Bayer phase last derived from the sink pad format
+ * @rst_gpio: reset GPIO (logical 1 asserts reset)
+ * @max_width: maximum width accepted, taken from the device tree
+ * @max_height: maximum height accepted, taken from the device tree
+ */
+struct xdmsc_dev {
+ struct xvip_device xvip;
+ struct media_pad pads[2];
+ struct v4l2_mbus_framefmt formats[2];
+ struct v4l2_mbus_framefmt default_formats[2];
+
+ enum xdmsc_bayer_format bayer_fmt;
+ struct gpio_desc *rst_gpio;
+ u32 max_width;
+ u32 max_height;
+};
+
+/* Read a Demosaic register via the xvip core, tracing the access. */
+static inline u32 xdmsc_read(struct xdmsc_dev *xdmsc, u32 reg)
+{
+ u32 data;
+
+ data = xvip_read(&xdmsc->xvip, reg);
+ dev_dbg(xdmsc->xvip.dev,
+ "Reading 0x%x from reg offset 0x%x", data, reg);
+ return data;
+}
+
+/*
+ * Write a Demosaic register via the xvip core, tracing the access.
+ * With DEBUG builds the value is read back and a mismatch is reported;
+ * note this flags write-only or self-clearing registers too.
+ */
+static inline void xdmsc_write(struct xdmsc_dev *xdmsc, u32 reg, u32 data)
+{
+ xvip_write(&xdmsc->xvip, reg, data);
+ dev_dbg(xdmsc->xvip.dev,
+ "Writing 0x%x to reg offset 0x%x", data, reg);
+#ifdef DEBUG
+ if (xdmsc_read(xdmsc, reg) != data)
+ dev_err(xdmsc->xvip.dev,
+ "Wrote 0x%x does not match read back", data);
+#endif
+}
+
+/* Map an embedded v4l2_subdev back to its owning xdmsc_dev. */
+static inline struct xdmsc_dev *to_xdmsc(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xdmsc_dev, xvip.subdev);
+}
+
+/*
+ * Return the TRY (per-filehandle) or ACTIVE (device) format for @pad.
+ * Returns NULL for any other @which value; callers dereference the
+ * result, so they rely on the V4L2 core only passing TRY/ACTIVE.
+ */
+static struct v4l2_mbus_framefmt
+*__xdmsc_get_pad_format(struct xdmsc_dev *xdmsc,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xdmsc->xvip.subdev,
+ cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xdmsc->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+/*
+ * xdmsc_s_stream - Start or stop the Demosaic core.
+ *
+ * Stop pulses the reset GPIO (assert then immediately deassert) so the
+ * IP comes back in a clean state.
+ * NOTE(review): there is no delay between assert and deassert; this
+ * assumes the GPIO toggle alone is a sufficient reset pulse for the
+ * IP — confirm against the core's reset timing requirements.
+ *
+ * Start programs the sink frame size and Bayer phase, then kicks the
+ * core with auto-restart so it processes frames continuously.
+ */
+static int xdmsc_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xdmsc_dev *xdmsc = to_xdmsc(subdev);
+
+ if (!enable) {
+ dev_dbg(xdmsc->xvip.dev, "%s : Off", __func__);
+ gpiod_set_value_cansleep(xdmsc->rst_gpio,
+ XDEMOSAIC_RESET_ASSERT);
+ gpiod_set_value_cansleep(xdmsc->rst_gpio,
+ XDEMOSAIC_RESET_DEASSERT);
+ return 0;
+ }
+
+ xdmsc_write(xdmsc, XDEMOSAIC_WIDTH,
+ xdmsc->formats[XVIP_PAD_SINK].width);
+ xdmsc_write(xdmsc, XDEMOSAIC_HEIGHT,
+ xdmsc->formats[XVIP_PAD_SINK].height);
+ xdmsc_write(xdmsc, XDEMOSAIC_INPUT_BAYER_FORMAT, xdmsc->bayer_fmt);
+
+ /* Start Demosaic Video IP */
+ xdmsc_write(xdmsc, XDEMOSAIC_AP_CTRL, XDEMOSAIC_STREAM_ON);
+ return 0;
+}
+
+/* Video ops: only stream on/off is implemented. */
+static const struct v4l2_subdev_video_ops xdmsc_video_ops = {
+ .s_stream = xdmsc_s_stream,
+};
+
+/*
+ * xdmsc_get_format - Return the current TRY/ACTIVE format of a pad.
+ * NOTE(review): __xdmsc_get_pad_format() can return NULL for an
+ * unexpected fmt->which; this dereferences the result unchecked,
+ * relying on the V4L2 core to validate 'which' before calling.
+ */
+static int xdmsc_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xdmsc_dev *xdmsc = to_xdmsc(subdev);
+
+ fmt->format = *__xdmsc_get_pad_format(xdmsc, cfg, fmt->pad, fmt->which);
+ return 0;
+}
+
+/*
+ * xdmsc_is_format_bayer - Check @code is a supported Bayer mbus code.
+ *
+ * Accepts the 8/10/12/16-bit variants of the four Bayer phases.
+ * Note the side effect: on success the matching phase is stored in
+ * xdmsc->bayer_fmt, which s_stream later programs into the IP — so
+ * this is a classifier as well as a predicate.
+ *
+ * Return: true if @code is Bayer (and bayer_fmt was updated),
+ * false otherwise (bayer_fmt untouched).
+ */
+static bool
+xdmsc_is_format_bayer(struct xdmsc_dev *xdmsc, u32 code)
+{
+ switch (code) {
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SRGGB12_1X12:
+ case MEDIA_BUS_FMT_SRGGB16_1X16:
+ xdmsc->bayer_fmt = XDEMOSAIC_RGGB;
+ break;
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SGRBG12_1X12:
+ case MEDIA_BUS_FMT_SGRBG16_1X16:
+ xdmsc->bayer_fmt = XDEMOSAIC_GRBG;
+ break;
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SGBRG12_1X12:
+ case MEDIA_BUS_FMT_SGBRG16_1X16:
+ xdmsc->bayer_fmt = XDEMOSAIC_GBRG;
+ break;
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SBGGR12_1X12:
+ case MEDIA_BUS_FMT_SBGGR16_1X16:
+ xdmsc->bayer_fmt = XDEMOSAIC_BGGR;
+ break;
+ default:
+ dev_dbg(xdmsc->xvip.dev, "Unsupported format for Sink Pad");
+ return false;
+ }
+ return true;
+}
+
+/*
+ * xdmsc_set_format - Set the TRY/ACTIVE format on a pad.
+ *
+ * The requested frame size is clamped to [MIN, dt-provided max].
+ * The source pad only accepts RBG 24/30/36/48-bit codes and falls back
+ * to RBG888; the sink pad only accepts Bayer codes and falls back to
+ * SRGGB8. The possibly-adjusted format is written back to @fmt so the
+ * caller sees what was actually applied.
+ */
+static int xdmsc_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xdmsc_dev *xdmsc = to_xdmsc(subdev);
+ struct v4l2_mbus_framefmt *__format;
+
+ __format = __xdmsc_get_pad_format(xdmsc, cfg, fmt->pad, fmt->which);
+ *__format = fmt->format;
+
+ __format->width = clamp_t(unsigned int, fmt->format.width,
+ XDEMOSAIC_MIN_WIDTH, xdmsc->max_width);
+ __format->height = clamp_t(unsigned int, fmt->format.height,
+ XDEMOSAIC_MIN_HEIGHT, xdmsc->max_height);
+
+ if (fmt->pad == XVIP_PAD_SOURCE) {
+ if (__format->code != MEDIA_BUS_FMT_RBG888_1X24 &&
+ __format->code != MEDIA_BUS_FMT_RBG101010_1X30 &&
+ __format->code != MEDIA_BUS_FMT_RBG121212_1X36 &&
+ __format->code != MEDIA_BUS_FMT_RBG161616_1X48) {
+ dev_dbg(xdmsc->xvip.dev,
+ "%s : Unsupported source media bus code format",
+ __func__);
+ __format->code = MEDIA_BUS_FMT_RBG888_1X24;
+ }
+ }
+
+ if (fmt->pad == XVIP_PAD_SINK) {
+ /* Also records the Bayer phase for s_stream on success */
+ if (!xdmsc_is_format_bayer(xdmsc, __format->code)) {
+ dev_dbg(xdmsc->xvip.dev,
+ "Unsupported Sink Pad Media format, defaulting to RGGB");
+ __format->code = MEDIA_BUS_FMT_SRGGB8_1X8;
+ }
+ }
+
+ fmt->format = *__format;
+ return 0;
+}
+
+/*
+ * xdmsc_open - Initialize a new file handle's TRY formats on both
+ * pads from the defaults captured at probe time.
+ */
+static int xdmsc_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ struct xdmsc_dev *xdmsc = to_xdmsc(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SINK);
+ *format = xdmsc->default_formats[XVIP_PAD_SINK];
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SOURCE);
+ *format = xdmsc->default_formats[XVIP_PAD_SOURCE];
+ return 0;
+}
+
+/* No per-handle state to release. */
+static int xdmsc_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+/* Subdev lifetime hooks for per-filehandle TRY format setup. */
+static const struct v4l2_subdev_internal_ops xdmsc_internal_ops = {
+ .open = xdmsc_open,
+ .close = xdmsc_close,
+};
+
+/* Pad ops; enumeration is delegated to the common xvip helpers. */
+static const struct v4l2_subdev_pad_ops xdmsc_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xvip_enum_frame_size,
+ .get_fmt = xdmsc_get_format,
+ .set_fmt = xdmsc_set_format,
+};
+
+static const struct v4l2_subdev_ops xdmsc_ops = {
+ .video = &xdmsc_video_ops,
+ .pad = &xdmsc_pad_ops,
+};
+
+/* Use the generic subdev link validation for the media entity. */
+static const struct media_entity_operations xdmsc_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/**
+ * xdmsc_parse_of - Parse the Demosaic device-tree node
+ * @xdmsc: device state; max_width/max_height/rst_gpio are filled in
+ *
+ * Reads and range-checks xlnx,max-height and xlnx,max-width, validates
+ * that every "port" child has a reg of 0 or 1, and acquires the reset
+ * GPIO (requested asserted so the IP starts held in reset).
+ *
+ * for_each_child_of_node() holds a reference on the node it yields, and
+ * of_get_child_by_name() returns a referenced node, so both references
+ * must be dropped on the early-exit paths as well as on success.
+ *
+ * Return: 0 on success, a negative errno otherwise (-EPROBE_DEFER is
+ * propagated silently for the GPIO).
+ */
+static int xdmsc_parse_of(struct xdmsc_dev *xdmsc)
+{
+ struct device *dev = xdmsc->xvip.dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *ports;
+ struct device_node *port;
+ u32 port_id = 0;
+ int rval;
+
+ rval = of_property_read_u32(node, "xlnx,max-height",
+ &xdmsc->max_height);
+ if (rval < 0) {
+ dev_err(dev, "missing xlnx,max-height property!");
+ return -EINVAL;
+ } else if (xdmsc->max_height > XDEMOSAIC_MAX_HEIGHT ||
+ xdmsc->max_height < XDEMOSAIC_MIN_HEIGHT) {
+ dev_err(dev, "Invalid height in dt");
+ return -EINVAL;
+ }
+
+ rval = of_property_read_u32(node, "xlnx,max-width",
+ &xdmsc->max_width);
+ if (rval < 0) {
+ dev_err(dev, "missing xlnx,max-width property!");
+ return -EINVAL;
+ } else if (xdmsc->max_width > XDEMOSAIC_MAX_WIDTH ||
+ xdmsc->max_width < XDEMOSAIC_MIN_WIDTH) {
+ dev_err(dev, "Invalid width in dt");
+ return -EINVAL;
+ }
+
+ ports = of_get_child_by_name(node, "ports");
+ if (!ports)
+ ports = node;
+ /* Get the format description for each pad */
+ for_each_child_of_node(ports, port) {
+ if (port->name && (of_node_cmp(port->name, "port") == 0)) {
+ rval = of_property_read_u32(port, "reg", &port_id);
+ if (rval < 0) {
+ dev_err(dev, "No reg in DT");
+ /* drop the iterator's reference on @port */
+ of_node_put(port);
+ goto put_ports;
+ }
+
+ if (port_id != 0 && port_id != 1) {
+ dev_err(dev, "Invalid reg in DT");
+ rval = -EINVAL;
+ of_node_put(port);
+ goto put_ports;
+ }
+ }
+ }
+ if (ports != node)
+ of_node_put(ports);
+
+ xdmsc->rst_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(xdmsc->rst_gpio)) {
+ if (PTR_ERR(xdmsc->rst_gpio) != -EPROBE_DEFER)
+ dev_err(dev, "Reset GPIO not setup in DT");
+ return PTR_ERR(xdmsc->rst_gpio);
+ }
+ return 0;
+
+put_ports:
+ /* only drop the reference if of_get_child_by_name() took one */
+ if (ports != node)
+ of_node_put(ports);
+ return rval;
+}
+
+/**
+ * xdmsc_probe - Probe the Xilinx Video Demosaic device
+ * @pdev: platform device
+ *
+ * Parses the DT, maps the IP resources, releases the core from reset,
+ * initializes default pad formats (Bayer SRGGB8 sink, RBG888 source),
+ * then registers the media entity and the async V4L2 subdevice.
+ *
+ * Return: 0 on success, a negative errno otherwise.
+ */
+static int xdmsc_probe(struct platform_device *pdev)
+{
+ struct xdmsc_dev *xdmsc;
+ struct v4l2_subdev *subdev;
+ struct v4l2_mbus_framefmt *def_fmt;
+ int rval;
+
+ xdmsc = devm_kzalloc(&pdev->dev, sizeof(*xdmsc), GFP_KERNEL);
+ if (!xdmsc)
+ return -ENOMEM;
+ xdmsc->xvip.dev = &pdev->dev;
+ rval = xdmsc_parse_of(xdmsc);
+ if (rval < 0)
+ return rval;
+ /*
+ * Bail out if the register resources could not be mapped;
+ * everything below touches the IP through xvip.
+ */
+ rval = xvip_init_resources(&xdmsc->xvip);
+ if (rval < 0)
+ return rval;
+
+ /* Reset Demosaic IP */
+ gpiod_set_value_cansleep(xdmsc->rst_gpio,
+ XDEMOSAIC_RESET_DEASSERT);
+
+ /* Init V4L2 subdev */
+ subdev = &xdmsc->xvip.subdev;
+ v4l2_subdev_init(subdev, &xdmsc_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xdmsc_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ /* Default Formats Initialization */
+ def_fmt = &xdmsc->default_formats[XVIP_PAD_SINK];
+ def_fmt->field = V4L2_FIELD_NONE;
+ def_fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ def_fmt->width = XDEMOSAIC_DEF_WIDTH;
+ def_fmt->height = XDEMOSAIC_DEF_HEIGHT;
+
+ /*
+ * Sink Pad can be any Bayer format.
+ * Default Sink Pad format is RGGB.
+ */
+ def_fmt->code = MEDIA_BUS_FMT_SRGGB8_1X8;
+ xdmsc->formats[XVIP_PAD_SINK] = *def_fmt;
+
+ def_fmt = &xdmsc->default_formats[XVIP_PAD_SOURCE];
+ *def_fmt = xdmsc->default_formats[XVIP_PAD_SINK];
+
+ /* Source Pad has a fixed media bus format of RGB */
+ def_fmt->code = MEDIA_BUS_FMT_RBG888_1X24;
+ xdmsc->formats[XVIP_PAD_SOURCE] = *def_fmt;
+
+ xdmsc->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ xdmsc->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ /* Init Media Entity */
+ subdev->entity.ops = &xdmsc_media_ops;
+ rval = media_entity_pads_init(&subdev->entity, 2, xdmsc->pads);
+ if (rval < 0)
+ goto media_error;
+
+ platform_set_drvdata(pdev, xdmsc);
+ rval = v4l2_async_register_subdev(subdev);
+ if (rval < 0) {
+ dev_err(&pdev->dev, "failed to register subdev");
+ goto v4l2_subdev_error;
+ }
+ dev_info(&pdev->dev,
+ "Xilinx Video Demosaic Probe Successful");
+ return 0;
+
+v4l2_subdev_error:
+ media_entity_cleanup(&subdev->entity);
+media_error:
+ xvip_cleanup_resources(&xdmsc->xvip);
+ return rval;
+}
+
+/*
+ * xdmsc_remove - Unbind the Demosaic driver: unregister the subdev,
+ * release the media entity and the xvip resources.
+ */
+static int xdmsc_remove(struct platform_device *pdev)
+{
+ struct xdmsc_dev *xdmsc = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xdmsc->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ media_entity_cleanup(&subdev->entity);
+ xvip_cleanup_resources(&xdmsc->xvip);
+ return 0;
+}
+
+/* Device-tree match table for the Demosaic IP. */
+static const struct of_device_id xdmsc_of_id_table[] = {
+ {.compatible = "xlnx,v-demosaic"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, xdmsc_of_id_table);
+
+static struct platform_driver xdmsc_driver = {
+ .driver = {
+ .name = "xilinx-demosaic",
+ .of_match_table = xdmsc_of_id_table,
+ },
+ .probe = xdmsc_probe,
+ .remove = xdmsc_remove,
+
+};
+
+module_platform_driver(xdmsc_driver);
+MODULE_DESCRIPTION("Xilinx Demosaic IP Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c
index 2a56201cb853..0bb9b863ec9f 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.c
+++ b/drivers/media/platform/xilinx/xilinx-dma.c
@@ -10,11 +10,13 @@
*/
#include <linux/dma/xilinx_dma.h>
+#include <linux/dma/xilinx_frmbuf.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
+#include <linux/xilinx-v4l2-controls.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
@@ -36,6 +38,11 @@
#define XVIP_DMA_MIN_HEIGHT 1U
#define XVIP_DMA_MAX_HEIGHT 8191U
+struct xventity_list {
+ struct list_head list;
+ struct media_entity *entity;
+};
+
/* -----------------------------------------------------------------------------
* Helper functions
*/
@@ -62,7 +69,7 @@ static int xvip_dma_verify_format(struct xvip_dma *dma)
int ret;
subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);
- if (subdev == NULL)
+ if (!subdev)
return -EPIPE;
fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
@@ -70,10 +77,15 @@ static int xvip_dma_verify_format(struct xvip_dma *dma)
if (ret < 0)
return ret == -ENOIOCTLCMD ? -EINVAL : ret;
- if (dma->fmtinfo->code != fmt.format.code ||
- dma->format.height != fmt.format.height ||
- dma->format.width != fmt.format.width ||
- dma->format.colorspace != fmt.format.colorspace)
+ if (dma->fmtinfo->code != fmt.format.code)
+ return -EINVAL;
+
+ /*
+ * Crop rectangle contains format resolution by default, and crop
+ * rectangle if s_selection is executed.
+ */
+ if (dma->r.width != fmt.format.width ||
+ dma->r.height != fmt.format.height)
return -EINVAL;
return 0;
@@ -83,45 +95,6 @@ static int xvip_dma_verify_format(struct xvip_dma *dma)
* Pipeline Stream Management
*/
-/**
- * xvip_pipeline_start_stop - Start ot stop streaming on a pipeline
- * @pipe: The pipeline
- * @start: Start (when true) or stop (when false) the pipeline
- *
- * Walk the entities chain starting at the pipeline output video node and start
- * or stop all of them.
- *
- * Return: 0 if successful, or the return value of the failed video::s_stream
- * operation otherwise.
- */
-static int xvip_pipeline_start_stop(struct xvip_pipeline *pipe, bool start)
-{
- struct xvip_dma *dma = pipe->output;
- struct media_entity *entity;
- struct media_pad *pad;
- struct v4l2_subdev *subdev;
- int ret;
-
- entity = &dma->video.entity;
- while (1) {
- pad = &entity->pads[0];
- if (!(pad->flags & MEDIA_PAD_FL_SINK))
- break;
-
- pad = media_entity_remote_pad(pad);
- if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
- break;
-
- entity = pad->entity;
- subdev = media_entity_to_v4l2_subdev(entity);
-
- ret = v4l2_subdev_call(subdev, video, s_stream, start);
- if (start && ret < 0 && ret != -ENOIOCTLCMD)
- return ret;
- }
-
- return 0;
-}
/**
* xvip_pipeline_set_stream - Enable/disable streaming on a pipeline
@@ -133,7 +106,8 @@ static int xvip_pipeline_start_stop(struct xvip_pipeline *pipe, bool start)
* independently, pipelines have a shared stream state that enable or disable
* all entities in the pipeline. For this reason the pipeline uses a streaming
* counter that tracks the number of DMA engines that have requested the stream
- * to be enabled.
+ * to be enabled. This will walk the graph starting from each DMA and enable or
+ * disable the entities in the path.
*
* When called with the @on argument set to true, this function will increment
* the pipeline streaming count. If the streaming count reaches the number of
@@ -150,20 +124,22 @@ static int xvip_pipeline_start_stop(struct xvip_pipeline *pipe, bool start)
*/
static int xvip_pipeline_set_stream(struct xvip_pipeline *pipe, bool on)
{
+ struct xvip_composite_device *xdev;
int ret = 0;
mutex_lock(&pipe->lock);
+ xdev = pipe->xdev;
if (on) {
if (pipe->stream_count == pipe->num_dmas - 1) {
- ret = xvip_pipeline_start_stop(pipe, true);
+ ret = xvip_graph_start_stop(xdev, true);
if (ret < 0)
goto done;
}
pipe->stream_count++;
} else {
if (--pipe->stream_count == 0)
- xvip_pipeline_start_stop(pipe, false);
+ xvip_graph_start_stop(xdev, false);
}
done:
@@ -200,23 +176,22 @@ static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
dma = to_xvip_dma(media_entity_to_video_device(entity));
- if (dma->pad.flags & MEDIA_PAD_FL_SINK) {
- pipe->output = dma;
+ if (dma->pad.flags & MEDIA_PAD_FL_SINK)
num_outputs++;
- } else {
+ else
num_inputs++;
- }
}
mutex_unlock(&mdev->graph_mutex);
media_graph_walk_cleanup(&graph);
- /* We need exactly one output and zero or one input. */
- if (num_outputs != 1 || num_inputs > 1)
+ /* We need at least one DMA to proceed */
+ if (num_outputs == 0 && num_inputs == 0)
return -EPIPE;
pipe->num_dmas = num_inputs + num_outputs;
+ pipe->xdev = start->xdev;
return 0;
}
@@ -224,7 +199,6 @@ static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
static void __xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
{
pipe->num_dmas = 0;
- pipe->output = NULL;
}
/**
@@ -287,11 +261,13 @@ done:
* @buf: vb2 buffer base object
* @queue: buffer list entry in the DMA engine queued buffers list
* @dma: DMA channel that uses the buffer
+ * @desc: Descriptor associated with this structure
*/
struct xvip_dma_buffer {
struct vb2_v4l2_buffer buf;
struct list_head queue;
struct xvip_dma *dma;
+ struct dma_async_tx_descriptor *desc;
};
#define to_xvip_dma_buffer(vb) container_of(vb, struct xvip_dma_buffer, buf)
@@ -300,6 +276,9 @@ static void xvip_dma_complete(void *param)
{
struct xvip_dma_buffer *buf = param;
struct xvip_dma *dma = buf->dma;
+ int i, sizeimage;
+ u32 fid;
+ int status;
spin_lock(&dma->queued_lock);
list_del(&buf->queue);
@@ -308,7 +287,38 @@ static void xvip_dma_complete(void *param)
buf->buf.field = V4L2_FIELD_NONE;
buf->buf.sequence = dma->sequence++;
buf->buf.vb2_buf.timestamp = ktime_get_ns();
- vb2_set_plane_payload(&buf->buf.vb2_buf, 0, dma->format.sizeimage);
+
+ status = xilinx_xdma_get_fid(dma->dma, buf->desc, &fid);
+ if (!status) {
+ if (((V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) &&
+ dma->format.fmt.pix_mp.field == V4L2_FIELD_ALTERNATE) ||
+ dma->format.fmt.pix.field == V4L2_FIELD_ALTERNATE) {
+ /*
+ * fid = 1 is odd field i.e. V4L2_FIELD_TOP.
+ * fid = 0 is even field i.e. V4L2_FIELD_BOTTOM.
+ */
+ buf->buf.field = fid ?
+ V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
+
+ if (fid == dma->prev_fid)
+ buf->buf.sequence = dma->sequence++;
+
+ buf->buf.sequence >>= 1;
+ dma->prev_fid = fid;
+ }
+ }
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
+ for (i = 0; i < dma->fmtinfo->buffers; i++) {
+ sizeimage =
+ dma->format.fmt.pix_mp.plane_fmt[i].sizeimage;
+ vb2_set_plane_payload(&buf->buf.vb2_buf, i, sizeimage);
+ }
+ } else {
+ sizeimage = dma->format.fmt.pix.sizeimage;
+ vb2_set_plane_payload(&buf->buf.vb2_buf, 0, sizeimage);
+ }
+
vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
}
@@ -318,13 +328,39 @@ xvip_dma_queue_setup(struct vb2_queue *vq,
unsigned int sizes[], struct device *alloc_devs[])
{
struct xvip_dma *dma = vb2_get_drv_priv(vq);
+ u8 i;
+ int sizeimage;
+
+ /* Multi planar case: Make sure the image size is large enough */
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
+ if (*nplanes) {
+ if (*nplanes != dma->format.fmt.pix_mp.num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < *nplanes; i++) {
+ sizeimage =
+ dma->format.fmt.pix_mp.plane_fmt[i].sizeimage;
+ if (sizes[i] < sizeimage)
+ return -EINVAL;
+ }
+ } else {
+ *nplanes = dma->fmtinfo->buffers;
+ for (i = 0; i < dma->fmtinfo->buffers; i++) {
+ sizeimage =
+ dma->format.fmt.pix_mp.plane_fmt[i].sizeimage;
+ sizes[i] = sizeimage;
+ }
+ }
+ return 0;
+ }
- /* Make sure the image size is large enough. */
- if (*nplanes)
- return sizes[0] < dma->format.sizeimage ? -EINVAL : 0;
+ /* Single planar case: Make sure the image size is large enough */
+ sizeimage = dma->format.fmt.pix.sizeimage;
+ if (*nplanes == 1)
+ return sizes[0] < sizeimage ? -EINVAL : 0;
*nplanes = 1;
- sizes[0] = dma->format.sizeimage;
+ sizes[0] = sizeimage;
return 0;
}
@@ -348,14 +384,20 @@ static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
struct dma_async_tx_descriptor *desc;
dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
u32 flags;
+ u32 luma_size;
+ u32 padding_factor_nume, padding_factor_deno, bpl_nume, bpl_deno;
+ u32 fid = ~0;
+ u32 bpl;
- if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
dma->xt.dir = DMA_DEV_TO_MEM;
dma->xt.src_sgl = false;
dma->xt.dst_sgl = true;
dma->xt.dst_start = addr;
- } else {
+ } else if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+ dma->queue.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
dma->xt.dir = DMA_MEM_TO_DEV;
dma->xt.src_sgl = true;
@@ -363,10 +405,69 @@ static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
dma->xt.src_start = addr;
}
- dma->xt.frame_size = 1;
- dma->sgl[0].size = dma->format.width * dma->fmtinfo->bpp;
- dma->sgl[0].icg = dma->format.bytesperline - dma->sgl[0].size;
- dma->xt.numf = dma->format.height;
+ /*
+ * DMA IP supports only 2 planes, so one datachunk is sufficient
+ * to get start address of 2nd plane
+ */
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
+ struct v4l2_pix_format_mplane *pix_mp;
+
+ pix_mp = &dma->format.fmt.pix_mp;
+ bpl = pix_mp->plane_fmt[0].bytesperline;
+
+ xilinx_xdma_v4l2_config(dma->dma, pix_mp->pixelformat);
+ xvip_width_padding_factor(pix_mp->pixelformat,
+ &padding_factor_nume,
+ &padding_factor_deno);
+ xvip_bpl_scaling_factor(pix_mp->pixelformat, &bpl_nume,
+ &bpl_deno);
+ dma->xt.frame_size = dma->fmtinfo->num_planes;
+ dma->sgl[0].size = (dma->r.width * dma->fmtinfo->bpl_factor *
+ padding_factor_nume * bpl_nume) /
+ (padding_factor_deno * bpl_deno);
+ dma->sgl[0].icg = bpl - dma->sgl[0].size;
+ dma->xt.numf = dma->r.height;
+
+ /*
+ * dst_icg is the number of bytes to jump after last luma addr
+ * and before first chroma addr
+ */
+
+ /* Handling contiguous data with mplanes */
+ if (dma->fmtinfo->buffers == 1) {
+ dma->sgl[0].dst_icg = bpl *
+ (pix_mp->height - dma->r.height);
+ } else {
+ /* Handling non-contiguous data with mplanes */
+ if (dma->fmtinfo->buffers == 2) {
+ dma_addr_t chroma_addr =
+ vb2_dma_contig_plane_dma_addr(vb, 1);
+ luma_size = bpl * dma->xt.numf;
+ if (chroma_addr > addr)
+ dma->sgl[0].dst_icg = chroma_addr -
+ addr - luma_size;
+ }
+ }
+ } else {
+ struct v4l2_pix_format *pix;
+
+ pix = &dma->format.fmt.pix;
+ bpl = pix->bytesperline;
+ xilinx_xdma_v4l2_config(dma->dma, pix->pixelformat);
+ xvip_width_padding_factor(pix->pixelformat,
+ &padding_factor_nume,
+ &padding_factor_deno);
+ xvip_bpl_scaling_factor(pix->pixelformat, &bpl_nume,
+ &bpl_deno);
+ dma->xt.frame_size = dma->fmtinfo->num_planes;
+ dma->sgl[0].size = (dma->r.width * dma->fmtinfo->bpl_factor *
+ padding_factor_nume * bpl_nume) /
+ (padding_factor_deno * bpl_deno);
+ dma->sgl[0].icg = bpl - dma->sgl[0].size;
+ dma->xt.numf = dma->r.height;
+ dma->sgl[0].dst_icg = 0;
+ dma->sgl[0].dst_icg = bpl * (pix->height - dma->r.height);
+ }
desc = dmaengine_prep_interleaved_dma(dma->dma, &dma->xt, flags);
if (!desc) {
@@ -376,11 +477,28 @@ static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
}
desc->callback = xvip_dma_complete;
desc->callback_param = buf;
+ buf->desc = desc;
+
+ if (buf->buf.field == V4L2_FIELD_TOP)
+ fid = 1;
+ else if (buf->buf.field == V4L2_FIELD_BOTTOM)
+ fid = 0;
+ else if (buf->buf.field == V4L2_FIELD_NONE)
+ fid = 0;
+
+ xilinx_xdma_set_fid(dma->dma, desc, fid);
spin_lock_irq(&dma->queued_lock);
list_add_tail(&buf->queue, &dma->queued_bufs);
spin_unlock_irq(&dma->queued_lock);
+ /*
+ * Low latency capture: Give descriptor callback at start of
+ * processing the descriptor
+ */
+ if (dma->low_latency_cap)
+ xilinx_xdma_set_earlycb(dma->dma, desc,
+ EARLY_CALLBACK_START_DESC);
dmaengine_submit(desc);
if (vb2_is_streaming(&dma->queue))
@@ -395,6 +513,7 @@ static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
int ret;
dma->sequence = 0;
+ dma->prev_fid = ~0;
/*
* Start streaming on the pipeline. No link touching an entity in the
@@ -403,10 +522,12 @@ static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
* Use the pipeline object embedded in the first DMA object that starts
* streaming.
*/
+ mutex_lock(&dma->xdev->lock);
pipe = dma->video.entity.pipe
? to_xvip_pipeline(&dma->video.entity) : &dma->pipe;
ret = media_pipeline_start(&dma->video.entity, &pipe->pipe);
+ mutex_unlock(&dma->xdev->lock);
if (ret < 0)
goto error;
@@ -423,11 +544,25 @@ static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
/* Start the DMA engine. This must be done before starting the blocks
* in the pipeline to avoid DMA synchronization issues.
+ * We don't want to start DMA in case of low latency capture mode,
+ * applications will start DMA using S_CTRL at later point of time.
*/
- dma_async_issue_pending(dma->dma);
+ if (!dma->low_latency_cap) {
+ dma_async_issue_pending(dma->dma);
+ } else {
+ /* For low latency capture, return the first buffer early
+ * so that consumer can initialize until we start DMA.
+ */
+ buf = list_first_entry(&dma->queued_bufs,
+ struct xvip_dma_buffer, queue);
+ xvip_dma_complete(buf);
+ buf->desc->callback = NULL;
+ }
/* Start the pipeline. */
- xvip_pipeline_set_stream(pipe, true);
+ ret = xvip_pipeline_set_stream(pipe, true);
+ if (ret < 0)
+ goto error_stop;
return 0;
@@ -435,6 +570,7 @@ error_stop:
media_pipeline_stop(&dma->video.entity);
error:
+ dmaengine_terminate_all(dma->dma);
/* Give back all queued buffers to videobuf2. */
spin_lock_irq(&dma->queued_lock);
list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
@@ -502,6 +638,100 @@ xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
return 0;
}
+/**
+ * xvip_xdma_enum_fmt - Enumerate fourccs legal for the current mbus code
+ * @dma: DMA channel being enumerated
+ * @f: VIDIOC_ENUM_FMT descriptor; f->index selects, f->pixelformat is set
+ * @v4l_fmt: the remote subdev's active media bus format
+ *
+ * Queries the frmbuf DMA driver for its supported fourccs and caches,
+ * in dma->poss_v4l2_fmts, the subset whose media bus code matches the
+ * remote subdev's current code. The cache is rebuilt only when that
+ * code changes (or on first call, when remote_subdev_med_bus is 0).
+ * NOTE(review): the backing array is sized for the first fmt_cnt seen
+ * and never grown — assumes fmt_cnt is constant per channel; confirm.
+ *
+ * Return: 0 with f->pixelformat set, -EINVAL if f->index is out of
+ * range, or a negative errno from the DMA/format lookups.
+ */
+static int xvip_xdma_enum_fmt(struct xvip_dma *dma, struct v4l2_fmtdesc *f,
+ struct v4l2_subdev_format *v4l_fmt)
+{
+ const struct xvip_video_format *fmt;
+ int ret;
+ u32 i, fmt_cnt, *fmts;
+
+ ret = xilinx_xdma_get_v4l2_vid_fmts(dma->dma, &fmt_cnt, &fmts);
+ if (ret)
+ return ret;
+
+ /* Has media pad value changed? */
+ if (v4l_fmt->format.code != dma->remote_subdev_med_bus ||
+ !dma->remote_subdev_med_bus) {
+ /* Re-generate legal list of fourcc codes */
+ dma->poss_v4l2_fmt_cnt = 0;
+ dma->remote_subdev_med_bus = v4l_fmt->format.code;
+
+ if (!dma->poss_v4l2_fmts) {
+ dma->poss_v4l2_fmts =
+ devm_kzalloc(&dma->video.dev,
+ sizeof(u32) * fmt_cnt,
+ GFP_KERNEL);
+ if (!dma->poss_v4l2_fmts)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < fmt_cnt; i++) {
+ fmt = xvip_get_format_by_fourcc(fmts[i]);
+ if (IS_ERR(fmt))
+ return PTR_ERR(fmt);
+
+ if (fmt->code != dma->remote_subdev_med_bus)
+ continue;
+
+ dma->poss_v4l2_fmts[dma->poss_v4l2_fmt_cnt++] = fmts[i];
+ }
+ }
+
+ /* Return err if index is greater than count of legal values */
+ if (f->index >= dma->poss_v4l2_fmt_cnt)
+ return -EINVAL;
+
+ /* Else return pix format in table */
+ fmt = xvip_get_format_by_fourcc(dma->poss_v4l2_fmts[f->index]);
+ if (IS_ERR(fmt))
+ return PTR_ERR(fmt);
+
+ f->pixelformat = fmt->fourcc;
+
+ return 0;
+}
+
+/*
+ * xvip_dma_enum_input - VIDIOC_ENUMINPUT: expose exactly one input,
+ * named after the remote subdev feeding this DMA channel.
+ */
+static int
+xvip_dma_enum_input(struct file *file, void *priv, struct v4l2_input *i)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
+ struct v4l2_subdev *subdev;
+
+ if (i->index > 0)
+ return -EINVAL;
+
+ subdev = xvip_dma_remote_subdev(&dma->pad, NULL);
+ if (!subdev)
+ return -EPIPE;
+
+ /*
+ * FIXME: right now only camera input type is handled.
+ * There should be mechanism to distinguish other types of
+ * input like V4L2_INPUT_TYPE_TUNER and V4L2_INPUT_TYPE_TOUCH.
+ */
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+ strlcpy(i->name, subdev->name, sizeof(i->name));
+
+ return 0;
+}
+
+/* VIDIOC_G_INPUT: the single input is always selected. */
+static int
+xvip_dma_get_input(struct file *file, void *fh, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+/* VIDIOC_S_INPUT: only input 0 exists; anything else is rejected. */
+static int
+xvip_dma_set_input(struct file *file, void *fh, unsigned int i)
+{
+ if (i > 0)
+ return -EINVAL;
+
+ return 0;
+}
+
/* FIXME: without this callback function, some applications are not configured
* with correct formats, and it results in frames in wrong format. Whether this
* callback needs to be required is not clearly defined, so it should be
@@ -512,11 +742,43 @@ xvip_dma_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
struct v4l2_fh *vfh = file->private_data;
struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
+ struct v4l2_subdev *subdev;
+ struct v4l2_subdev_format v4l_fmt;
+ const struct xvip_video_format *fmt;
+ int err, ret;
+
+ /* Establish media pad format */
+ subdev = xvip_dma_remote_subdev(&dma->pad, &v4l_fmt.pad);
+ if (!subdev)
+ return -EPIPE;
+
+ v4l_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &v4l_fmt);
+ if (ret < 0)
+ return ret == -ENOIOCTLCMD ? -EINVAL : ret;
+
+ /*
+ * In case of frmbuf DMA, this will invoke frmbuf driver specific APIs
+ * to enumerate formats otherwise return the pix format corresponding
+ * to subdev's media bus format. This kind of separation would be
+ * helpful for clean up and upstreaming.
+ */
+ err = xvip_xdma_enum_fmt(dma, f, &v4l_fmt);
+ if (!err)
+ return err;
+ /*
+ * This logic will just return one pix format based on subdev's
+ * media bus format
+ */
if (f->index > 0)
return -EINVAL;
- f->pixelformat = dma->format.pixelformat;
+ fmt = xvip_get_format_by_code(v4l_fmt.format.code);
+ if (IS_ERR(fmt))
+ return PTR_ERR(fmt);
+
+ f->pixelformat = fmt->fourcc;
return 0;
}
@@ -527,13 +789,17 @@ xvip_dma_get_format(struct file *file, void *fh, struct v4l2_format *format)
struct v4l2_fh *vfh = file->private_data;
struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
- format->fmt.pix = dma->format;
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
+ format->fmt.pix_mp = dma->format.fmt.pix_mp;
+ else
+ format->fmt.pix = dma->format.fmt.pix;
return 0;
}
static void
-__xvip_dma_try_format(struct xvip_dma *dma, struct v4l2_pix_format *pix,
+__xvip_dma_try_format(struct xvip_dma *dma,
+ struct v4l2_format *format,
const struct xvip_video_format **fmtinfo)
{
const struct xvip_video_format *info;
@@ -544,40 +810,151 @@ __xvip_dma_try_format(struct xvip_dma *dma, struct v4l2_pix_format *pix,
unsigned int width;
unsigned int align;
unsigned int bpl;
+ unsigned int i, hsub, vsub, plane_width, plane_height;
+ unsigned int fourcc;
+ unsigned int padding_factor_nume, padding_factor_deno;
+ unsigned int bpl_nume, bpl_deno;
+ struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);
+ if (!subdev)
+ return;
+
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+ if (ret < 0)
+ return;
+
+ if (fmt.format.field == V4L2_FIELD_ALTERNATE) {
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
+ dma->format.fmt.pix_mp.field = V4L2_FIELD_ALTERNATE;
+ else
+ dma->format.fmt.pix.field = V4L2_FIELD_ALTERNATE;
+ } else {
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
+ dma->format.fmt.pix_mp.field = V4L2_FIELD_NONE;
+ else
+ dma->format.fmt.pix.field = V4L2_FIELD_NONE;
+ }
/* Retrieve format information and select the default format if the
* requested format isn't supported.
*/
- info = xvip_get_format_by_fourcc(pix->pixelformat);
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
+ fourcc = format->fmt.pix_mp.pixelformat;
+ else
+ fourcc = format->fmt.pix.pixelformat;
+
+ info = xvip_get_format_by_fourcc(fourcc);
+
if (IS_ERR(info))
info = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);
- pix->pixelformat = info->fourcc;
- pix->field = V4L2_FIELD_NONE;
+ xvip_width_padding_factor(info->fourcc, &padding_factor_nume,
+ &padding_factor_deno);
+ xvip_bpl_scaling_factor(info->fourcc, &bpl_nume, &bpl_deno);
/* The transfer alignment requirements are expressed in bytes. Compute
* the minimum and maximum values, clamp the requested width and convert
* it back to pixels.
*/
- align = lcm(dma->align, info->bpp);
+ align = lcm(dma->align, info->bpp >> 3);
+ if (!align) {
+ dev_err(dma->xdev->dev,
+ "transfer alignment is 0: dma->align = %x, bpp = %u\n",
+ dma->align, info->bpp);
+ return;
+ }
+
min_width = roundup(XVIP_DMA_MIN_WIDTH, align);
max_width = rounddown(XVIP_DMA_MAX_WIDTH, align);
- width = rounddown(pix->width * info->bpp, align);
- pix->width = clamp(width, min_width, max_width) / info->bpp;
- pix->height = clamp(pix->height, XVIP_DMA_MIN_HEIGHT,
- XVIP_DMA_MAX_HEIGHT);
-
- /* Clamp the requested bytes per line value. If the maximum bytes per
- * line value is zero, the module doesn't support user configurable line
- * sizes. Override the requested value with the minimum in that case.
- */
- min_bpl = pix->width * info->bpp;
- max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
- bpl = rounddown(pix->bytesperline, dma->align);
-
- pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
- pix->sizeimage = pix->bytesperline * pix->height;
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct v4l2_plane_pix_format *plane_fmt;
+
+ pix_mp = &format->fmt.pix_mp;
+ plane_fmt = pix_mp->plane_fmt;
+ pix_mp->field = dma->format.fmt.pix_mp.field;
+ width = rounddown(pix_mp->width * info->bpl_factor, align);
+ pix_mp->width = clamp(width, min_width, max_width) /
+ info->bpl_factor;
+ pix_mp->height = clamp(pix_mp->height, XVIP_DMA_MIN_HEIGHT,
+ XVIP_DMA_MAX_HEIGHT);
+
+ /*
+ * Clamp the requested bytes per line value. If the maximum
+ * bytes per line value is zero, the module doesn't support
+ * user configurable line sizes. Override the requested value
+ * with the minimum in that case.
+ */
+
+ max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
+
+ /* Handling contiguous data with mplanes */
+ if (info->buffers == 1) {
+ min_bpl = (pix_mp->width * info->bpl_factor *
+ padding_factor_nume * bpl_nume) /
+ (padding_factor_deno * bpl_deno);
+ min_bpl = roundup(min_bpl, dma->align);
+ bpl = roundup(plane_fmt[0].bytesperline, dma->align);
+ plane_fmt[0].bytesperline = clamp(bpl, min_bpl,
+ max_bpl);
+
+ if (info->num_planes == 1) {
+ /* Single plane formats */
+ plane_fmt[0].sizeimage =
+ plane_fmt[0].bytesperline *
+ pix_mp->height;
+ } else {
+ /* Multi plane formats */
+ plane_fmt[0].sizeimage =
+ DIV_ROUND_UP(plane_fmt[0].bytesperline *
+ pix_mp->height *
+ info->bpp, 8);
+ }
+ } else {
+ /* Handling non-contiguous data with mplanes */
+ hsub = info->hsub;
+ vsub = info->vsub;
+ for (i = 0; i < info->num_planes; i++) {
+ plane_width = pix_mp->width / (i ? hsub : 1);
+ plane_height = pix_mp->height / (i ? vsub : 1);
+ min_bpl = (plane_width * info->bpl_factor *
+ padding_factor_nume * bpl_nume) /
+ (padding_factor_deno * bpl_deno);
+ min_bpl = roundup(min_bpl, dma->align);
+ bpl = rounddown(plane_fmt[i].bytesperline,
+ dma->align);
+ plane_fmt[i].bytesperline =
+ clamp(bpl, min_bpl, max_bpl);
+ plane_fmt[i].sizeimage =
+ plane_fmt[i].bytesperline *
+ plane_height;
+ }
+ }
+ } else {
+ struct v4l2_pix_format *pix;
+
+ pix = &format->fmt.pix;
+ pix->field = dma->format.fmt.pix.field;
+ width = rounddown(pix->width * info->bpl_factor, align);
+ pix->width = clamp(width, min_width, max_width) /
+ info->bpl_factor;
+ pix->height = clamp(pix->height, XVIP_DMA_MIN_HEIGHT,
+ XVIP_DMA_MAX_HEIGHT);
+
+ min_bpl = (pix->width * info->bpl_factor *
+ padding_factor_nume * bpl_nume) /
+ (padding_factor_deno * bpl_deno);
+ min_bpl = roundup(min_bpl, dma->align);
+ max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
+ bpl = rounddown(pix->bytesperline, dma->align);
+ pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
+ pix->sizeimage = pix->width * pix->height * info->bpp / 8;
+ }
if (fmtinfo)
*fmtinfo = info;
@@ -589,7 +966,7 @@ xvip_dma_try_format(struct file *file, void *fh, struct v4l2_format *format)
struct v4l2_fh *vfh = file->private_data;
struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
- __xvip_dma_try_format(dma, &format->fmt.pix, NULL);
+ __xvip_dma_try_format(dma, format, NULL);
return 0;
}
@@ -600,26 +977,149 @@ xvip_dma_set_format(struct file *file, void *fh, struct v4l2_format *format)
struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
const struct xvip_video_format *info;
- __xvip_dma_try_format(dma, &format->fmt.pix, &info);
+ __xvip_dma_try_format(dma, format, &info);
if (vb2_is_busy(&dma->queue))
return -EBUSY;
- dma->format = format->fmt.pix;
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
+ dma->format.fmt.pix_mp = format->fmt.pix_mp;
+
+ /*
+ * Save format resolution in crop rectangle. This will be
+		 * updated when s_selection is called.
+ */
+ dma->r.width = format->fmt.pix_mp.width;
+ dma->r.height = format->fmt.pix_mp.height;
+ } else {
+ dma->format.fmt.pix = format->fmt.pix;
+ dma->r.width = format->fmt.pix.width;
+ dma->r.height = format->fmt.pix.height;
+ }
+
dma->fmtinfo = info;
return 0;
}
+static int
+xvip_dma_g_selection(struct file *file, void *fh, struct v4l2_selection *sel)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
+ u32 width, height;
+ bool crop_frame = false;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ crop_frame = true;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ crop_frame = true;
+ break;
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ sel->r.left = 0;
+ sel->r.top = 0;
+
+ if (crop_frame) {
+ sel->r.width = dma->r.width;
+ sel->r.height = dma->r.height;
+ } else {
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
+ width = dma->format.fmt.pix_mp.width;
+ height = dma->format.fmt.pix_mp.height;
+ } else {
+ width = dma->format.fmt.pix.width;
+ height = dma->format.fmt.pix.height;
+ }
+
+ sel->r.width = width;
+ sel->r.height = height;
+ }
+
+ return 0;
+}
+
+static int
+xvip_dma_s_selection(struct file *file, void *fh, struct v4l2_selection *sel)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
+ u32 width, height;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ /* COMPOSE target is only valid for capture buftype */
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ /* CROP target is only valid for output buftype */
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
+ width = dma->format.fmt.pix_mp.width;
+ height = dma->format.fmt.pix_mp.height;
+ } else {
+ width = dma->format.fmt.pix.width;
+ height = dma->format.fmt.pix.height;
+ }
+
+ if (sel->r.width > width || sel->r.height > height ||
+ sel->r.top != 0 || sel->r.left != 0)
+ return -EINVAL;
+
+ sel->r.width = roundup(max(XVIP_DMA_MIN_WIDTH, sel->r.width),
+ dma->align);
+ sel->r.height = max(XVIP_DMA_MIN_HEIGHT, sel->r.height);
+ dma->r.width = sel->r.width;
+ dma->r.height = sel->r.height;
+
+ return 0;
+}
+
static const struct v4l2_ioctl_ops xvip_dma_ioctl_ops = {
.vidioc_querycap = xvip_dma_querycap,
.vidioc_enum_fmt_vid_cap = xvip_dma_enum_format,
+ .vidioc_enum_fmt_vid_out = xvip_dma_enum_format,
.vidioc_g_fmt_vid_cap = xvip_dma_get_format,
+ .vidioc_g_fmt_vid_cap_mplane = xvip_dma_get_format,
.vidioc_g_fmt_vid_out = xvip_dma_get_format,
+ .vidioc_g_fmt_vid_out_mplane = xvip_dma_get_format,
.vidioc_s_fmt_vid_cap = xvip_dma_set_format,
+ .vidioc_s_fmt_vid_cap_mplane = xvip_dma_set_format,
.vidioc_s_fmt_vid_out = xvip_dma_set_format,
+ .vidioc_s_fmt_vid_out_mplane = xvip_dma_set_format,
.vidioc_try_fmt_vid_cap = xvip_dma_try_format,
+ .vidioc_try_fmt_vid_cap_mplane = xvip_dma_try_format,
.vidioc_try_fmt_vid_out = xvip_dma_try_format,
+ .vidioc_try_fmt_vid_out_mplane = xvip_dma_try_format,
+ .vidioc_s_selection = xvip_dma_s_selection,
+ .vidioc_g_selection = xvip_dma_g_selection,
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_querybuf = vb2_ioctl_querybuf,
.vidioc_qbuf = vb2_ioctl_qbuf,
@@ -628,6 +1128,99 @@ static const struct v4l2_ioctl_ops xvip_dma_ioctl_ops = {
.vidioc_expbuf = vb2_ioctl_expbuf,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_enum_input = &xvip_dma_enum_input,
+ .vidioc_g_input = &xvip_dma_get_input,
+ .vidioc_s_input = &xvip_dma_set_input,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 controls
+ */
+
+static int xvip_dma_s_ctrl(struct v4l2_ctrl *ctl)
+{
+ struct xvip_dma *dma = container_of(ctl->handler, struct xvip_dma,
+ ctrl_handler);
+ int ret = 0;
+
+ switch (ctl->id) {
+ case V4L2_CID_XILINX_LOW_LATENCY:
+ if (ctl->val == XVIP_LOW_LATENCY_ENABLE) {
+ if (vb2_is_busy(&dma->queue))
+ return -EBUSY;
+
+ dma->low_latency_cap = true;
+ /*
+ * Don't use auto-restart for low latency
+ * to avoid extra one frame delay between
+ * programming and actual writing of data
+ */
+ xilinx_xdma_set_mode(dma->dma, DEFAULT);
+ } else if (ctl->val == XVIP_LOW_LATENCY_DISABLE) {
+ if (vb2_is_busy(&dma->queue))
+ return -EBUSY;
+
+ dma->low_latency_cap = false;
+ xilinx_xdma_set_mode(dma->dma, AUTO_RESTART);
+ } else if (ctl->val == XVIP_START_DMA) {
+ /*
+ * In low latency capture, the driver allows application
+ * to start dma when queue has buffers. That's why we
+ * don't check for vb2_is_busy().
+ */
+ if (dma->low_latency_cap &&
+ vb2_is_streaming(&dma->queue))
+ dma_async_issue_pending(dma->dma);
+ else
+ ret = -EINVAL;
+ } else {
+ ret = -EINVAL;
+ }
+
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int xvip_dma_open(struct file *file)
+{
+ int ret;
+
+ ret = v4l2_fh_open(file);
+ if (ret)
+ return ret;
+
+ /* Disable the low latency mode as default */
+ if (v4l2_fh_is_singular_file(file)) {
+ struct xvip_dma *dma = video_drvdata(file);
+
+ mutex_lock(&dma->lock);
+ dma->low_latency_cap = false;
+ xilinx_xdma_set_mode(dma->dma, AUTO_RESTART);
+ mutex_unlock(&dma->lock);
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops xvip_dma_ctrl_ops = {
+ .s_ctrl = xvip_dma_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config xvip_dma_ctrls[] = {
+ {
+ .ops = &xvip_dma_ctrl_ops,
+ .id = V4L2_CID_XILINX_LOW_LATENCY,
+ .name = "Low Latency Controls",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = XVIP_LOW_LATENCY_ENABLE,
+ .max = XVIP_START_DMA,
+ .step = 1,
+ .def = XVIP_LOW_LATENCY_DISABLE,
+ }
};
/* -----------------------------------------------------------------------------
@@ -637,7 +1230,7 @@ static const struct v4l2_ioctl_ops xvip_dma_ioctl_ops = {
static const struct v4l2_file_operations xvip_dma_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = video_ioctl2,
- .open = v4l2_fh_open,
+ .open = xvip_dma_open,
.release = vb2_fop_release,
.poll = vb2_fop_poll,
.mmap = vb2_fop_mmap,
@@ -652,6 +1245,7 @@ int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
{
char name[16];
int ret;
+ u32 i, hsub, vsub, width, height;
dma->xdev = xdev;
dma->port = port;
@@ -661,41 +1255,131 @@ int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
spin_lock_init(&dma->queued_lock);
dma->fmtinfo = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);
- dma->format.pixelformat = dma->fmtinfo->fourcc;
- dma->format.colorspace = V4L2_COLORSPACE_SRGB;
- dma->format.field = V4L2_FIELD_NONE;
- dma->format.width = XVIP_DMA_DEF_WIDTH;
- dma->format.height = XVIP_DMA_DEF_HEIGHT;
- dma->format.bytesperline = dma->format.width * dma->fmtinfo->bpp;
- dma->format.sizeimage = dma->format.bytesperline * dma->format.height;
+ dma->format.type = type;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
+ struct v4l2_pix_format_mplane *pix_mp;
+
+ pix_mp = &dma->format.fmt.pix_mp;
+ pix_mp->pixelformat = dma->fmtinfo->fourcc;
+ pix_mp->colorspace = V4L2_COLORSPACE_SRGB;
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->width = XVIP_DMA_DEF_WIDTH;
+
+ /* Handling contiguous data with mplanes */
+ if (dma->fmtinfo->buffers == 1) {
+ pix_mp->plane_fmt[0].bytesperline =
+ pix_mp->width * dma->fmtinfo->bpl_factor;
+ pix_mp->plane_fmt[0].sizeimage =
+ pix_mp->width * pix_mp->height *
+ dma->fmtinfo->bpp / 8;
+ } else {
+ /* Handling non-contiguous data with mplanes */
+ hsub = dma->fmtinfo->hsub;
+ vsub = dma->fmtinfo->vsub;
+ for (i = 0; i < dma->fmtinfo->buffers; i++) {
+ width = pix_mp->width / (i ? hsub : 1);
+ height = pix_mp->height / (i ? vsub : 1);
+ pix_mp->plane_fmt[i].bytesperline =
+ width * dma->fmtinfo->bpl_factor;
+ pix_mp->plane_fmt[i].sizeimage = width * height;
+ }
+ }
+ } else {
+ struct v4l2_pix_format *pix;
+
+ pix = &dma->format.fmt.pix;
+ pix->pixelformat = dma->fmtinfo->fourcc;
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ pix->field = V4L2_FIELD_NONE;
+ pix->width = XVIP_DMA_DEF_WIDTH;
+ pix->height = XVIP_DMA_DEF_HEIGHT;
+ pix->bytesperline = pix->width * dma->fmtinfo->bpl_factor;
+ pix->sizeimage =
+ pix->width * pix->height * dma->fmtinfo->bpp / 8;
+ }
/* Initialize the media entity... */
- dma->pad.flags = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
- ? MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+ if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ dma->pad.flags = MEDIA_PAD_FL_SINK;
+ else
+ dma->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&dma->video.entity, 1, &dma->pad);
if (ret < 0)
goto error;
+ ret = v4l2_ctrl_handler_init(&dma->ctrl_handler,
+ ARRAY_SIZE(xvip_dma_ctrls));
+ if (ret < 0) {
+ dev_err(dma->xdev->dev, "failed to initialize V4L2 ctrl\n");
+ goto error;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(xvip_dma_ctrls); i++) {
+ struct v4l2_ctrl *ctrl;
+
+ dev_dbg(dma->xdev->dev, "%d ctrl = 0x%x\n", i,
+ xvip_dma_ctrls[i].id);
+ ctrl = v4l2_ctrl_new_custom(&dma->ctrl_handler,
+ &xvip_dma_ctrls[i], NULL);
+ if (!ctrl) {
+ dev_err(dma->xdev->dev, "Failed for %s ctrl\n",
+ xvip_dma_ctrls[i].name);
+ goto error;
+ }
+ }
+
+ if (dma->ctrl_handler.error) {
+ dev_err(dma->xdev->dev, "failed to add controls\n");
+ ret = dma->ctrl_handler.error;
+ goto error;
+ }
+
+ dma->video.ctrl_handler = &dma->ctrl_handler;
+ ret = v4l2_ctrl_handler_setup(&dma->ctrl_handler);
+ if (ret < 0) {
+ dev_err(dma->xdev->dev, "failed to set controls\n");
+ goto error;
+ }
+
/* ... and the video node... */
dma->video.fops = &xvip_dma_fops;
dma->video.v4l2_dev = &xdev->v4l2_dev;
dma->video.queue = &dma->queue;
snprintf(dma->video.name, sizeof(dma->video.name), "%pOFn %s %u",
xdev->dev->of_node,
- type == V4L2_BUF_TYPE_VIDEO_CAPTURE ? "output" : "input",
+ (type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ ? "output" : "input",
port);
+
dma->video.vfl_type = VFL_TYPE_VIDEO;
- dma->video.vfl_dir = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
- ? VFL_DIR_RX : VFL_DIR_TX;
+ if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ dma->video.vfl_dir = VFL_DIR_RX;
+ else
+ dma->video.vfl_dir = VFL_DIR_TX;
+
dma->video.release = video_device_release_empty;
dma->video.ioctl_ops = &xvip_dma_ioctl_ops;
dma->video.lock = &dma->lock;
dma->video.device_caps = V4L2_CAP_STREAMING;
- if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ switch (dma->format.type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ dma->video.device_caps |= V4L2_CAP_VIDEO_CAPTURE_MPLANE;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
dma->video.device_caps |= V4L2_CAP_VIDEO_CAPTURE;
- else
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ dma->video.device_caps |= V4L2_CAP_VIDEO_OUTPUT_MPLANE;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
dma->video.device_caps |= V4L2_CAP_VIDEO_OUTPUT;
+ break;
+ }
video_set_drvdata(&dma->video, dma);
@@ -756,6 +1440,7 @@ void xvip_dma_cleanup(struct xvip_dma *dma)
if (!IS_ERR_OR_NULL(dma->dma))
dma_release_channel(dma->dma);
+ v4l2_ctrl_handler_free(&dma->ctrl_handler);
media_entity_cleanup(&dma->video.entity);
mutex_destroy(&dma->lock);
diff --git a/drivers/media/platform/xilinx/xilinx-dma.h b/drivers/media/platform/xilinx/xilinx-dma.h
index 2378bdae57ae..31154bcd1717 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.h
+++ b/drivers/media/platform/xilinx/xilinx-dma.h
@@ -18,6 +18,7 @@
#include <linux/videodev2.h>
#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-dev.h>
#include <media/videobuf2-v4l2.h>
@@ -32,7 +33,7 @@ struct xvip_video_format;
* @use_count: number of DMA engines using the pipeline
* @stream_count: number of DMA engines currently streaming
* @num_dmas: number of DMA engines in the pipeline
- * @output: DMA engine at the output of the pipeline
+ * @xdev: Composite device the pipe belongs to
*/
struct xvip_pipeline {
struct media_pipeline pipe;
@@ -42,7 +43,7 @@ struct xvip_pipeline {
unsigned int stream_count;
unsigned int num_dmas;
- struct xvip_dma *output;
+ struct xvip_composite_device *xdev;
};
static inline struct xvip_pipeline *to_xvip_pipeline(struct media_entity *e)
@@ -55,12 +56,17 @@ static inline struct xvip_pipeline *to_xvip_pipeline(struct media_entity *e)
* @list: list entry in a composite device dmas list
* @video: V4L2 video device associated with the DMA channel
* @pad: media pad for the video device entity
+ * @remote_subdev_med_bus: media bus format of sub-device
+ * @ctrl_handler: V4L2 ctrl_handler for inheriting controls from the subdev
* @xdev: composite device the DMA channel belongs to
* @pipe: pipeline belonging to the DMA channel
* @port: composite device DT node port number for the DMA channel
* @lock: protects the @format, @fmtinfo and @queue fields
* @format: active V4L2 pixel format
+ * @r: crop rectangle parameters
* @fmtinfo: format information corresponding to the active @format
+ * @poss_v4l2_fmts: All possible v4l formats supported
+ * @poss_v4l2_fmt_cnt: number of supported v4l formats
* @queue: vb2 buffers queue
* @sequence: V4L2 buffers sequence number
* @queued_bufs: list of queued buffers
@@ -69,19 +75,27 @@ static inline struct xvip_pipeline *to_xvip_pipeline(struct media_entity *e)
* @align: transfer alignment required by the DMA channel (in bytes)
* @xt: dma interleaved template for dma configuration
* @sgl: data chunk structure for dma_interleaved_template
+ * @prev_fid: Previous Field ID
+ * @low_latency_cap: Low latency capture mode
*/
struct xvip_dma {
struct list_head list;
struct video_device video;
struct media_pad pad;
+ u32 remote_subdev_med_bus;
+
+ struct v4l2_ctrl_handler ctrl_handler;
struct xvip_composite_device *xdev;
struct xvip_pipeline pipe;
unsigned int port;
struct mutex lock;
- struct v4l2_pix_format format;
+ struct v4l2_format format;
+ struct v4l2_rect r;
const struct xvip_video_format *fmtinfo;
+ u32 *poss_v4l2_fmts;
+ u32 poss_v4l2_fmt_cnt;
struct vb2_queue queue;
unsigned int sequence;
@@ -93,6 +107,9 @@ struct xvip_dma {
unsigned int align;
struct dma_interleaved_template xt;
struct data_chunk sgl[1];
+
+ u32 prev_fid;
+ u32 low_latency_cap;
};
#define to_xvip_dma(vdev) container_of(vdev, struct xvip_dma, video)
diff --git a/drivers/media/platform/xilinx/xilinx-gamma-coeff.h b/drivers/media/platform/xilinx/xilinx-gamma-coeff.h
new file mode 100644
index 000000000000..344260008a47
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-gamma-coeff.h
@@ -0,0 +1,5385 @@
+/*
+ * Xilinx Gamma Correction IP
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __XILINX_GAMMA_COEFF_H__
+#define __XILINX_GAMMA_COEFF_H__
+
+#define GAMMA_CURVE_LENGTH (40)
+
+#define GAMMA_BPC_8 (8)
+#define GAMMA8_TABLE_LENGTH BIT(GAMMA_BPC_8)
+static const u16 xgamma8_01[GAMMA8_TABLE_LENGTH] = {
+ 0, 147, 157, 164, 168, 172, 175, 178,
+ 180, 183, 184, 186, 188, 189, 191, 192,
+ 193, 195, 196, 197, 198, 199, 200, 200,
+ 201, 202, 203, 204, 204, 205, 206, 207,
+ 207, 208, 208, 209, 210, 210, 211, 211,
+ 212, 212, 213, 213, 214, 214, 215, 215,
+ 216, 216, 217, 217, 218, 218, 218, 219,
+ 219, 220, 220, 220, 221, 221, 221, 222,
+ 222, 222, 223, 223, 223, 224, 224, 224,
+ 225, 225, 225, 226, 226, 226, 227, 227,
+ 227, 227, 228, 228, 228, 228, 229, 229,
+ 229, 230, 230, 230, 230, 231, 231, 231,
+ 231, 232, 232, 232, 232, 232, 233, 233,
+ 233, 233, 234, 234, 234, 234, 234, 235,
+ 235, 235, 235, 235, 236, 236, 236, 236,
+ 236, 237, 237, 237, 237, 237, 238, 238,
+ 238, 238, 238, 239, 239, 239, 239, 239,
+ 239, 240, 240, 240, 240, 240, 240, 241,
+ 241, 241, 241, 241, 241, 242, 242, 242,
+ 242, 242, 242, 243, 243, 243, 243, 243,
+ 243, 244, 244, 244, 244, 244, 244, 244,
+ 245, 245, 245, 245, 245, 245, 245, 246,
+ 246, 246, 246, 246, 246, 246, 247, 247,
+ 247, 247, 247, 247, 247, 247, 248, 248,
+ 248, 248, 248, 248, 248, 249, 249, 249,
+ 249, 249, 249, 249, 249, 249, 250, 250,
+ 250, 250, 250, 250, 250, 250, 251, 251,
+ 251, 251, 251, 251, 251, 251, 251, 252,
+ 252, 252, 252, 252, 252, 252, 252, 252,
+ 253, 253, 253, 253, 253, 253, 253, 253,
+ 253, 254, 254, 254, 254, 254, 254, 254,
+ 254, 254, 254, 255, 255, 255, 255, 255,
+};
+
+static const u16 xgamma8_02[GAMMA8_TABLE_LENGTH] = {
+ 0, 84, 97, 105, 111, 116, 120, 124,
+ 128, 131, 133, 136, 138, 141, 143, 145,
+ 147, 148, 150, 152, 153, 155, 156, 158,
+ 159, 160, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 178, 179, 179, 180, 181, 182,
+ 183, 183, 184, 185, 186, 186, 187, 188,
+ 188, 189, 190, 190, 191, 192, 192, 193,
+ 193, 194, 195, 195, 196, 196, 197, 197,
+ 198, 199, 199, 200, 200, 201, 201, 202,
+ 202, 203, 203, 204, 204, 205, 205, 206,
+ 206, 207, 207, 208, 208, 208, 209, 209,
+ 210, 210, 211, 211, 211, 212, 212, 213,
+ 213, 214, 214, 214, 215, 215, 216, 216,
+ 216, 217, 217, 217, 218, 218, 219, 219,
+ 219, 220, 220, 220, 221, 221, 221, 222,
+ 222, 223, 223, 223, 224, 224, 224, 225,
+ 225, 225, 226, 226, 226, 227, 227, 227,
+ 227, 228, 228, 228, 229, 229, 229, 230,
+ 230, 230, 231, 231, 231, 231, 232, 232,
+ 232, 233, 233, 233, 233, 234, 234, 234,
+ 235, 235, 235, 235, 236, 236, 236, 237,
+ 237, 237, 237, 238, 238, 238, 238, 239,
+ 239, 239, 239, 240, 240, 240, 240, 241,
+ 241, 241, 241, 242, 242, 242, 242, 243,
+ 243, 243, 243, 244, 244, 244, 244, 245,
+ 245, 245, 245, 246, 246, 246, 246, 246,
+ 247, 247, 247, 247, 248, 248, 248, 248,
+ 248, 249, 249, 249, 249, 250, 250, 250,
+ 250, 250, 251, 251, 251, 251, 252, 252,
+ 252, 252, 252, 253, 253, 253, 253, 253,
+ 254, 254, 254, 254, 254, 255, 255, 255,
+};
+
+static const u16 xgamma8_03[GAMMA8_TABLE_LENGTH] = {
+ 0, 48, 60, 67, 73, 78, 83, 87,
+ 90, 94, 97, 99, 102, 104, 107, 109,
+ 111, 113, 115, 117, 119, 121, 122, 124,
+ 125, 127, 129, 130, 131, 133, 134, 136,
+ 137, 138, 139, 141, 142, 143, 144, 145,
+ 146, 147, 148, 149, 151, 152, 153, 154,
+ 155, 155, 156, 157, 158, 159, 160, 161,
+ 162, 163, 164, 164, 165, 166, 167, 168,
+ 168, 169, 170, 171, 172, 172, 173, 174,
+ 174, 175, 176, 177, 177, 178, 179, 179,
+ 180, 181, 181, 182, 183, 183, 184, 185,
+ 185, 186, 187, 187, 188, 188, 189, 190,
+ 190, 191, 191, 192, 193, 193, 194, 194,
+ 195, 195, 196, 197, 197, 198, 198, 199,
+ 199, 200, 200, 201, 201, 202, 202, 203,
+ 203, 204, 204, 205, 205, 206, 206, 207,
+ 207, 208, 208, 209, 209, 210, 210, 211,
+ 211, 212, 212, 213, 213, 213, 214, 214,
+ 215, 215, 216, 216, 217, 217, 217, 218,
+ 218, 219, 219, 220, 220, 220, 221, 221,
+ 222, 222, 223, 223, 223, 224, 224, 225,
+ 225, 225, 226, 226, 227, 227, 227, 228,
+ 228, 229, 229, 229, 230, 230, 230, 231,
+ 231, 232, 232, 232, 233, 233, 233, 234,
+ 234, 235, 235, 235, 236, 236, 236, 237,
+ 237, 237, 238, 238, 238, 239, 239, 240,
+ 240, 240, 241, 241, 241, 242, 242, 242,
+ 243, 243, 243, 244, 244, 244, 245, 245,
+ 245, 246, 246, 246, 247, 247, 247, 248,
+ 248, 248, 249, 249, 249, 249, 250, 250,
+ 250, 251, 251, 251, 252, 252, 252, 253,
+ 253, 253, 253, 254, 254, 254, 255, 255,
+};
+
+static const u16 xgamma8_04[GAMMA8_TABLE_LENGTH] = {
+ 0, 28, 37, 43, 48, 53, 57, 61,
+ 64, 67, 70, 73, 75, 78, 80, 82,
+ 84, 86, 88, 90, 92, 94, 96, 97,
+ 99, 101, 102, 104, 105, 107, 108, 110,
+ 111, 113, 114, 115, 117, 118, 119, 120,
+ 122, 123, 124, 125, 126, 127, 129, 130,
+ 131, 132, 133, 134, 135, 136, 137, 138,
+ 139, 140, 141, 142, 143, 144, 145, 146,
+ 147, 148, 149, 149, 150, 151, 152, 153,
+ 154, 155, 155, 156, 157, 158, 159, 160,
+ 160, 161, 162, 163, 164, 164, 165, 166,
+ 167, 167, 168, 169, 170, 170, 171, 172,
+ 173, 173, 174, 175, 175, 176, 177, 177,
+ 178, 179, 179, 180, 181, 182, 182, 183,
+ 183, 184, 185, 185, 186, 187, 187, 188,
+ 189, 189, 190, 190, 191, 192, 192, 193,
+ 194, 194, 195, 195, 196, 197, 197, 198,
+ 198, 199, 199, 200, 201, 201, 202, 202,
+ 203, 203, 204, 205, 205, 206, 206, 207,
+ 207, 208, 208, 209, 209, 210, 211, 211,
+ 212, 212, 213, 213, 214, 214, 215, 215,
+ 216, 216, 217, 217, 218, 218, 219, 219,
+ 220, 220, 221, 221, 222, 222, 223, 223,
+ 224, 224, 225, 225, 226, 226, 227, 227,
+ 228, 228, 229, 229, 230, 230, 230, 231,
+ 231, 232, 232, 233, 233, 234, 234, 235,
+ 235, 235, 236, 236, 237, 237, 238, 238,
+ 239, 239, 240, 240, 240, 241, 241, 242,
+ 242, 243, 243, 243, 244, 244, 245, 245,
+ 246, 246, 246, 247, 247, 248, 248, 248,
+ 249, 249, 250, 250, 251, 251, 251, 252,
+ 252, 253, 253, 253, 254, 254, 255, 255,
+};
+
+static const u16 xgamma8_05[GAMMA8_TABLE_LENGTH] = {
+ 0, 16, 23, 28, 32, 36, 39, 42,
+ 45, 48, 50, 53, 55, 58, 60, 62,
+ 64, 66, 68, 70, 71, 73, 75, 77,
+ 78, 80, 81, 83, 84, 86, 87, 89,
+ 90, 92, 93, 94, 96, 97, 98, 100,
+ 101, 102, 103, 105, 106, 107, 108, 109,
+ 111, 112, 113, 114, 115, 116, 117, 118,
+ 119, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135,
+ 135, 136, 137, 138, 139, 140, 141, 142,
+ 143, 144, 145, 145, 146, 147, 148, 149,
+ 150, 151, 151, 152, 153, 154, 155, 156,
+ 156, 157, 158, 159, 160, 160, 161, 162,
+ 163, 164, 164, 165, 166, 167, 167, 168,
+ 169, 170, 170, 171, 172, 173, 173, 174,
+ 175, 176, 176, 177, 178, 179, 179, 180,
+ 181, 181, 182, 183, 183, 184, 185, 186,
+ 186, 187, 188, 188, 189, 190, 190, 191,
+ 192, 192, 193, 194, 194, 195, 196, 196,
+ 197, 198, 198, 199, 199, 200, 201, 201,
+ 202, 203, 203, 204, 204, 205, 206, 206,
+ 207, 208, 208, 209, 209, 210, 211, 211,
+ 212, 212, 213, 214, 214, 215, 215, 216,
+ 217, 217, 218, 218, 219, 220, 220, 221,
+ 221, 222, 222, 223, 224, 224, 225, 225,
+ 226, 226, 227, 228, 228, 229, 229, 230,
+ 230, 231, 231, 232, 233, 233, 234, 234,
+ 235, 235, 236, 236, 237, 237, 238, 238,
+ 239, 240, 240, 241, 241, 242, 242, 243,
+ 243, 244, 244, 245, 245, 246, 246, 247,
+ 247, 248, 248, 249, 249, 250, 250, 251,
+ 251, 252, 252, 253, 253, 254, 254, 255,
+};
+
+static const u16 xgamma8_06[GAMMA8_TABLE_LENGTH] = {
+ 0, 9, 14, 18, 21, 24, 27, 29,
+ 32, 34, 37, 39, 41, 43, 45, 47,
+ 48, 50, 52, 54, 55, 57, 59, 60,
+ 62, 63, 65, 66, 68, 69, 71, 72,
+ 73, 75, 76, 77, 79, 80, 81, 83,
+ 84, 85, 86, 88, 89, 90, 91, 92,
+ 94, 95, 96, 97, 98, 99, 100, 102,
+ 103, 104, 105, 106, 107, 108, 109, 110,
+ 111, 112, 113, 114, 115, 116, 117, 118,
+ 119, 120, 121, 122, 123, 124, 125, 126,
+ 127, 128, 129, 130, 131, 132, 133, 134,
+ 135, 136, 137, 137, 138, 139, 140, 141,
+ 142, 143, 144, 145, 145, 146, 147, 148,
+ 149, 150, 151, 151, 152, 153, 154, 155,
+ 156, 156, 157, 158, 159, 160, 161, 161,
+ 162, 163, 164, 165, 165, 166, 167, 168,
+ 169, 169, 170, 171, 172, 173, 173, 174,
+ 175, 176, 176, 177, 178, 179, 179, 180,
+ 181, 182, 182, 183, 184, 185, 185, 186,
+ 187, 188, 188, 189, 190, 191, 191, 192,
+ 193, 194, 194, 195, 196, 196, 197, 198,
+ 199, 199, 200, 201, 201, 202, 203, 203,
+ 204, 205, 206, 206, 207, 208, 208, 209,
+ 210, 210, 211, 212, 212, 213, 214, 214,
+ 215, 216, 216, 217, 218, 218, 219, 220,
+ 220, 221, 222, 222, 223, 224, 224, 225,
+ 226, 226, 227, 228, 228, 229, 230, 230,
+ 231, 231, 232, 233, 233, 234, 235, 235,
+ 236, 237, 237, 238, 238, 239, 240, 240,
+ 241, 242, 242, 243, 243, 244, 245, 245,
+ 246, 247, 247, 248, 248, 249, 250, 250,
+ 251, 251, 252, 253, 253, 254, 254, 255,
+};
+
+static const u16 xgamma8_07[GAMMA8_TABLE_LENGTH] = {
+ 0, 5, 9, 11, 14, 16, 18, 21,
+ 23, 25, 26, 28, 30, 32, 33, 35,
+ 37, 38, 40, 41, 43, 44, 46, 47,
+ 49, 50, 52, 53, 54, 56, 57, 58,
+ 60, 61, 62, 64, 65, 66, 67, 69,
+ 70, 71, 72, 73, 75, 76, 77, 78,
+ 79, 80, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 92, 93, 94, 95, 96,
+ 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110, 111, 112,
+ 113, 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 123, 124, 125, 126, 127, 128,
+ 129, 130, 131, 131, 132, 133, 134, 135,
+ 136, 137, 138, 139, 140, 141, 142, 142,
+ 143, 144, 145, 146, 147, 148, 149, 150,
+ 150, 151, 152, 153, 154, 155, 156, 157,
+ 157, 158, 159, 160, 161, 162, 163, 163,
+ 164, 165, 166, 167, 168, 168, 169, 170,
+ 171, 172, 173, 173, 174, 175, 176, 177,
+ 178, 178, 179, 180, 181, 182, 182, 183,
+ 184, 185, 186, 186, 187, 188, 189, 190,
+ 190, 191, 192, 193, 194, 194, 195, 196,
+ 197, 197, 198, 199, 200, 201, 201, 202,
+ 203, 204, 204, 205, 206, 207, 208, 208,
+ 209, 210, 211, 211, 212, 213, 214, 214,
+ 215, 216, 217, 217, 218, 219, 220, 220,
+ 221, 222, 223, 223, 224, 225, 226, 226,
+ 227, 228, 228, 229, 230, 231, 231, 232,
+ 233, 234, 234, 235, 236, 237, 237, 238,
+ 239, 239, 240, 241, 242, 242, 243, 244,
+ 244, 245, 246, 247, 247, 248, 249, 249,
+ 250, 251, 251, 252, 253, 254, 254, 255,
+};
+
+static const u16 xgamma8_08[GAMMA8_TABLE_LENGTH] = {
+ 0, 3, 5, 7, 9, 11, 13, 14,
+ 16, 18, 19, 21, 22, 24, 25, 26,
+ 28, 29, 31, 32, 33, 35, 36, 37,
+ 39, 40, 41, 42, 44, 45, 46, 47,
+ 48, 50, 51, 52, 53, 54, 56, 57,
+ 58, 59, 60, 61, 63, 64, 65, 66,
+ 67, 68, 69, 70, 71, 73, 74, 75,
+ 76, 77, 78, 79, 80, 81, 82, 83,
+ 84, 85, 86, 88, 89, 90, 91, 92,
+ 93, 94, 95, 96, 97, 98, 99, 100,
+ 101, 102, 103, 104, 105, 106, 107, 108,
+ 109, 110, 111, 112, 113, 114, 115, 116,
+ 117, 118, 119, 120, 121, 122, 123, 123,
+ 124, 125, 126, 127, 128, 129, 130, 131,
+ 132, 133, 134, 135, 136, 137, 138, 139,
+ 140, 140, 141, 142, 143, 144, 145, 146,
+ 147, 148, 149, 150, 151, 151, 152, 153,
+ 154, 155, 156, 157, 158, 159, 160, 161,
+ 161, 162, 163, 164, 165, 166, 167, 168,
+ 169, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 177, 178, 179, 180, 181, 182,
+ 183, 183, 184, 185, 186, 187, 188, 189,
+ 190, 190, 191, 192, 193, 194, 195, 196,
+ 196, 197, 198, 199, 200, 201, 202, 202,
+ 203, 204, 205, 206, 207, 207, 208, 209,
+ 210, 211, 212, 212, 213, 214, 215, 216,
+ 217, 217, 218, 219, 220, 221, 222, 222,
+ 223, 224, 225, 226, 227, 227, 228, 229,
+ 230, 231, 232, 232, 233, 234, 235, 236,
+ 236, 237, 238, 239, 240, 240, 241, 242,
+ 243, 244, 245, 245, 246, 247, 248, 249,
+ 249, 250, 251, 252, 253, 253, 254, 255,
+};
+
+static const u16 xgamma8_09[GAMMA8_TABLE_LENGTH] = {
+ 0, 2, 3, 5, 6, 7, 9, 10,
+ 11, 13, 14, 15, 16, 18, 19, 20,
+ 21, 22, 23, 25, 26, 27, 28, 29,
+ 30, 32, 33, 34, 35, 36, 37, 38,
+ 39, 40, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, 66, 67, 68, 69, 70, 71, 72,
+ 73, 75, 76, 77, 78, 79, 80, 81,
+ 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97,
+ 98, 99, 100, 101, 102, 103, 104, 105,
+ 106, 107, 108, 109, 110, 111, 112, 113,
+ 114, 115, 116, 117, 118, 119, 120, 121,
+ 122, 123, 124, 125, 126, 126, 127, 128,
+ 129, 130, 131, 132, 133, 134, 135, 136,
+ 137, 138, 139, 140, 141, 142, 143, 144,
+ 145, 146, 147, 148, 149, 150, 151, 152,
+ 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 170, 171, 172, 173, 174,
+ 175, 176, 177, 178, 179, 180, 181, 182,
+ 183, 184, 185, 185, 186, 187, 188, 189,
+ 190, 191, 192, 193, 194, 195, 196, 197,
+ 198, 198, 199, 200, 201, 202, 203, 204,
+ 205, 206, 207, 208, 209, 210, 210, 211,
+ 212, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 221, 222, 223, 224, 225, 226,
+ 227, 228, 229, 230, 231, 231, 232, 233,
+ 234, 235, 236, 237, 238, 239, 240, 241,
+ 241, 242, 243, 244, 245, 246, 247, 248,
+ 249, 250, 250, 251, 252, 253, 254, 255,
+};
+
+static const u16 xgamma8_10[GAMMA8_TABLE_LENGTH] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135,
+ 136, 137, 138, 139, 140, 141, 142, 143,
+ 144, 145, 146, 147, 148, 149, 150, 151,
+ 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 178, 179, 180, 181, 182, 183,
+ 184, 185, 186, 187, 188, 189, 190, 191,
+ 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207,
+ 208, 209, 210, 211, 212, 213, 214, 215,
+ 216, 217, 218, 219, 220, 221, 222, 223,
+ 224, 225, 226, 227, 228, 229, 230, 231,
+ 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 241, 242, 243, 244, 245, 246, 247,
+ 248, 249, 250, 251, 252, 253, 254, 255,
+};
+
+static const u16 xgamma8_11[GAMMA8_TABLE_LENGTH] = {
+ 0, 1, 1, 2, 3, 3, 4, 5,
+ 6, 6, 7, 8, 9, 10, 10, 11,
+ 12, 13, 14, 15, 16, 16, 17, 18,
+ 19, 20, 21, 22, 22, 23, 24, 25,
+ 26, 27, 28, 29, 30, 31, 31, 32,
+ 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70,
+ 71, 72, 73, 74, 75, 76, 77, 78,
+ 79, 80, 81, 82, 83, 84, 85, 86,
+ 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102,
+ 103, 104, 105, 106, 107, 108, 109, 110,
+ 111, 112, 113, 114, 115, 116, 117, 118,
+ 119, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135,
+ 136, 137, 138, 139, 140, 141, 142, 143,
+ 144, 145, 146, 147, 149, 150, 151, 152,
+ 153, 154, 155, 156, 157, 158, 159, 160,
+ 161, 162, 163, 164, 165, 166, 167, 169,
+ 170, 171, 172, 173, 174, 175, 176, 177,
+ 178, 179, 180, 181, 182, 183, 184, 186,
+ 187, 188, 189, 190, 191, 192, 193, 194,
+ 195, 196, 197, 198, 199, 201, 202, 203,
+ 204, 205, 206, 207, 208, 209, 210, 211,
+ 212, 214, 215, 216, 217, 218, 219, 220,
+ 221, 222, 223, 224, 225, 227, 228, 229,
+ 230, 231, 232, 233, 234, 235, 236, 237,
+ 239, 240, 241, 242, 243, 244, 245, 246,
+ 247, 248, 250, 251, 252, 253, 254, 255,
+};
+
+static const u16 xgamma8_12[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 1, 1, 2, 2, 3, 3,
+ 4, 5, 5, 6, 7, 7, 8, 9,
+ 9, 10, 11, 11, 12, 13, 13, 14,
+ 15, 16, 16, 17, 18, 19, 20, 20,
+ 21, 22, 23, 24, 24, 25, 26, 27,
+ 28, 28, 29, 30, 31, 32, 33, 34,
+ 34, 35, 36, 37, 38, 39, 40, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70,
+ 71, 72, 73, 74, 75, 76, 77, 78,
+ 79, 80, 81, 82, 83, 84, 85, 86,
+ 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102,
+ 103, 104, 105, 106, 107, 108, 109, 110,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127,
+ 128, 130, 131, 132, 133, 134, 135, 136,
+ 137, 138, 139, 140, 141, 142, 144, 145,
+ 146, 147, 148, 149, 150, 151, 152, 153,
+ 155, 156, 157, 158, 159, 160, 161, 162,
+ 163, 165, 166, 167, 168, 169, 170, 171,
+ 172, 173, 175, 176, 177, 178, 179, 180,
+ 181, 183, 184, 185, 186, 187, 188, 189,
+ 191, 192, 193, 194, 195, 196, 197, 199,
+ 200, 201, 202, 203, 204, 205, 207, 208,
+ 209, 210, 211, 212, 214, 215, 216, 217,
+ 218, 219, 221, 222, 223, 224, 225, 226,
+ 228, 229, 230, 231, 232, 234, 235, 236,
+ 237, 238, 239, 241, 242, 243, 244, 245,
+ 247, 248, 249, 250, 251, 253, 254, 255,
+};
+
+static const u16 xgamma8_13[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 1, 1, 2, 2, 2,
+ 3, 3, 4, 4, 5, 5, 6, 6,
+ 7, 8, 8, 9, 9, 10, 11, 11,
+ 12, 12, 13, 14, 14, 15, 16, 16,
+ 17, 18, 19, 19, 20, 21, 21, 22,
+ 23, 24, 24, 25, 26, 27, 28, 28,
+ 29, 30, 31, 31, 32, 33, 34, 35,
+ 36, 36, 37, 38, 39, 40, 41, 41,
+ 42, 43, 44, 45, 46, 47, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56,
+ 57, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 76, 77, 78,
+ 79, 80, 81, 82, 83, 84, 85, 86,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 112,
+ 113, 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 124, 125, 126, 127, 128, 129,
+ 130, 131, 132, 133, 135, 136, 137, 138,
+ 139, 140, 141, 143, 144, 145, 146, 147,
+ 148, 149, 151, 152, 153, 154, 155, 156,
+ 157, 159, 160, 161, 162, 163, 164, 166,
+ 167, 168, 169, 170, 172, 173, 174, 175,
+ 176, 178, 179, 180, 181, 182, 184, 185,
+ 186, 187, 188, 190, 191, 192, 193, 194,
+ 196, 197, 198, 199, 201, 202, 203, 204,
+ 206, 207, 208, 209, 210, 212, 213, 214,
+ 215, 217, 218, 219, 220, 222, 223, 224,
+ 226, 227, 228, 229, 231, 232, 233, 234,
+ 236, 237, 238, 240, 241, 242, 243, 245,
+ 246, 247, 249, 250, 251, 252, 254, 255,
+};
+
+static const u16 xgamma8_14[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 1, 1, 1, 1, 2,
+ 2, 2, 3, 3, 4, 4, 4, 5,
+ 5, 6, 6, 7, 7, 8, 8, 9,
+ 9, 10, 10, 11, 12, 12, 13, 13,
+ 14, 15, 15, 16, 16, 17, 18, 18,
+ 19, 20, 20, 21, 22, 22, 23, 24,
+ 25, 25, 26, 27, 28, 28, 29, 30,
+ 31, 31, 32, 33, 34, 34, 35, 36,
+ 37, 38, 38, 39, 40, 41, 42, 43,
+ 43, 44, 45, 46, 47, 48, 49, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57,
+ 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 83, 84, 85, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94, 95, 96,
+ 97, 98, 99, 100, 101, 103, 104, 105,
+ 106, 107, 108, 109, 110, 111, 112, 113,
+ 115, 116, 117, 118, 119, 120, 121, 122,
+ 124, 125, 126, 127, 128, 129, 130, 132,
+ 133, 134, 135, 136, 137, 139, 140, 141,
+ 142, 143, 145, 146, 147, 148, 149, 151,
+ 152, 153, 154, 155, 157, 158, 159, 160,
+ 161, 163, 164, 165, 166, 168, 169, 170,
+ 171, 173, 174, 175, 176, 178, 179, 180,
+ 181, 183, 184, 185, 187, 188, 189, 190,
+ 192, 193, 194, 196, 197, 198, 200, 201,
+ 202, 203, 205, 206, 207, 209, 210, 211,
+ 213, 214, 215, 217, 218, 219, 221, 222,
+ 223, 225, 226, 227, 229, 230, 232, 233,
+ 234, 236, 237, 238, 240, 241, 242, 244,
+ 245, 247, 248, 249, 251, 252, 254, 255,
+};
+
+static const u16 xgamma8_15[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 1, 1, 1, 1,
+ 1, 2, 2, 2, 3, 3, 3, 4,
+ 4, 4, 5, 5, 6, 6, 6, 7,
+ 7, 8, 8, 9, 9, 10, 10, 11,
+ 11, 12, 12, 13, 14, 14, 15, 15,
+ 16, 16, 17, 18, 18, 19, 20, 20,
+ 21, 21, 22, 23, 23, 24, 25, 26,
+ 26, 27, 28, 28, 29, 30, 31, 31,
+ 32, 33, 34, 34, 35, 36, 37, 37,
+ 38, 39, 40, 41, 41, 42, 43, 44,
+ 45, 46, 46, 47, 48, 49, 50, 51,
+ 52, 53, 53, 54, 55, 56, 57, 58,
+ 59, 60, 61, 62, 63, 64, 65, 65,
+ 66, 67, 68, 69, 70, 71, 72, 73,
+ 74, 75, 76, 77, 78, 79, 80, 81,
+ 82, 83, 84, 85, 86, 88, 89, 90,
+ 91, 92, 93, 94, 95, 96, 97, 98,
+ 99, 100, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 112, 113, 114, 115, 116,
+ 117, 119, 120, 121, 122, 123, 124, 126,
+ 127, 128, 129, 130, 132, 133, 134, 135,
+ 136, 138, 139, 140, 141, 142, 144, 145,
+ 146, 147, 149, 150, 151, 152, 154, 155,
+ 156, 158, 159, 160, 161, 163, 164, 165,
+ 167, 168, 169, 171, 172, 173, 174, 176,
+ 177, 178, 180, 181, 182, 184, 185, 187,
+ 188, 189, 191, 192, 193, 195, 196, 197,
+ 199, 200, 202, 203, 204, 206, 207, 209,
+ 210, 211, 213, 214, 216, 217, 218, 220,
+ 221, 223, 224, 226, 227, 228, 230, 231,
+ 233, 234, 236, 237, 239, 240, 242, 243,
+ 245, 246, 248, 249, 251, 252, 254, 255,
+};
+
+static const u16 xgamma8_16[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 1, 1,
+ 1, 1, 1, 2, 2, 2, 2, 3,
+ 3, 3, 4, 4, 4, 5, 5, 5,
+ 6, 6, 7, 7, 7, 8, 8, 9,
+ 9, 10, 10, 11, 11, 12, 12, 13,
+ 13, 14, 14, 15, 15, 16, 16, 17,
+ 18, 18, 19, 19, 20, 21, 21, 22,
+ 23, 23, 24, 25, 25, 26, 27, 27,
+ 28, 29, 29, 30, 31, 31, 32, 33,
+ 34, 34, 35, 36, 37, 38, 38, 39,
+ 40, 41, 42, 42, 43, 44, 45, 46,
+ 46, 47, 48, 49, 50, 51, 52, 53,
+ 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, 73, 74, 75,
+ 76, 77, 78, 79, 80, 81, 83, 84,
+ 85, 86, 87, 88, 89, 90, 91, 92,
+ 93, 94, 95, 97, 98, 99, 100, 101,
+ 102, 103, 104, 106, 107, 108, 109, 110,
+ 111, 113, 114, 115, 116, 117, 119, 120,
+ 121, 122, 123, 125, 126, 127, 128, 130,
+ 131, 132, 133, 135, 136, 137, 138, 140,
+ 141, 142, 143, 145, 146, 147, 149, 150,
+ 151, 153, 154, 155, 157, 158, 159, 161,
+ 162, 163, 165, 166, 167, 169, 170, 171,
+ 173, 174, 176, 177, 178, 180, 181, 183,
+ 184, 185, 187, 188, 190, 191, 193, 194,
+ 196, 197, 198, 200, 201, 203, 204, 206,
+ 207, 209, 210, 212, 213, 215, 216, 218,
+ 219, 221, 222, 224, 225, 227, 228, 230,
+ 231, 233, 235, 236, 238, 239, 241, 242,
+ 244, 245, 247, 249, 250, 252, 253, 255,
+};
+
+static const u16 xgamma8_17[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 1, 2, 2, 2,
+ 2, 3, 3, 3, 3, 4, 4, 4,
+ 5, 5, 5, 6, 6, 6, 7, 7,
+ 7, 8, 8, 9, 9, 10, 10, 10,
+ 11, 11, 12, 12, 13, 13, 14, 14,
+ 15, 15, 16, 17, 17, 18, 18, 19,
+ 19, 20, 21, 21, 22, 22, 23, 24,
+ 24, 25, 26, 26, 27, 28, 28, 29,
+ 30, 30, 31, 32, 33, 33, 34, 35,
+ 36, 36, 37, 38, 39, 39, 40, 41,
+ 42, 43, 43, 44, 45, 46, 47, 48,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 56, 57, 58, 59, 60, 61, 62,
+ 63, 64, 65, 66, 67, 68, 69, 70,
+ 71, 72, 73, 74, 75, 76, 77, 78,
+ 79, 80, 81, 82, 83, 84, 85, 86,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 97, 98, 99, 100, 101, 102, 103, 105,
+ 106, 107, 108, 109, 111, 112, 113, 114,
+ 115, 117, 118, 119, 120, 122, 123, 124,
+ 125, 127, 128, 129, 131, 132, 133, 134,
+ 136, 137, 138, 140, 141, 142, 144, 145,
+ 146, 148, 149, 151, 152, 153, 155, 156,
+ 157, 159, 160, 162, 163, 164, 166, 167,
+ 169, 170, 172, 173, 174, 176, 177, 179,
+ 180, 182, 183, 185, 186, 188, 189, 191,
+ 192, 194, 195, 197, 198, 200, 201, 203,
+ 205, 206, 208, 209, 211, 212, 214, 216,
+ 217, 219, 220, 222, 224, 225, 227, 228,
+ 230, 232, 233, 235, 237, 238, 240, 242,
+ 243, 245, 247, 248, 250, 252, 253, 255,
+};
+
+static const u16 xgamma8_18[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 2, 2, 2, 3, 3, 3, 3,
+ 4, 4, 4, 4, 5, 5, 5, 6,
+ 6, 6, 7, 7, 8, 8, 8, 9,
+ 9, 10, 10, 10, 11, 11, 12, 12,
+ 13, 13, 14, 14, 15, 15, 16, 16,
+ 17, 17, 18, 18, 19, 19, 20, 21,
+ 21, 22, 22, 23, 24, 24, 25, 26,
+ 26, 27, 28, 28, 29, 30, 30, 31,
+ 32, 32, 33, 34, 35, 35, 36, 37,
+ 38, 38, 39, 40, 41, 41, 42, 43,
+ 44, 45, 46, 46, 47, 48, 49, 50,
+ 51, 52, 53, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65,
+ 66, 67, 68, 69, 70, 71, 72, 73,
+ 74, 75, 76, 77, 78, 79, 80, 81,
+ 82, 83, 84, 86, 87, 88, 89, 90,
+ 91, 92, 93, 95, 96, 97, 98, 99,
+ 100, 102, 103, 104, 105, 107, 108, 109,
+ 110, 111, 113, 114, 115, 116, 118, 119,
+ 120, 122, 123, 124, 126, 127, 128, 129,
+ 131, 132, 134, 135, 136, 138, 139, 140,
+ 142, 143, 145, 146, 147, 149, 150, 152,
+ 153, 154, 156, 157, 159, 160, 162, 163,
+ 165, 166, 168, 169, 171, 172, 174, 175,
+ 177, 178, 180, 181, 183, 184, 186, 188,
+ 189, 191, 192, 194, 195, 197, 199, 200,
+ 202, 204, 205, 207, 208, 210, 212, 213,
+ 215, 217, 218, 220, 222, 224, 225, 227,
+ 229, 230, 232, 234, 236, 237, 239, 241,
+ 243, 244, 246, 248, 250, 251, 253, 255,
+};
+
+static const u16 xgamma8_19[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1, 1, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 3,
+ 3, 3, 3, 4, 4, 4, 4, 5,
+ 5, 5, 6, 6, 6, 7, 7, 7,
+ 8, 8, 8, 9, 9, 9, 10, 10,
+ 11, 11, 12, 12, 12, 13, 13, 14,
+ 14, 15, 15, 16, 16, 17, 17, 18,
+ 18, 19, 20, 20, 21, 21, 22, 22,
+ 23, 24, 24, 25, 26, 26, 27, 28,
+ 28, 29, 30, 30, 31, 32, 32, 33,
+ 34, 35, 35, 36, 37, 38, 38, 39,
+ 40, 41, 41, 42, 43, 44, 45, 46,
+ 46, 47, 48, 49, 50, 51, 52, 53,
+ 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 65, 66, 67, 68,
+ 69, 70, 71, 72, 73, 74, 75, 76,
+ 77, 78, 79, 81, 82, 83, 84, 85,
+ 86, 87, 88, 90, 91, 92, 93, 94,
+ 95, 97, 98, 99, 100, 101, 103, 104,
+ 105, 106, 108, 109, 110, 112, 113, 114,
+ 115, 117, 118, 119, 121, 122, 123, 125,
+ 126, 127, 129, 130, 132, 133, 134, 136,
+ 137, 139, 140, 141, 143, 144, 146, 147,
+ 149, 150, 152, 153, 155, 156, 158, 159,
+ 161, 162, 164, 165, 167, 168, 170, 172,
+ 173, 175, 176, 178, 180, 181, 183, 184,
+ 186, 188, 189, 191, 193, 194, 196, 198,
+ 199, 201, 203, 204, 206, 208, 210, 211,
+ 213, 215, 217, 218, 220, 222, 224, 225,
+ 227, 229, 231, 233, 235, 236, 238, 240,
+ 242, 244, 246, 247, 249, 251, 253, 255,
+};
+
+static const u16 xgamma8_20[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 3, 3, 3, 3, 4, 4,
+ 4, 4, 5, 5, 5, 5, 6, 6,
+ 6, 7, 7, 7, 8, 8, 8, 9,
+ 9, 9, 10, 10, 11, 11, 11, 12,
+ 12, 13, 13, 14, 14, 15, 15, 16,
+ 16, 17, 17, 18, 18, 19, 19, 20,
+ 20, 21, 21, 22, 23, 23, 24, 24,
+ 25, 26, 26, 27, 28, 28, 29, 30,
+ 30, 31, 32, 32, 33, 34, 35, 35,
+ 36, 37, 38, 38, 39, 40, 41, 42,
+ 42, 43, 44, 45, 46, 47, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 73, 74, 75, 76, 77, 78, 79, 80,
+ 81, 82, 84, 85, 86, 87, 88, 89,
+ 91, 92, 93, 94, 95, 97, 98, 99,
+ 100, 102, 103, 104, 105, 107, 108, 109,
+ 111, 112, 113, 115, 116, 117, 119, 120,
+ 121, 123, 124, 126, 127, 128, 130, 131,
+ 133, 134, 136, 137, 139, 140, 142, 143,
+ 145, 146, 148, 149, 151, 152, 154, 155,
+ 157, 158, 160, 162, 163, 165, 166, 168,
+ 170, 171, 173, 175, 176, 178, 180, 181,
+ 183, 185, 186, 188, 190, 192, 193, 195,
+ 197, 199, 200, 202, 204, 206, 207, 209,
+ 211, 213, 215, 217, 218, 220, 222, 224,
+ 226, 228, 230, 232, 233, 235, 237, 239,
+ 241, 243, 245, 247, 249, 251, 253, 255,
+};
+
+static const u16 xgamma8_21[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 2, 2, 2, 2, 3, 3, 3,
+ 3, 3, 4, 4, 4, 4, 5, 5,
+ 5, 5, 6, 6, 6, 7, 7, 7,
+ 8, 8, 8, 9, 9, 9, 10, 10,
+ 11, 11, 11, 12, 12, 13, 13, 14,
+ 14, 14, 15, 15, 16, 16, 17, 17,
+ 18, 18, 19, 20, 20, 21, 21, 22,
+ 22, 23, 24, 24, 25, 25, 26, 27,
+ 27, 28, 29, 29, 30, 31, 31, 32,
+ 33, 33, 34, 35, 36, 36, 37, 38,
+ 39, 40, 40, 41, 42, 43, 44, 44,
+ 45, 46, 47, 48, 49, 50, 51, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, 73, 75, 76,
+ 77, 78, 79, 80, 81, 83, 84, 85,
+ 86, 87, 88, 90, 91, 92, 93, 95,
+ 96, 97, 98, 100, 101, 102, 104, 105,
+ 106, 107, 109, 110, 112, 113, 114, 116,
+ 117, 118, 120, 121, 123, 124, 126, 127,
+ 129, 130, 131, 133, 134, 136, 137, 139,
+ 141, 142, 144, 145, 147, 148, 150, 151,
+ 153, 155, 156, 158, 160, 161, 163, 165,
+ 166, 168, 170, 171, 173, 175, 176, 178,
+ 180, 182, 183, 185, 187, 189, 191, 192,
+ 194, 196, 198, 200, 202, 203, 205, 207,
+ 209, 211, 213, 215, 217, 219, 221, 223,
+ 225, 226, 228, 230, 232, 234, 236, 238,
+ 241, 243, 245, 247, 249, 251, 253, 255,
+};
+
+static const u16 xgamma8_22[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 4, 4, 4,
+ 4, 5, 5, 5, 5, 6, 6, 6,
+ 6, 7, 7, 7, 8, 8, 8, 9,
+ 9, 9, 10, 10, 11, 11, 11, 12,
+ 12, 13, 13, 13, 14, 14, 15, 15,
+ 16, 16, 17, 17, 18, 18, 19, 19,
+ 20, 20, 21, 22, 22, 23, 23, 24,
+ 25, 25, 26, 26, 27, 28, 28, 29,
+ 30, 30, 31, 32, 33, 33, 34, 35,
+ 35, 36, 37, 38, 39, 39, 40, 41,
+ 42, 43, 43, 44, 45, 46, 47, 48,
+ 49, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 73, 74, 75, 76, 77, 78, 79, 81,
+ 82, 83, 84, 85, 87, 88, 89, 90,
+ 91, 93, 94, 95, 97, 98, 99, 100,
+ 102, 103, 105, 106, 107, 109, 110, 111,
+ 113, 114, 116, 117, 119, 120, 121, 123,
+ 124, 126, 127, 129, 130, 132, 133, 135,
+ 137, 138, 140, 141, 143, 145, 146, 148,
+ 149, 151, 153, 154, 156, 158, 159, 161,
+ 163, 165, 166, 168, 170, 172, 173, 175,
+ 177, 179, 181, 182, 184, 186, 188, 190,
+ 192, 194, 196, 197, 199, 201, 203, 205,
+ 207, 209, 211, 213, 215, 217, 219, 221,
+ 223, 225, 227, 229, 231, 234, 236, 238,
+ 240, 242, 244, 246, 248, 251, 253, 255,
+};
+
+static const u16 xgamma8_23[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 2, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 5, 5, 5,
+ 5, 6, 6, 6, 7, 7, 7, 7,
+ 8, 8, 8, 9, 9, 10, 10, 10,
+ 11, 11, 11, 12, 12, 13, 13, 13,
+ 14, 14, 15, 15, 16, 16, 17, 17,
+ 18, 18, 19, 19, 20, 20, 21, 21,
+ 22, 23, 23, 24, 24, 25, 26, 26,
+ 27, 28, 28, 29, 30, 30, 31, 32,
+ 32, 33, 34, 35, 35, 36, 37, 38,
+ 38, 39, 40, 41, 42, 42, 43, 44,
+ 45, 46, 47, 48, 49, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67,
+ 69, 70, 71, 72, 73, 74, 75, 76,
+ 78, 79, 80, 81, 82, 84, 85, 86,
+ 87, 89, 90, 91, 92, 94, 95, 96,
+ 98, 99, 100, 102, 103, 104, 106, 107,
+ 109, 110, 112, 113, 114, 116, 117, 119,
+ 120, 122, 123, 125, 126, 128, 130, 131,
+ 133, 134, 136, 138, 139, 141, 143, 144,
+ 146, 148, 149, 151, 153, 154, 156, 158,
+ 160, 161, 163, 165, 167, 169, 170, 172,
+ 174, 176, 178, 180, 182, 183, 185, 187,
+ 189, 191, 193, 195, 197, 199, 201, 203,
+ 205, 207, 209, 211, 213, 215, 218, 220,
+ 222, 224, 226, 228, 230, 233, 235, 237,
+ 239, 241, 244, 246, 248, 250, 253, 255,
+};
+
+static const u16 xgamma8_24[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 2, 2, 2, 2, 2, 3, 3,
+ 3, 3, 3, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 6, 6, 6, 6,
+ 7, 7, 7, 8, 8, 8, 9, 9,
+ 9, 10, 10, 10, 11, 11, 11, 12,
+ 12, 13, 13, 14, 14, 14, 15, 15,
+ 16, 16, 17, 17, 18, 18, 19, 19,
+ 20, 20, 21, 22, 22, 23, 23, 24,
+ 24, 25, 26, 26, 27, 28, 28, 29,
+ 30, 30, 31, 32, 32, 33, 34, 35,
+ 35, 36, 37, 38, 39, 39, 40, 41,
+ 42, 43, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 53, 54, 55,
+ 56, 57, 58, 59, 60, 62, 63, 64,
+ 65, 66, 67, 68, 69, 70, 71, 73,
+ 74, 75, 76, 77, 78, 80, 81, 82,
+ 83, 85, 86, 87, 88, 90, 91, 92,
+ 94, 95, 96, 98, 99, 100, 102, 103,
+ 105, 106, 108, 109, 111, 112, 114, 115,
+ 117, 118, 120, 121, 123, 124, 126, 127,
+ 129, 131, 132, 134, 136, 137, 139, 141,
+ 142, 144, 146, 148, 149, 151, 153, 155,
+ 156, 158, 160, 162, 164, 166, 167, 169,
+ 171, 173, 175, 177, 179, 181, 183, 185,
+ 187, 189, 191, 193, 195, 197, 199, 201,
+ 203, 205, 207, 210, 212, 214, 216, 218,
+ 220, 223, 225, 227, 229, 232, 234, 236,
+ 239, 241, 243, 246, 248, 250, 253, 255,
+};
+
+static const u16 xgamma8_25[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 2, 2, 2, 2, 2, 2,
+ 2, 3, 3, 3, 3, 3, 4, 4,
+ 4, 4, 4, 5, 5, 5, 5, 6,
+ 6, 6, 6, 7, 7, 7, 7, 8,
+ 8, 8, 9, 9, 9, 10, 10, 10,
+ 11, 11, 12, 12, 12, 13, 13, 14,
+ 14, 15, 15, 15, 16, 16, 17, 17,
+ 18, 18, 19, 19, 20, 20, 21, 22,
+ 22, 23, 23, 24, 25, 25, 26, 26,
+ 27, 28, 28, 29, 30, 30, 31, 32,
+ 33, 33, 34, 35, 36, 36, 37, 38,
+ 39, 40, 40, 41, 42, 43, 44, 45,
+ 46, 46, 47, 48, 49, 50, 51, 52,
+ 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 65, 67, 68, 69,
+ 70, 71, 72, 73, 75, 76, 77, 78,
+ 80, 81, 82, 83, 85, 86, 87, 89,
+ 90, 91, 93, 94, 95, 97, 98, 99,
+ 101, 102, 104, 105, 107, 108, 110, 111,
+ 113, 114, 116, 117, 119, 121, 122, 124,
+ 125, 127, 129, 130, 132, 134, 135, 137,
+ 139, 141, 142, 144, 146, 148, 150, 151,
+ 153, 155, 157, 159, 161, 163, 165, 166,
+ 168, 170, 172, 174, 176, 178, 180, 182,
+ 184, 186, 189, 191, 193, 195, 197, 199,
+ 201, 204, 206, 208, 210, 212, 215, 217,
+ 219, 221, 224, 226, 228, 231, 233, 235,
+ 238, 240, 243, 245, 248, 250, 253, 255,
+};
+
+static const u16 xgamma8_26[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 2, 2, 3, 3, 3, 3,
+ 3, 3, 4, 4, 4, 4, 5, 5,
+ 5, 5, 5, 6, 6, 6, 6, 7,
+ 7, 7, 8, 8, 8, 9, 9, 9,
+ 10, 10, 10, 11, 11, 11, 12, 12,
+ 13, 13, 13, 14, 14, 15, 15, 16,
+ 16, 17, 17, 18, 18, 19, 19, 20,
+ 20, 21, 21, 22, 22, 23, 24, 24,
+ 25, 25, 26, 27, 27, 28, 29, 29,
+ 30, 31, 31, 32, 33, 34, 34, 35,
+ 36, 37, 38, 38, 39, 40, 41, 42,
+ 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65,
+ 66, 68, 69, 70, 71, 72, 73, 75,
+ 76, 77, 78, 80, 81, 82, 84, 85,
+ 86, 88, 89, 90, 92, 93, 94, 96,
+ 97, 99, 100, 102, 103, 105, 106, 108,
+ 109, 111, 112, 114, 115, 117, 119, 120,
+ 122, 124, 125, 127, 129, 130, 132, 134,
+ 136, 137, 139, 141, 143, 145, 146, 148,
+ 150, 152, 154, 156, 158, 160, 162, 164,
+ 166, 168, 170, 172, 174, 176, 178, 180,
+ 182, 184, 186, 188, 191, 193, 195, 197,
+ 199, 202, 204, 206, 209, 211, 213, 215,
+ 218, 220, 223, 225, 227, 230, 232, 235,
+ 237, 240, 242, 245, 247, 250, 252, 255,
+};
+
+static const u16 xgamma8_27[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 2, 2, 2, 2, 2, 3, 3,
+ 3, 3, 3, 3, 3, 4, 4, 4,
+ 4, 4, 5, 5, 5, 5, 6, 6,
+ 6, 6, 7, 7, 7, 7, 8, 8,
+ 8, 9, 9, 9, 10, 10, 10, 11,
+ 11, 12, 12, 12, 13, 13, 14, 14,
+ 14, 15, 15, 16, 16, 17, 17, 18,
+ 18, 19, 19, 20, 20, 21, 21, 22,
+ 23, 23, 24, 24, 25, 26, 26, 27,
+ 28, 28, 29, 30, 30, 31, 32, 33,
+ 33, 34, 35, 36, 36, 37, 38, 39,
+ 40, 41, 41, 42, 43, 44, 45, 46,
+ 47, 48, 49, 50, 51, 51, 52, 53,
+ 55, 56, 57, 58, 59, 60, 61, 62,
+ 63, 64, 65, 66, 68, 69, 70, 71,
+ 72, 74, 75, 76, 77, 79, 80, 81,
+ 83, 84, 85, 87, 88, 89, 91, 92,
+ 94, 95, 97, 98, 100, 101, 103, 104,
+ 106, 107, 109, 110, 112, 114, 115, 117,
+ 119, 120, 122, 124, 125, 127, 129, 131,
+ 132, 134, 136, 138, 140, 141, 143, 145,
+ 147, 149, 151, 153, 155, 157, 159, 161,
+ 163, 165, 167, 169, 171, 173, 175, 178,
+ 180, 182, 184, 186, 188, 191, 193, 195,
+ 198, 200, 202, 205, 207, 209, 212, 214,
+ 216, 219, 221, 224, 226, 229, 231, 234,
+ 237, 239, 242, 244, 247, 250, 252, 255,
+};
+
+static const u16 xgamma8_28[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 2, 2, 2, 2, 2, 2,
+ 2, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 5, 5, 5,
+ 5, 6, 6, 6, 6, 7, 7, 7,
+ 7, 8, 8, 8, 9, 9, 9, 10,
+ 10, 10, 11, 11, 11, 12, 12, 13,
+ 13, 13, 14, 14, 15, 15, 16, 16,
+ 17, 17, 18, 18, 19, 19, 20, 20,
+ 21, 21, 22, 22, 23, 24, 24, 25,
+ 25, 26, 27, 27, 28, 29, 29, 30,
+ 31, 32, 32, 33, 34, 35, 35, 36,
+ 37, 38, 39, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 50,
+ 51, 52, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 66, 67, 68,
+ 69, 70, 72, 73, 74, 75, 77, 78,
+ 79, 81, 82, 83, 85, 86, 87, 89,
+ 90, 92, 93, 95, 96, 98, 99, 101,
+ 102, 104, 105, 107, 109, 110, 112, 114,
+ 115, 117, 119, 120, 122, 124, 126, 127,
+ 129, 131, 133, 135, 137, 138, 140, 142,
+ 144, 146, 148, 150, 152, 154, 156, 158,
+ 160, 162, 164, 167, 169, 171, 173, 175,
+ 177, 180, 182, 184, 186, 189, 191, 193,
+ 196, 198, 200, 203, 205, 208, 210, 213,
+ 215, 218, 220, 223, 225, 228, 231, 233,
+ 236, 239, 241, 244, 247, 249, 252, 255,
+};
+
+static const u16 xgamma8_29[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 2, 2, 3, 3, 3, 3,
+ 3, 3, 3, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 6, 6, 6, 6,
+ 7, 7, 7, 7, 8, 8, 8, 9,
+ 9, 9, 9, 10, 10, 11, 11, 11,
+ 12, 12, 12, 13, 13, 14, 14, 15,
+ 15, 15, 16, 16, 17, 17, 18, 18,
+ 19, 19, 20, 21, 21, 22, 22, 23,
+ 23, 24, 25, 25, 26, 27, 27, 28,
+ 29, 29, 30, 31, 32, 32, 33, 34,
+ 35, 35, 36, 37, 38, 39, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62, 64, 65,
+ 66, 67, 68, 70, 71, 72, 73, 75,
+ 76, 77, 79, 80, 81, 83, 84, 86,
+ 87, 88, 90, 91, 93, 94, 96, 97,
+ 99, 101, 102, 104, 105, 107, 109, 110,
+ 112, 114, 115, 117, 119, 121, 122, 124,
+ 126, 128, 130, 132, 134, 135, 137, 139,
+ 141, 143, 145, 147, 149, 151, 153, 155,
+ 158, 160, 162, 164, 166, 168, 171, 173,
+ 175, 177, 180, 182, 184, 187, 189, 191,
+ 194, 196, 199, 201, 204, 206, 209, 211,
+ 214, 216, 219, 222, 224, 227, 230, 232,
+ 235, 238, 241, 244, 246, 249, 252, 255,
+};
+
+static const u16 xgamma8_30[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 2, 2, 2, 2, 2, 2, 3,
+ 3, 3, 3, 3, 3, 3, 4, 4,
+ 4, 4, 4, 5, 5, 5, 5, 6,
+ 6, 6, 6, 6, 7, 7, 7, 8,
+ 8, 8, 8, 9, 9, 9, 10, 10,
+ 10, 11, 11, 12, 12, 12, 13, 13,
+ 14, 14, 14, 15, 15, 16, 16, 17,
+ 17, 18, 18, 19, 19, 20, 20, 21,
+ 22, 22, 23, 23, 24, 25, 25, 26,
+ 27, 27, 28, 29, 29, 30, 31, 32,
+ 32, 33, 34, 35, 35, 36, 37, 38,
+ 39, 40, 40, 41, 42, 43, 44, 45,
+ 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 60, 61, 62,
+ 63, 64, 65, 67, 68, 69, 70, 72,
+ 73, 74, 76, 77, 78, 80, 81, 82,
+ 84, 85, 87, 88, 90, 91, 93, 94,
+ 96, 97, 99, 101, 102, 104, 105, 107,
+ 109, 111, 112, 114, 116, 118, 119, 121,
+ 123, 125, 127, 129, 131, 132, 134, 136,
+ 138, 140, 142, 144, 147, 149, 151, 153,
+ 155, 157, 159, 162, 164, 166, 168, 171,
+ 173, 175, 178, 180, 182, 185, 187, 190,
+ 192, 195, 197, 200, 202, 205, 207, 210,
+ 213, 215, 218, 221, 223, 226, 229, 232,
+ 235, 237, 240, 243, 246, 249, 252, 255,
+};
+
+static const u16 xgamma8_31[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 5, 5,
+ 5, 5, 6, 6, 6, 6, 6, 7,
+ 7, 7, 8, 8, 8, 8, 9, 9,
+ 9, 10, 10, 10, 11, 11, 12, 12,
+ 12, 13, 13, 14, 14, 14, 15, 15,
+ 16, 16, 17, 17, 18, 18, 19, 19,
+ 20, 20, 21, 22, 22, 23, 23, 24,
+ 25, 25, 26, 27, 27, 28, 29, 29,
+ 30, 31, 32, 32, 33, 34, 35, 36,
+ 36, 37, 38, 39, 40, 41, 42, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50,
+ 51, 52, 53, 54, 56, 57, 58, 59,
+ 60, 61, 62, 64, 65, 66, 67, 69,
+ 70, 71, 73, 74, 75, 77, 78, 79,
+ 81, 82, 84, 85, 87, 88, 90, 91,
+ 93, 94, 96, 97, 99, 101, 102, 104,
+ 106, 108, 109, 111, 113, 115, 116, 118,
+ 120, 122, 124, 126, 128, 130, 132, 134,
+ 136, 138, 140, 142, 144, 146, 148, 150,
+ 152, 155, 157, 159, 161, 164, 166, 168,
+ 171, 173, 175, 178, 180, 183, 185, 188,
+ 190, 193, 195, 198, 201, 203, 206, 209,
+ 211, 214, 217, 220, 222, 225, 228, 231,
+ 234, 237, 240, 243, 246, 249, 252, 255,
+};
+
+static const u16 xgamma8_32[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 3, 3, 3,
+ 3, 3, 3, 4, 4, 4, 4, 4,
+ 4, 5, 5, 5, 5, 6, 6, 6,
+ 6, 6, 7, 7, 7, 8, 8, 8,
+ 8, 9, 9, 9, 10, 10, 10, 11,
+ 11, 12, 12, 12, 13, 13, 14, 14,
+ 14, 15, 15, 16, 16, 17, 17, 18,
+ 18, 19, 19, 20, 21, 21, 22, 22,
+ 23, 23, 24, 25, 25, 26, 27, 27,
+ 28, 29, 30, 30, 31, 32, 33, 33,
+ 34, 35, 36, 37, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56,
+ 57, 59, 60, 61, 62, 63, 65, 66,
+ 67, 68, 70, 71, 72, 74, 75, 76,
+ 78, 79, 81, 82, 84, 85, 87, 88,
+ 90, 91, 93, 95, 96, 98, 99, 101,
+ 103, 105, 106, 108, 110, 112, 113, 115,
+ 117, 119, 121, 123, 125, 127, 129, 131,
+ 133, 135, 137, 139, 141, 143, 146, 148,
+ 150, 152, 154, 157, 159, 161, 164, 166,
+ 168, 171, 173, 176, 178, 181, 183, 186,
+ 188, 191, 194, 196, 199, 202, 204, 207,
+ 210, 213, 216, 219, 221, 224, 227, 230,
+ 233, 236, 239, 242, 246, 249, 252, 255,
+};
+
+static const u16 xgamma8_33[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 3,
+ 3, 3, 3, 3, 3, 3, 4, 4,
+ 4, 4, 4, 4, 5, 5, 5, 5,
+ 6, 6, 6, 6, 7, 7, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 10,
+ 10, 11, 11, 11, 12, 12, 12, 13,
+ 13, 14, 14, 15, 15, 15, 16, 16,
+ 17, 17, 18, 18, 19, 19, 20, 21,
+ 21, 22, 22, 23, 24, 24, 25, 26,
+ 26, 27, 28, 28, 29, 30, 31, 31,
+ 32, 33, 34, 34, 35, 36, 37, 38,
+ 39, 40, 40, 41, 42, 43, 44, 45,
+ 46, 47, 48, 49, 50, 51, 53, 54,
+ 55, 56, 57, 58, 59, 61, 62, 63,
+ 64, 66, 67, 68, 70, 71, 72, 74,
+ 75, 76, 78, 79, 81, 82, 84, 85,
+ 87, 88, 90, 92, 93, 95, 97, 98,
+ 100, 102, 103, 105, 107, 109, 111, 113,
+ 114, 116, 118, 120, 122, 124, 126, 128,
+ 130, 132, 134, 136, 139, 141, 143, 145,
+ 147, 150, 152, 154, 157, 159, 161, 164,
+ 166, 169, 171, 174, 176, 179, 181, 184,
+ 187, 189, 192, 195, 198, 200, 203, 206,
+ 209, 212, 215, 217, 220, 223, 226, 230,
+ 233, 236, 239, 242, 245, 248, 252, 255,
+};
+
+static const u16 xgamma8_34[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 3, 3, 3, 3, 3, 3,
+ 3, 4, 4, 4, 4, 4, 5, 5,
+ 5, 5, 5, 6, 6, 6, 6, 7,
+ 7, 7, 7, 8, 8, 8, 9, 9,
+ 9, 10, 10, 10, 11, 11, 11, 12,
+ 12, 12, 13, 13, 14, 14, 15, 15,
+ 16, 16, 17, 17, 18, 18, 19, 19,
+ 20, 20, 21, 21, 22, 23, 23, 24,
+ 24, 25, 26, 26, 27, 28, 29, 29,
+ 30, 31, 32, 32, 33, 34, 35, 36,
+ 37, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 55, 56, 57, 58, 59, 60,
+ 62, 63, 64, 66, 67, 68, 70, 71,
+ 72, 74, 75, 77, 78, 80, 81, 83,
+ 84, 86, 87, 89, 90, 92, 94, 95,
+ 97, 99, 101, 102, 104, 106, 108, 110,
+ 112, 114, 115, 117, 119, 121, 123, 125,
+ 128, 130, 132, 134, 136, 138, 141, 143,
+ 145, 147, 150, 152, 154, 157, 159, 162,
+ 164, 167, 169, 172, 174, 177, 180, 182,
+ 185, 188, 190, 193, 196, 199, 202, 205,
+ 208, 210, 213, 216, 219, 223, 226, 229,
+ 232, 235, 238, 242, 245, 248, 252, 255,
+};
+
+static const u16 xgamma8_35[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 3, 3, 3,
+ 3, 3, 3, 4, 4, 4, 4, 4,
+ 4, 5, 5, 5, 5, 5, 6, 6,
+ 6, 6, 7, 7, 7, 7, 8, 8,
+ 8, 9, 9, 9, 10, 10, 10, 11,
+ 11, 11, 12, 12, 13, 13, 13, 14,
+ 14, 15, 15, 16, 16, 17, 17, 18,
+ 18, 19, 19, 20, 20, 21, 22, 22,
+ 23, 23, 24, 25, 25, 26, 27, 28,
+ 28, 29, 30, 30, 31, 32, 33, 34,
+ 35, 35, 36, 37, 38, 39, 40, 41,
+ 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 53, 54, 56, 57, 58,
+ 59, 60, 62, 63, 64, 66, 67, 68,
+ 70, 71, 72, 74, 75, 77, 78, 80,
+ 81, 83, 85, 86, 88, 89, 91, 93,
+ 94, 96, 98, 100, 102, 103, 105, 107,
+ 109, 111, 113, 115, 117, 119, 121, 123,
+ 125, 127, 129, 131, 134, 136, 138, 140,
+ 143, 145, 147, 150, 152, 155, 157, 159,
+ 162, 165, 167, 170, 172, 175, 178, 180,
+ 183, 186, 189, 192, 194, 197, 200, 203,
+ 206, 209, 212, 215, 219, 222, 225, 228,
+ 231, 235, 238, 241, 245, 248, 252, 255,
+};
+
+static const u16 xgamma8_36[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 3,
+ 3, 3, 3, 3, 3, 3, 4, 4,
+ 4, 4, 4, 4, 5, 5, 5, 5,
+ 6, 6, 6, 6, 6, 7, 7, 7,
+ 8, 8, 8, 8, 9, 9, 9, 10,
+ 10, 10, 11, 11, 12, 12, 12, 13,
+ 13, 14, 14, 15, 15, 15, 16, 16,
+ 17, 17, 18, 18, 19, 20, 20, 21,
+ 21, 22, 23, 23, 24, 24, 25, 26,
+ 27, 27, 28, 29, 29, 30, 31, 32,
+ 33, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 42, 43, 44, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 56,
+ 57, 58, 59, 61, 62, 63, 64, 66,
+ 67, 69, 70, 71, 73, 74, 76, 77,
+ 79, 80, 82, 83, 85, 87, 88, 90,
+ 92, 94, 95, 97, 99, 101, 103, 104,
+ 106, 108, 110, 112, 114, 116, 118, 120,
+ 122, 125, 127, 129, 131, 133, 136, 138,
+ 140, 143, 145, 147, 150, 152, 155, 157,
+ 160, 162, 165, 168, 170, 173, 176, 179,
+ 181, 184, 187, 190, 193, 196, 199, 202,
+ 205, 208, 211, 214, 218, 221, 224, 227,
+ 231, 234, 237, 241, 244, 248, 251, 255,
+};
+
+static const u16 xgamma8_37[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 3, 3, 3, 3, 3, 3,
+ 3, 4, 4, 4, 4, 4, 5, 5,
+ 5, 5, 5, 6, 6, 6, 6, 7,
+ 7, 7, 7, 8, 8, 8, 9, 9,
+ 9, 10, 10, 10, 11, 11, 11, 12,
+ 12, 13, 13, 13, 14, 14, 15, 15,
+ 16, 16, 17, 17, 18, 18, 19, 19,
+ 20, 20, 21, 22, 22, 23, 24, 24,
+ 25, 26, 26, 27, 28, 28, 29, 30,
+ 31, 32, 32, 33, 34, 35, 36, 37,
+ 38, 39, 39, 40, 41, 42, 43, 44,
+ 45, 47, 48, 49, 50, 51, 52, 53,
+ 54, 56, 57, 58, 59, 61, 62, 63,
+ 65, 66, 67, 69, 70, 72, 73, 75,
+ 76, 78, 79, 81, 83, 84, 86, 88,
+ 89, 91, 93, 95, 96, 98, 100, 102,
+ 104, 106, 108, 110, 112, 114, 116, 118,
+ 120, 122, 124, 127, 129, 131, 133, 136,
+ 138, 140, 143, 145, 148, 150, 153, 155,
+ 158, 160, 163, 166, 169, 171, 174, 177,
+ 180, 183, 186, 188, 191, 194, 198, 201,
+ 204, 207, 210, 213, 217, 220, 223, 227,
+ 230, 233, 237, 241, 244, 248, 251, 255,
+};
+
+static const u16 xgamma8_38[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 3, 3, 3, 3,
+ 3, 3, 3, 4, 4, 4, 4, 4,
+ 4, 5, 5, 5, 5, 6, 6, 6,
+ 6, 6, 7, 7, 7, 8, 8, 8,
+ 8, 9, 9, 9, 10, 10, 10, 11,
+ 11, 12, 12, 12, 13, 13, 14, 14,
+ 15, 15, 15, 16, 16, 17, 18, 18,
+ 19, 19, 20, 20, 21, 21, 22, 23,
+ 23, 24, 25, 25, 26, 27, 28, 28,
+ 29, 30, 31, 31, 32, 33, 34, 35,
+ 36, 37, 38, 38, 39, 40, 41, 42,
+ 43, 44, 45, 47, 48, 49, 50, 51,
+ 52, 53, 55, 56, 57, 58, 60, 61,
+ 62, 64, 65, 66, 68, 69, 71, 72,
+ 74, 75, 77, 78, 80, 82, 83, 85,
+ 87, 88, 90, 92, 94, 96, 98, 99,
+ 101, 103, 105, 107, 109, 111, 113, 115,
+ 118, 120, 122, 124, 126, 129, 131, 133,
+ 136, 138, 141, 143, 146, 148, 151, 153,
+ 156, 158, 161, 164, 167, 169, 172, 175,
+ 178, 181, 184, 187, 190, 193, 196, 199,
+ 203, 206, 209, 212, 216, 219, 222, 226,
+ 229, 233, 237, 240, 244, 247, 251, 255,
+};
+
+static const u16 xgamma8_39[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 3, 3,
+ 3, 3, 3, 3, 3, 4, 4, 4,
+ 4, 4, 4, 5, 5, 5, 5, 5,
+ 6, 6, 6, 6, 7, 7, 7, 7,
+ 8, 8, 8, 9, 9, 9, 10, 10,
+ 10, 11, 11, 11, 12, 12, 13, 13,
+ 13, 14, 14, 15, 15, 16, 16, 17,
+ 17, 18, 18, 19, 20, 20, 21, 21,
+ 22, 23, 23, 24, 25, 25, 26, 27,
+ 27, 28, 29, 30, 31, 31, 32, 33,
+ 34, 35, 36, 37, 38, 38, 39, 40,
+ 41, 42, 43, 45, 46, 47, 48, 49,
+ 50, 51, 52, 54, 55, 56, 57, 59,
+ 60, 61, 63, 64, 66, 67, 68, 70,
+ 71, 73, 74, 76, 78, 79, 81, 83,
+ 84, 86, 88, 90, 91, 93, 95, 97,
+ 99, 101, 103, 105, 107, 109, 111, 113,
+ 115, 117, 120, 122, 124, 126, 129, 131,
+ 133, 136, 138, 141, 143, 146, 149, 151,
+ 154, 157, 159, 162, 165, 168, 171, 173,
+ 176, 179, 182, 185, 189, 192, 195, 198,
+ 201, 205, 208, 211, 215, 218, 222, 225,
+ 229, 232, 236, 240, 243, 247, 251, 255,
+};
+
+static const u16 xgamma8_40[GAMMA8_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 5, 5, 5,
+ 5, 5, 6, 6, 6, 6, 7, 7,
+ 7, 7, 8, 8, 8, 9, 9, 9,
+ 9, 10, 10, 11, 11, 11, 12, 12,
+ 13, 13, 13, 14, 14, 15, 15, 16,
+ 16, 17, 17, 18, 18, 19, 19, 20,
+ 21, 21, 22, 23, 23, 24, 25, 25,
+ 26, 27, 27, 28, 29, 30, 31, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 52, 53, 54, 55, 57,
+ 58, 59, 61, 62, 63, 65, 66, 68,
+ 69, 71, 72, 74, 75, 77, 79, 80,
+ 82, 84, 85, 87, 89, 91, 93, 95,
+ 96, 98, 100, 102, 104, 107, 109, 111,
+ 113, 115, 117, 120, 122, 124, 126, 129,
+ 131, 134, 136, 139, 141, 144, 146, 149,
+ 152, 155, 157, 160, 163, 166, 169, 172,
+ 175, 178, 181, 184, 187, 190, 194, 197,
+ 200, 203, 207, 210, 214, 217, 221, 224,
+ 228, 232, 236, 239, 243, 247, 251, 255,
+};
+
+static const u16 *xgamma8_curves[GAMMA_CURVE_LENGTH] = {
+ &xgamma8_01[0],
+ &xgamma8_02[0],
+ &xgamma8_03[0],
+ &xgamma8_04[0],
+ &xgamma8_05[0],
+ &xgamma8_06[0],
+ &xgamma8_07[0],
+ &xgamma8_08[0],
+ &xgamma8_09[0],
+ &xgamma8_10[0],
+ &xgamma8_11[0],
+ &xgamma8_12[0],
+ &xgamma8_13[0],
+ &xgamma8_14[0],
+ &xgamma8_15[0],
+ &xgamma8_16[0],
+ &xgamma8_17[0],
+ &xgamma8_18[0],
+ &xgamma8_19[0],
+ &xgamma8_20[0],
+ &xgamma8_21[0],
+ &xgamma8_22[0],
+ &xgamma8_23[0],
+ &xgamma8_24[0],
+ &xgamma8_25[0],
+ &xgamma8_26[0],
+ &xgamma8_27[0],
+ &xgamma8_28[0],
+ &xgamma8_29[0],
+ &xgamma8_30[0],
+ &xgamma8_31[0],
+ &xgamma8_32[0],
+ &xgamma8_33[0],
+ &xgamma8_34[0],
+ &xgamma8_35[0],
+ &xgamma8_36[0],
+ &xgamma8_37[0],
+ &xgamma8_38[0],
+ &xgamma8_39[0],
+ &xgamma8_40[0],
+};
+
+#define GAMMA_BPC_10 (10)
+#define GAMMA10_TABLE_LENGTH BIT(GAMMA_BPC_10)
+static const u16 xgamma10_01[GAMMA10_TABLE_LENGTH] = {
+ 0, 512, 548, 571, 588, 601, 612, 621, 630, 637, 644,
+ 650, 656, 661, 666, 671, 675, 679, 683, 687, 690, 694,
+ 697, 700, 703, 706, 709, 711, 714, 716, 719, 721, 723,
+ 726, 728, 730, 732, 734, 736, 738, 740, 742, 743, 745,
+ 747, 749, 750, 752, 753, 755, 756, 758, 759, 761, 762,
+ 764, 765, 766, 768, 769, 770, 772, 773, 774, 775, 777,
+ 778, 779, 780, 781, 782, 783, 785, 786, 787, 788, 789,
+ 790, 791, 792, 793, 794, 795, 796, 797, 798, 799, 800,
+ 800, 801, 802, 803, 804, 805, 806, 807, 807, 808, 809,
+ 810, 811, 812, 812, 813, 814, 815, 815, 816, 817, 818,
+ 819, 819, 820, 821, 821, 822, 823, 824, 824, 825, 826,
+ 826, 827, 828, 828, 829, 830, 830, 831, 832, 832, 833,
+ 834, 834, 835, 835, 836, 837, 837, 838, 838, 839, 840,
+ 840, 841, 841, 842, 843, 843, 844, 844, 845, 845, 846,
+ 847, 847, 848, 848, 849, 849, 850, 850, 851, 851, 852,
+ 852, 853, 853, 854, 854, 855, 855, 856, 856, 857, 857,
+ 858, 858, 859, 859, 860, 860, 861, 861, 862, 862, 863,
+ 863, 864, 864, 864, 865, 865, 866, 866, 867, 867, 868,
+ 868, 869, 869, 869, 870, 870, 871, 871, 872, 872, 872,
+ 873, 873, 874, 874, 874, 875, 875, 876, 876, 876, 877,
+ 877, 878, 878, 878, 879, 879, 880, 880, 880, 881, 881,
+ 882, 882, 882, 883, 883, 883, 884, 884, 885, 885, 885,
+ 886, 886, 886, 887, 887, 887, 888, 888, 889, 889, 889,
+ 890, 890, 890, 891, 891, 891, 892, 892, 892, 893, 893,
+ 893, 894, 894, 894, 895, 895, 895, 896, 896, 896, 897,
+ 897, 897, 898, 898, 898, 899, 899, 899, 900, 900, 900,
+ 901, 901, 901, 902, 902, 902, 902, 903, 903, 903, 904,
+ 904, 904, 905, 905, 905, 906, 906, 906, 906, 907, 907,
+ 907, 908, 908, 908, 908, 909, 909, 909, 910, 910, 910,
+ 910, 911, 911, 911, 912, 912, 912, 912, 913, 913, 913,
+ 914, 914, 914, 914, 915, 915, 915, 915, 916, 916, 916,
+ 917, 917, 917, 917, 918, 918, 918, 918, 919, 919, 919,
+ 919, 920, 920, 920, 921, 921, 921, 921, 922, 922, 922,
+ 922, 923, 923, 923, 923, 924, 924, 924, 924, 925, 925,
+ 925, 925, 926, 926, 926, 926, 927, 927, 927, 927, 928,
+ 928, 928, 928, 928, 929, 929, 929, 929, 930, 930, 930,
+ 930, 931, 931, 931, 931, 932, 932, 932, 932, 932, 933,
+ 933, 933, 933, 934, 934, 934, 934, 935, 935, 935, 935,
+ 935, 936, 936, 936, 936, 937, 937, 937, 937, 937, 938,
+ 938, 938, 938, 939, 939, 939, 939, 939, 940, 940, 940,
+ 940, 940, 941, 941, 941, 941, 942, 942, 942, 942, 942,
+ 943, 943, 943, 943, 943, 944, 944, 944, 944, 944, 945,
+ 945, 945, 945, 945, 946, 946, 946, 946, 946, 947, 947,
+ 947, 947, 947, 948, 948, 948, 948, 948, 949, 949, 949,
+ 949, 949, 950, 950, 950, 950, 950, 951, 951, 951, 951,
+ 951, 952, 952, 952, 952, 952, 953, 953, 953, 953, 953,
+ 953, 954, 954, 954, 954, 954, 955, 955, 955, 955, 955,
+ 956, 956, 956, 956, 956, 956, 957, 957, 957, 957, 957,
+ 958, 958, 958, 958, 958, 958, 959, 959, 959, 959, 959,
+ 960, 960, 960, 960, 960, 960, 961, 961, 961, 961, 961,
+ 961, 962, 962, 962, 962, 962, 962, 963, 963, 963, 963,
+ 963, 964, 964, 964, 964, 964, 964, 965, 965, 965, 965,
+ 965, 965, 966, 966, 966, 966, 966, 966, 967, 967, 967,
+ 967, 967, 967, 968, 968, 968, 968, 968, 968, 969, 969,
+ 969, 969, 969, 969, 970, 970, 970, 970, 970, 970, 970,
+ 971, 971, 971, 971, 971, 971, 972, 972, 972, 972, 972,
+ 972, 973, 973, 973, 973, 973, 973, 974, 974, 974, 974,
+ 974, 974, 974, 975, 975, 975, 975, 975, 975, 976, 976,
+ 976, 976, 976, 976, 976, 977, 977, 977, 977, 977, 977,
+ 977, 978, 978, 978, 978, 978, 978, 979, 979, 979, 979,
+ 979, 979, 979, 980, 980, 980, 980, 980, 980, 980, 981,
+ 981, 981, 981, 981, 981, 981, 982, 982, 982, 982, 982,
+ 982, 982, 983, 983, 983, 983, 983, 983, 983, 984, 984,
+ 984, 984, 984, 984, 984, 985, 985, 985, 985, 985, 985,
+ 985, 986, 986, 986, 986, 986, 986, 986, 987, 987, 987,
+ 987, 987, 987, 987, 988, 988, 988, 988, 988, 988, 988,
+ 989, 989, 989, 989, 989, 989, 989, 989, 990, 990, 990,
+ 990, 990, 990, 990, 991, 991, 991, 991, 991, 991, 991,
+ 991, 992, 992, 992, 992, 992, 992, 992, 993, 993, 993,
+ 993, 993, 993, 993, 993, 994, 994, 994, 994, 994, 994,
+ 994, 994, 995, 995, 995, 995, 995, 995, 995, 996, 996,
+ 996, 996, 996, 996, 996, 996, 997, 997, 997, 997, 997,
+ 997, 997, 997, 998, 998, 998, 998, 998, 998, 998, 998,
+ 999, 999, 999, 999, 999, 999, 999, 999, 1000, 1000, 1000,
+ 1000, 1000, 1000, 1000, 1000, 1000, 1001, 1001, 1001, 1001, 1001,
+ 1001, 1001, 1001, 1002, 1002, 1002, 1002, 1002, 1002, 1002, 1002,
+ 1003, 1003, 1003, 1003, 1003, 1003, 1003, 1003, 1004, 1004, 1004,
+ 1004, 1004, 1004, 1004, 1004, 1004, 1005, 1005, 1005, 1005, 1005,
+ 1005, 1005, 1005, 1006, 1006, 1006, 1006, 1006, 1006, 1006, 1006,
+ 1006, 1007, 1007, 1007, 1007, 1007, 1007, 1007, 1007, 1007, 1008,
+ 1008, 1008, 1008, 1008, 1008, 1008, 1008, 1009, 1009, 1009, 1009,
+ 1009, 1009, 1009, 1009, 1009, 1010, 1010, 1010, 1010, 1010, 1010,
+ 1010, 1010, 1010, 1011, 1011, 1011, 1011, 1011, 1011, 1011, 1011,
+ 1011, 1012, 1012, 1012, 1012, 1012, 1012, 1012, 1012, 1012, 1013,
+ 1013, 1013, 1013, 1013, 1013, 1013, 1013, 1013, 1014, 1014, 1014,
+ 1014, 1014, 1014, 1014, 1014, 1014, 1014, 1015, 1015, 1015, 1015,
+ 1015, 1015, 1015, 1015, 1015, 1016, 1016, 1016, 1016, 1016, 1016,
+ 1016, 1016, 1016, 1017, 1017, 1017, 1017, 1017, 1017, 1017, 1017,
+ 1017, 1017, 1018, 1018, 1018, 1018, 1018, 1018, 1018, 1018, 1018,
+ 1019, 1019, 1019, 1019, 1019, 1019, 1019, 1019, 1019, 1019, 1020,
+ 1020, 1020, 1020, 1020, 1020, 1020, 1020, 1020, 1020, 1021, 1021,
+ 1021, 1021, 1021, 1021, 1021, 1021, 1021, 1021, 1022, 1022, 1022,
+ 1022, 1022, 1022, 1022, 1022, 1022, 1022, 1023, 1023, 1023, 1023,
+ 1023,
+};
+
+static const u16 xgamma10_02[GAMMA10_TABLE_LENGTH] = {
+ 0, 256, 294, 319, 338, 353, 366, 378, 388, 397, 405,
+ 413, 420, 427, 434, 440, 445, 451, 456, 461, 466, 470,
+ 475, 479, 483, 487, 491, 495, 498, 502, 505, 508, 512,
+ 515, 518, 521, 524, 527, 529, 532, 535, 538, 540, 543,
+ 545, 548, 550, 552, 555, 557, 559, 562, 564, 566, 568,
+ 570, 572, 574, 576, 578, 580, 582, 584, 586, 588, 589,
+ 591, 593, 595, 597, 598, 600, 602, 603, 605, 607, 608,
+ 610, 611, 613, 614, 616, 618, 619, 621, 622, 623, 625,
+ 626, 628, 629, 631, 632, 633, 635, 636, 637, 639, 640,
+ 641, 643, 644, 645, 646, 648, 649, 650, 651, 653, 654,
+ 655, 656, 657, 658, 660, 661, 662, 663, 664, 665, 666,
+ 668, 669, 670, 671, 672, 673, 674, 675, 676, 677, 678,
+ 679, 680, 681, 682, 683, 684, 685, 686, 687, 688, 689,
+ 690, 691, 692, 693, 694, 695, 696, 697, 698, 699, 700,
+ 700, 701, 702, 703, 704, 705, 706, 707, 708, 708, 709,
+ 710, 711, 712, 713, 714, 714, 715, 716, 717, 718, 719,
+ 719, 720, 721, 722, 723, 723, 724, 725, 726, 727, 727,
+ 728, 729, 730, 731, 731, 732, 733, 734, 734, 735, 736,
+ 737, 737, 738, 739, 740, 740, 741, 742, 742, 743, 744,
+ 745, 745, 746, 747, 747, 748, 749, 750, 750, 751, 752,
+ 752, 753, 754, 754, 755, 756, 756, 757, 758, 758, 759,
+ 760, 760, 761, 762, 762, 763, 764, 764, 765, 765, 766,
+ 767, 767, 768, 769, 769, 770, 771, 771, 772, 772, 773,
+ 774, 774, 775, 775, 776, 777, 777, 778, 778, 779, 780,
+ 780, 781, 781, 782, 783, 783, 784, 784, 785, 785, 786,
+ 787, 787, 788, 788, 789, 789, 790, 791, 791, 792, 792,
+ 793, 793, 794, 794, 795, 796, 796, 797, 797, 798, 798,
+ 799, 799, 800, 800, 801, 801, 802, 803, 803, 804, 804,
+ 805, 805, 806, 806, 807, 807, 808, 808, 809, 809, 810,
+ 810, 811, 811, 812, 812, 813, 813, 814, 814, 815, 815,
+ 816, 816, 817, 817, 818, 818, 819, 819, 820, 820, 821,
+ 821, 822, 822, 823, 823, 824, 824, 825, 825, 825, 826,
+ 826, 827, 827, 828, 828, 829, 829, 830, 830, 831, 831,
+ 832, 832, 832, 833, 833, 834, 834, 835, 835, 836, 836,
+ 837, 837, 837, 838, 838, 839, 839, 840, 840, 841, 841,
+ 841, 842, 842, 843, 843, 844, 844, 844, 845, 845, 846,
+ 846, 847, 847, 847, 848, 848, 849, 849, 850, 850, 850,
+ 851, 851, 852, 852, 852, 853, 853, 854, 854, 855, 855,
+ 855, 856, 856, 857, 857, 857, 858, 858, 859, 859, 859,
+ 860, 860, 861, 861, 861, 862, 862, 863, 863, 863, 864,
+ 864, 865, 865, 865, 866, 866, 866, 867, 867, 868, 868,
+ 868, 869, 869, 870, 870, 870, 871, 871, 871, 872, 872,
+ 873, 873, 873, 874, 874, 875, 875, 875, 876, 876, 876,
+ 877, 877, 877, 878, 878, 879, 879, 879, 880, 880, 880,
+ 881, 881, 882, 882, 882, 883, 883, 883, 884, 884, 884,
+ 885, 885, 885, 886, 886, 887, 887, 887, 888, 888, 888,
+ 889, 889, 889, 890, 890, 890, 891, 891, 891, 892, 892,
+ 892, 893, 893, 894, 894, 894, 895, 895, 895, 896, 896,
+ 896, 897, 897, 897, 898, 898, 898, 899, 899, 899, 900,
+ 900, 900, 901, 901, 901, 902, 902, 902, 903, 903, 903,
+ 904, 904, 904, 905, 905, 905, 906, 906, 906, 907, 907,
+ 907, 908, 908, 908, 908, 909, 909, 909, 910, 910, 910,
+ 911, 911, 911, 912, 912, 912, 913, 913, 913, 914, 914,
+ 914, 914, 915, 915, 915, 916, 916, 916, 917, 917, 917,
+ 918, 918, 918, 919, 919, 919, 919, 920, 920, 920, 921,
+ 921, 921, 922, 922, 922, 923, 923, 923, 923, 924, 924,
+ 924, 925, 925, 925, 926, 926, 926, 926, 927, 927, 927,
+ 928, 928, 928, 928, 929, 929, 929, 930, 930, 930, 931,
+ 931, 931, 931, 932, 932, 932, 933, 933, 933, 933, 934,
+ 934, 934, 935, 935, 935, 935, 936, 936, 936, 937, 937,
+ 937, 937, 938, 938, 938, 939, 939, 939, 939, 940, 940,
+ 940, 941, 941, 941, 941, 942, 942, 942, 942, 943, 943,
+ 943, 944, 944, 944, 944, 945, 945, 945, 946, 946, 946,
+ 946, 947, 947, 947, 947, 948, 948, 948, 949, 949, 949,
+ 949, 950, 950, 950, 950, 951, 951, 951, 951, 952, 952,
+ 952, 953, 953, 953, 953, 954, 954, 954, 954, 955, 955,
+ 955, 955, 956, 956, 956, 956, 957, 957, 957, 958, 958,
+ 958, 958, 959, 959, 959, 959, 960, 960, 960, 960, 961,
+ 961, 961, 961, 962, 962, 962, 962, 963, 963, 963, 963,
+ 964, 964, 964, 964, 965, 965, 965, 965, 966, 966, 966,
+ 966, 967, 967, 967, 967, 968, 968, 968, 968, 969, 969,
+ 969, 969, 970, 970, 970, 970, 971, 971, 971, 971, 972,
+ 972, 972, 972, 973, 973, 973, 973, 974, 974, 974, 974,
+ 975, 975, 975, 975, 976, 976, 976, 976, 977, 977, 977,
+ 977, 978, 978, 978, 978, 978, 979, 979, 979, 979, 980,
+ 980, 980, 980, 981, 981, 981, 981, 982, 982, 982, 982,
+ 983, 983, 983, 983, 983, 984, 984, 984, 984, 985, 985,
+ 985, 985, 986, 986, 986, 986, 986, 987, 987, 987, 987,
+ 988, 988, 988, 988, 989, 989, 989, 989, 989, 990, 990,
+ 990, 990, 991, 991, 991, 991, 992, 992, 992, 992, 992,
+ 993, 993, 993, 993, 994, 994, 994, 994, 994, 995, 995,
+ 995, 995, 996, 996, 996, 996, 996, 997, 997, 997, 997,
+ 998, 998, 998, 998, 998, 999, 999, 999, 999, 1000, 1000,
+ 1000, 1000, 1000, 1001, 1001, 1001, 1001, 1002, 1002, 1002, 1002,
+ 1002, 1003, 1003, 1003, 1003, 1003, 1004, 1004, 1004, 1004, 1005,
+ 1005, 1005, 1005, 1005, 1006, 1006, 1006, 1006, 1006, 1007, 1007,
+ 1007, 1007, 1008, 1008, 1008, 1008, 1008, 1009, 1009, 1009, 1009,
+ 1009, 1010, 1010, 1010, 1010, 1010, 1011, 1011, 1011, 1011, 1012,
+ 1012, 1012, 1012, 1012, 1013, 1013, 1013, 1013, 1013, 1014, 1014,
+ 1014, 1014, 1014, 1015, 1015, 1015, 1015, 1015, 1016, 1016, 1016,
+ 1016, 1017, 1017, 1017, 1017, 1017, 1018, 1018, 1018, 1018, 1018,
+ 1019, 1019, 1019, 1019, 1019, 1020, 1020, 1020, 1020, 1020, 1021,
+ 1021, 1021, 1021, 1021, 1022, 1022, 1022, 1022, 1022, 1023, 1023,
+ 1023,
+};
+
+static const u16 xgamma10_03[GAMMA10_TABLE_LENGTH] = {
+ 0, 128, 157, 178, 194, 207, 219, 229, 239, 247, 255,
+ 263, 270, 276, 282, 288, 294, 299, 304, 309, 314, 319,
+ 323, 328, 332, 336, 340, 344, 348, 351, 355, 358, 362,
+ 365, 368, 372, 375, 378, 381, 384, 387, 390, 393, 395,
+ 398, 401, 403, 406, 409, 411, 414, 416, 419, 421, 423,
+ 426, 428, 430, 432, 435, 437, 439, 441, 443, 445, 447,
+ 450, 452, 454, 456, 458, 460, 461, 463, 465, 467, 469,
+ 471, 473, 474, 476, 478, 480, 482, 483, 485, 487, 488,
+ 490, 492, 493, 495, 497, 498, 500, 501, 503, 505, 506,
+ 508, 509, 511, 512, 514, 515, 517, 518, 520, 521, 523,
+ 524, 525, 527, 528, 530, 531, 532, 534, 535, 537, 538,
+ 539, 541, 542, 543, 544, 546, 547, 548, 550, 551, 552,
+ 553, 555, 556, 557, 558, 560, 561, 562, 563, 565, 566,
+ 567, 568, 569, 570, 572, 573, 574, 575, 576, 577, 579,
+ 580, 581, 582, 583, 584, 585, 586, 587, 589, 590, 591,
+ 592, 593, 594, 595, 596, 597, 598, 599, 600, 601, 602,
+ 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613,
+ 614, 615, 616, 617, 618, 619, 620, 621, 622, 623, 624,
+ 625, 626, 627, 628, 629, 630, 631, 632, 633, 633, 634,
+ 635, 636, 637, 638, 639, 640, 641, 642, 642, 643, 644,
+ 645, 646, 647, 648, 649, 649, 650, 651, 652, 653, 654,
+ 655, 655, 656, 657, 658, 659, 660, 661, 661, 662, 663,
+ 664, 665, 665, 666, 667, 668, 669, 670, 670, 671, 672,
+ 673, 674, 674, 675, 676, 677, 677, 678, 679, 680, 681,
+ 681, 682, 683, 684, 684, 685, 686, 687, 688, 688, 689,
+ 690, 691, 691, 692, 693, 694, 694, 695, 696, 696, 697,
+ 698, 699, 699, 700, 701, 702, 702, 703, 704, 704, 705,
+ 706, 707, 707, 708, 709, 709, 710, 711, 712, 712, 713,
+ 714, 714, 715, 716, 716, 717, 718, 718, 719, 720, 721,
+ 721, 722, 723, 723, 724, 725, 725, 726, 727, 727, 728,
+ 729, 729, 730, 731, 731, 732, 733, 733, 734, 734, 735,
+ 736, 736, 737, 738, 738, 739, 740, 740, 741, 742, 742,
+ 743, 743, 744, 745, 745, 746, 747, 747, 748, 748, 749,
+ 750, 750, 751, 752, 752, 753, 753, 754, 755, 755, 756,
+ 756, 757, 758, 758, 759, 759, 760, 761, 761, 762, 762,
+ 763, 764, 764, 765, 765, 766, 767, 767, 768, 768, 769,
+ 770, 770, 771, 771, 772, 772, 773, 774, 774, 775, 775,
+ 776, 776, 777, 778, 778, 779, 779, 780, 780, 781, 782,
+ 782, 783, 783, 784, 784, 785, 785, 786, 787, 787, 788,
+ 788, 789, 789, 790, 790, 791, 792, 792, 793, 793, 794,
+ 794, 795, 795, 796, 796, 797, 797, 798, 799, 799, 800,
+ 800, 801, 801, 802, 802, 803, 803, 804, 804, 805, 805,
+ 806, 806, 807, 808, 808, 809, 809, 810, 810, 811, 811,
+ 812, 812, 813, 813, 814, 814, 815, 815, 816, 816, 817,
+ 817, 818, 818, 819, 819, 820, 820, 821, 821, 822, 822,
+ 823, 823, 824, 824, 825, 825, 826, 826, 827, 827, 828,
+ 828, 829, 829, 830, 830, 831, 831, 832, 832, 833, 833,
+ 834, 834, 835, 835, 836, 836, 836, 837, 837, 838, 838,
+ 839, 839, 840, 840, 841, 841, 842, 842, 843, 843, 844,
+ 844, 845, 845, 845, 846, 846, 847, 847, 848, 848, 849,
+ 849, 850, 850, 851, 851, 852, 852, 852, 853, 853, 854,
+ 854, 855, 855, 856, 856, 857, 857, 857, 858, 858, 859,
+ 859, 860, 860, 861, 861, 862, 862, 862, 863, 863, 864,
+ 864, 865, 865, 866, 866, 866, 867, 867, 868, 868, 869,
+ 869, 869, 870, 870, 871, 871, 872, 872, 873, 873, 873,
+ 874, 874, 875, 875, 876, 876, 876, 877, 877, 878, 878,
+ 879, 879, 879, 880, 880, 881, 881, 882, 882, 882, 883,
+ 883, 884, 884, 885, 885, 885, 886, 886, 887, 887, 887,
+ 888, 888, 889, 889, 890, 890, 890, 891, 891, 892, 892,
+ 892, 893, 893, 894, 894, 895, 895, 895, 896, 896, 897,
+ 897, 897, 898, 898, 899, 899, 899, 900, 900, 901, 901,
+ 901, 902, 902, 903, 903, 903, 904, 904, 905, 905, 905,
+ 906, 906, 907, 907, 907, 908, 908, 909, 909, 909, 910,
+ 910, 911, 911, 911, 912, 912, 913, 913, 913, 914, 914,
+ 915, 915, 915, 916, 916, 916, 917, 917, 918, 918, 918,
+ 919, 919, 920, 920, 920, 921, 921, 921, 922, 922, 923,
+ 923, 923, 924, 924, 925, 925, 925, 926, 926, 926, 927,
+ 927, 928, 928, 928, 929, 929, 929, 930, 930, 931, 931,
+ 931, 932, 932, 932, 933, 933, 934, 934, 934, 935, 935,
+ 935, 936, 936, 936, 937, 937, 938, 938, 938, 939, 939,
+ 939, 940, 940, 941, 941, 941, 942, 942, 942, 943, 943,
+ 943, 944, 944, 945, 945, 945, 946, 946, 946, 947, 947,
+ 947, 948, 948, 948, 949, 949, 950, 950, 950, 951, 951,
+ 951, 952, 952, 952, 953, 953, 953, 954, 954, 955, 955,
+ 955, 956, 956, 956, 957, 957, 957, 958, 958, 958, 959,
+ 959, 959, 960, 960, 960, 961, 961, 962, 962, 962, 963,
+ 963, 963, 964, 964, 964, 965, 965, 965, 966, 966, 966,
+ 967, 967, 967, 968, 968, 968, 969, 969, 969, 970, 970,
+ 970, 971, 971, 971, 972, 972, 972, 973, 973, 973, 974,
+ 974, 974, 975, 975, 975, 976, 976, 976, 977, 977, 977,
+ 978, 978, 978, 979, 979, 979, 980, 980, 980, 981, 981,
+ 981, 982, 982, 982, 983, 983, 983, 984, 984, 984, 985,
+ 985, 985, 986, 986, 986, 987, 987, 987, 988, 988, 988,
+ 989, 989, 989, 990, 990, 990, 991, 991, 991, 992, 992,
+ 992, 993, 993, 993, 994, 994, 994, 994, 995, 995, 995,
+ 996, 996, 996, 997, 997, 997, 998, 998, 998, 999, 999,
+ 999, 1000, 1000, 1000, 1001, 1001, 1001, 1001, 1002, 1002, 1002,
+ 1003, 1003, 1003, 1004, 1004, 1004, 1005, 1005, 1005, 1006, 1006,
+ 1006, 1006, 1007, 1007, 1007, 1008, 1008, 1008, 1009, 1009, 1009,
+ 1010, 1010, 1010, 1011, 1011, 1011, 1011, 1012, 1012, 1012, 1013,
+ 1013, 1013, 1014, 1014, 1014, 1015, 1015, 1015, 1015, 1016, 1016,
+ 1016, 1017, 1017, 1017, 1018, 1018, 1018, 1018, 1019, 1019, 1019,
+ 1020, 1020, 1020, 1021, 1021, 1021, 1021, 1022, 1022, 1022, 1023,
+ 1023,
+};
+
+static const u16 xgamma10_04[GAMMA10_TABLE_LENGTH] = {
+ 0, 64, 84, 99, 111, 122, 131, 139, 147, 154, 161,
+ 167, 173, 178, 184, 189, 194, 199, 203, 208, 212, 216,
+ 220, 224, 228, 232, 235, 239, 243, 246, 249, 253, 256,
+ 259, 262, 265, 268, 271, 274, 277, 280, 283, 285, 288,
+ 291, 293, 296, 298, 301, 303, 306, 308, 311, 313, 315,
+ 318, 320, 322, 325, 327, 329, 331, 333, 335, 338, 340,
+ 342, 344, 346, 348, 350, 352, 354, 356, 358, 360, 362,
+ 364, 365, 367, 369, 371, 373, 375, 376, 378, 380, 382,
+ 383, 385, 387, 389, 390, 392, 394, 395, 397, 399, 400,
+ 402, 404, 405, 407, 408, 410, 412, 413, 415, 416, 418,
+ 419, 421, 422, 424, 425, 427, 428, 430, 431, 433, 434,
+ 436, 437, 438, 440, 441, 443, 444, 445, 447, 448, 450,
+ 451, 452, 454, 455, 456, 458, 459, 460, 462, 463, 464,
+ 466, 467, 468, 470, 471, 472, 473, 475, 476, 477, 478,
+ 480, 481, 482, 483, 485, 486, 487, 488, 489, 491, 492,
+ 493, 494, 495, 497, 498, 499, 500, 501, 503, 504, 505,
+ 506, 507, 508, 509, 511, 512, 513, 514, 515, 516, 517,
+ 518, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529,
+ 530, 531, 533, 534, 535, 536, 537, 538, 539, 540, 541,
+ 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552,
+ 553, 554, 555, 556, 557, 558, 559, 560, 561, 562, 563,
+ 564, 565, 566, 567, 568, 569, 570, 571, 572, 573, 574,
+ 575, 576, 577, 578, 578, 579, 580, 581, 582, 583, 584,
+ 585, 586, 587, 588, 589, 590, 591, 591, 592, 593, 594,
+ 595, 596, 597, 598, 599, 600, 600, 601, 602, 603, 604,
+ 605, 606, 607, 607, 608, 609, 610, 611, 612, 613, 614,
+ 614, 615, 616, 617, 618, 619, 620, 620, 621, 622, 623,
+ 624, 625, 625, 626, 627, 628, 629, 630, 630, 631, 632,
+ 633, 634, 635, 635, 636, 637, 638, 639, 639, 640, 641,
+ 642, 643, 643, 644, 645, 646, 647, 647, 648, 649, 650,
+ 651, 651, 652, 653, 654, 655, 655, 656, 657, 658, 658,
+ 659, 660, 661, 662, 662, 663, 664, 665, 665, 666, 667,
+ 668, 668, 669, 670, 671, 671, 672, 673, 674, 674, 675,
+ 676, 677, 677, 678, 679, 680, 680, 681, 682, 683, 683,
+ 684, 685, 685, 686, 687, 688, 688, 689, 690, 691, 691,
+ 692, 693, 693, 694, 695, 696, 696, 697, 698, 698, 699,
+ 700, 701, 701, 702, 703, 703, 704, 705, 705, 706, 707,
+ 708, 708, 709, 710, 710, 711, 712, 712, 713, 714, 714,
+ 715, 716, 717, 717, 718, 719, 719, 720, 721, 721, 722,
+ 723, 723, 724, 725, 725, 726, 727, 727, 728, 729, 729,
+ 730, 731, 731, 732, 733, 733, 734, 735, 735, 736, 737,
+ 737, 738, 739, 739, 740, 740, 741, 742, 742, 743, 744,
+ 744, 745, 746, 746, 747, 748, 748, 749, 749, 750, 751,
+ 751, 752, 753, 753, 754, 755, 755, 756, 756, 757, 758,
+ 758, 759, 760, 760, 761, 761, 762, 763, 763, 764, 765,
+ 765, 766, 766, 767, 768, 768, 769, 769, 770, 771, 771,
+ 772, 773, 773, 774, 774, 775, 776, 776, 777, 777, 778,
+ 779, 779, 780, 780, 781, 782, 782, 783, 783, 784, 785,
+ 785, 786, 786, 787, 788, 788, 789, 789, 790, 791, 791,
+ 792, 792, 793, 793, 794, 795, 795, 796, 796, 797, 798,
+ 798, 799, 799, 800, 800, 801, 802, 802, 803, 803, 804,
+ 804, 805, 806, 806, 807, 807, 808, 808, 809, 810, 810,
+ 811, 811, 812, 812, 813, 814, 814, 815, 815, 816, 816,
+ 817, 818, 818, 819, 819, 820, 820, 821, 821, 822, 823,
+ 823, 824, 824, 825, 825, 826, 826, 827, 827, 828, 829,
+ 829, 830, 830, 831, 831, 832, 832, 833, 834, 834, 835,
+ 835, 836, 836, 837, 837, 838, 838, 839, 839, 840, 841,
+ 841, 842, 842, 843, 843, 844, 844, 845, 845, 846, 846,
+ 847, 847, 848, 849, 849, 850, 850, 851, 851, 852, 852,
+ 853, 853, 854, 854, 855, 855, 856, 856, 857, 857, 858,
+ 859, 859, 860, 860, 861, 861, 862, 862, 863, 863, 864,
+ 864, 865, 865, 866, 866, 867, 867, 868, 868, 869, 869,
+ 870, 870, 871, 871, 872, 872, 873, 873, 874, 874, 875,
+ 875, 876, 876, 877, 877, 878, 878, 879, 879, 880, 880,
+ 881, 881, 882, 882, 883, 883, 884, 884, 885, 885, 886,
+ 886, 887, 887, 888, 888, 889, 889, 890, 890, 891, 891,
+ 892, 892, 893, 893, 894, 894, 895, 895, 896, 896, 897,
+ 897, 898, 898, 899, 899, 900, 900, 901, 901, 902, 902,
+ 903, 903, 904, 904, 905, 905, 905, 906, 906, 907, 907,
+ 908, 908, 909, 909, 910, 910, 911, 911, 912, 912, 913,
+ 913, 914, 914, 915, 915, 915, 916, 916, 917, 917, 918,
+ 918, 919, 919, 920, 920, 921, 921, 922, 922, 923, 923,
+ 923, 924, 924, 925, 925, 926, 926, 927, 927, 928, 928,
+ 929, 929, 929, 930, 930, 931, 931, 932, 932, 933, 933,
+ 934, 934, 935, 935, 935, 936, 936, 937, 937, 938, 938,
+ 939, 939, 940, 940, 940, 941, 941, 942, 942, 943, 943,
+ 944, 944, 945, 945, 945, 946, 946, 947, 947, 948, 948,
+ 949, 949, 949, 950, 950, 951, 951, 952, 952, 953, 953,
+ 953, 954, 954, 955, 955, 956, 956, 957, 957, 957, 958,
+ 958, 959, 959, 960, 960, 961, 961, 961, 962, 962, 963,
+ 963, 964, 964, 965, 965, 965, 966, 966, 967, 967, 968,
+ 968, 968, 969, 969, 970, 970, 971, 971, 971, 972, 972,
+ 973, 973, 974, 974, 974, 975, 975, 976, 976, 977, 977,
+ 977, 978, 978, 979, 979, 980, 980, 980, 981, 981, 982,
+ 982, 983, 983, 983, 984, 984, 985, 985, 986, 986, 986,
+ 987, 987, 988, 988, 989, 989, 989, 990, 990, 991, 991,
+ 991, 992, 992, 993, 993, 994, 994, 994, 995, 995, 996,
+ 996, 996, 997, 997, 998, 998, 999, 999, 999, 1000, 1000,
+ 1001, 1001, 1001, 1002, 1002, 1003, 1003, 1004, 1004, 1004, 1005,
+ 1005, 1006, 1006, 1006, 1007, 1007, 1008, 1008, 1008, 1009, 1009,
+ 1010, 1010, 1010, 1011, 1011, 1012, 1012, 1013, 1013, 1013, 1014,
+ 1014, 1015, 1015, 1015, 1016, 1016, 1017, 1017, 1017, 1018, 1018,
+ 1019, 1019, 1019, 1020, 1020, 1021, 1021, 1021, 1022, 1022, 1023,
+ 1023,
+};
+
+static const u16 xgamma10_05[GAMMA10_TABLE_LENGTH] = {
+ 0, 32, 45, 55, 64, 72, 78, 85, 90, 96, 101,
+ 106, 111, 115, 120, 124, 128, 132, 136, 139, 143, 147,
+ 150, 153, 157, 160, 163, 166, 169, 172, 175, 178, 181,
+ 184, 186, 189, 192, 195, 197, 200, 202, 205, 207, 210,
+ 212, 215, 217, 219, 222, 224, 226, 228, 231, 233, 235,
+ 237, 239, 241, 244, 246, 248, 250, 252, 254, 256, 258,
+ 260, 262, 264, 266, 268, 270, 271, 273, 275, 277, 279,
+ 281, 282, 284, 286, 288, 290, 291, 293, 295, 297, 298,
+ 300, 302, 303, 305, 307, 308, 310, 312, 313, 315, 317,
+ 318, 320, 321, 323, 325, 326, 328, 329, 331, 332, 334,
+ 335, 337, 338, 340, 341, 343, 344, 346, 347, 349, 350,
+ 352, 353, 355, 356, 358, 359, 360, 362, 363, 365, 366,
+ 367, 369, 370, 372, 373, 374, 376, 377, 378, 380, 381,
+ 382, 384, 385, 386, 388, 389, 390, 392, 393, 394, 396,
+ 397, 398, 399, 401, 402, 403, 405, 406, 407, 408, 410,
+ 411, 412, 413, 415, 416, 417, 418, 419, 421, 422, 423,
+ 424, 426, 427, 428, 429, 430, 431, 433, 434, 435, 436,
+ 437, 439, 440, 441, 442, 443, 444, 445, 447, 448, 449,
+ 450, 451, 452, 453, 455, 456, 457, 458, 459, 460, 461,
+ 462, 463, 465, 466, 467, 468, 469, 470, 471, 472, 473,
+ 474, 475, 477, 478, 479, 480, 481, 482, 483, 484, 485,
+ 486, 487, 488, 489, 490, 491, 492, 493, 494, 495, 497,
+ 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508,
+ 509, 510, 511, 512, 513, 514, 515, 516, 517, 518, 519,
+ 520, 521, 522, 523, 524, 525, 526, 527, 527, 528, 529,
+ 530, 531, 532, 533, 534, 535, 536, 537, 538, 539, 540,
+ 541, 542, 543, 544, 545, 546, 547, 547, 548, 549, 550,
+ 551, 552, 553, 554, 555, 556, 557, 558, 559, 559, 560,
+ 561, 562, 563, 564, 565, 566, 567, 568, 569, 569, 570,
+ 571, 572, 573, 574, 575, 576, 577, 577, 578, 579, 580,
+ 581, 582, 583, 584, 585, 585, 586, 587, 588, 589, 590,
+ 591, 591, 592, 593, 594, 595, 596, 597, 598, 598, 599,
+ 600, 601, 602, 603, 603, 604, 605, 606, 607, 608, 609,
+ 609, 610, 611, 612, 613, 614, 614, 615, 616, 617, 618,
+ 619, 619, 620, 621, 622, 623, 623, 624, 625, 626, 627,
+ 628, 628, 629, 630, 631, 632, 632, 633, 634, 635, 636,
+ 636, 637, 638, 639, 640, 640, 641, 642, 643, 644, 644,
+ 645, 646, 647, 648, 648, 649, 650, 651, 652, 652, 653,
+ 654, 655, 655, 656, 657, 658, 659, 659, 660, 661, 662,
+ 662, 663, 664, 665, 666, 666, 667, 668, 669, 669, 670,
+ 671, 672, 672, 673, 674, 675, 675, 676, 677, 678, 678,
+ 679, 680, 681, 681, 682, 683, 684, 684, 685, 686, 687,
+ 687, 688, 689, 690, 690, 691, 692, 693, 693, 694, 695,
+ 696, 696, 697, 698, 699, 699, 700, 701, 701, 702, 703,
+ 704, 704, 705, 706, 707, 707, 708, 709, 709, 710, 711,
+ 712, 712, 713, 714, 714, 715, 716, 717, 717, 718, 719,
+ 719, 720, 721, 722, 722, 723, 724, 724, 725, 726, 727,
+ 727, 728, 729, 729, 730, 731, 731, 732, 733, 734, 734,
+ 735, 736, 736, 737, 738, 738, 739, 740, 740, 741, 742,
+ 743, 743, 744, 745, 745, 746, 747, 747, 748, 749, 749,
+ 750, 751, 751, 752, 753, 754, 754, 755, 756, 756, 757,
+ 758, 758, 759, 760, 760, 761, 762, 762, 763, 764, 764,
+ 765, 766, 766, 767, 768, 768, 769, 770, 770, 771, 772,
+ 772, 773, 774, 774, 775, 776, 776, 777, 778, 778, 779,
+ 780, 780, 781, 781, 782, 783, 783, 784, 785, 785, 786,
+ 787, 787, 788, 789, 789, 790, 791, 791, 792, 793, 793,
+ 794, 794, 795, 796, 796, 797, 798, 798, 799, 800, 800,
+ 801, 802, 802, 803, 803, 804, 805, 805, 806, 807, 807,
+ 808, 809, 809, 810, 810, 811, 812, 812, 813, 814, 814,
+ 815, 815, 816, 817, 817, 818, 819, 819, 820, 820, 821,
+ 822, 822, 823, 824, 824, 825, 825, 826, 827, 827, 828,
+ 829, 829, 830, 830, 831, 832, 832, 833, 833, 834, 835,
+ 835, 836, 836, 837, 838, 838, 839, 840, 840, 841, 841,
+ 842, 843, 843, 844, 844, 845, 846, 846, 847, 847, 848,
+ 849, 849, 850, 850, 851, 852, 852, 853, 853, 854, 855,
+ 855, 856, 856, 857, 858, 858, 859, 859, 860, 861, 861,
+ 862, 862, 863, 864, 864, 865, 865, 866, 867, 867, 868,
+ 868, 869, 869, 870, 871, 871, 872, 872, 873, 874, 874,
+ 875, 875, 876, 877, 877, 878, 878, 879, 879, 880, 881,
+ 881, 882, 882, 883, 883, 884, 885, 885, 886, 886, 887,
+ 888, 888, 889, 889, 890, 890, 891, 892, 892, 893, 893,
+ 894, 894, 895, 896, 896, 897, 897, 898, 898, 899, 900,
+ 900, 901, 901, 902, 902, 903, 904, 904, 905, 905, 906,
+ 906, 907, 907, 908, 909, 909, 910, 910, 911, 911, 912,
+ 913, 913, 914, 914, 915, 915, 916, 916, 917, 918, 918,
+ 919, 919, 920, 920, 921, 921, 922, 923, 923, 924, 924,
+ 925, 925, 926, 926, 927, 928, 928, 929, 929, 930, 930,
+ 931, 931, 932, 932, 933, 934, 934, 935, 935, 936, 936,
+ 937, 937, 938, 939, 939, 940, 940, 941, 941, 942, 942,
+ 943, 943, 944, 944, 945, 946, 946, 947, 947, 948, 948,
+ 949, 949, 950, 950, 951, 952, 952, 953, 953, 954, 954,
+ 955, 955, 956, 956, 957, 957, 958, 958, 959, 960, 960,
+ 961, 961, 962, 962, 963, 963, 964, 964, 965, 965, 966,
+ 966, 967, 967, 968, 969, 969, 970, 970, 971, 971, 972,
+ 972, 973, 973, 974, 974, 975, 975, 976, 976, 977, 977,
+ 978, 979, 979, 980, 980, 981, 981, 982, 982, 983, 983,
+ 984, 984, 985, 985, 986, 986, 987, 987, 988, 988, 989,
+ 989, 990, 990, 991, 992, 992, 993, 993, 994, 994, 995,
+ 995, 996, 996, 997, 997, 998, 998, 999, 999, 1000, 1000,
+ 1001, 1001, 1002, 1002, 1003, 1003, 1004, 1004, 1005, 1005, 1006,
+ 1006, 1007, 1007, 1008, 1008, 1009, 1009, 1010, 1010, 1011, 1011,
+ 1012, 1012, 1013, 1013, 1014, 1014, 1015, 1015, 1016, 1016, 1017,
+ 1017, 1018, 1018, 1019, 1019, 1020, 1020, 1021, 1021, 1022, 1022,
+ 1023,
+};
+
+static const u16 xgamma10_06[GAMMA10_TABLE_LENGTH] = { /* 10-bit gamma LUT: 1024 entries mapping input 0..1023 to output 0..1023, monotonically non-decreasing; suffix _06 presumably encodes gamma = 0.6 -- TODO confirm against the driver's gamma-selection code */
+ 0, 16, 24, 31, 37, 42, 47, 51, 56, 60, 64,
+ 67, 71, 75, 78, 81, 84, 88, 91, 94, 97, 99,
+ 102, 105, 108, 110, 113, 116, 118, 121, 123, 126, 128,
+ 130, 133, 135, 137, 140, 142, 144, 146, 148, 151, 153,
+ 155, 157, 159, 161, 163, 165, 167, 169, 171, 173, 175,
+ 177, 179, 181, 183, 185, 187, 188, 190, 192, 194, 196,
+ 198, 199, 201, 203, 205, 206, 208, 210, 212, 213, 215,
+ 217, 218, 220, 222, 223, 225, 227, 228, 230, 232, 233,
+ 235, 236, 238, 240, 241, 243, 244, 246, 247, 249, 250,
+ 252, 253, 255, 257, 258, 260, 261, 263, 264, 265, 267,
+ 268, 270, 271, 273, 274, 276, 277, 279, 280, 281, 283,
+ 284, 286, 287, 288, 290, 291, 293, 294, 295, 297, 298,
+ 299, 301, 302, 303, 305, 306, 308, 309, 310, 312, 313,
+ 314, 315, 317, 318, 319, 321, 322, 323, 325, 326, 327,
+ 328, 330, 331, 332, 334, 335, 336, 337, 339, 340, 341,
+ 342, 344, 345, 346, 347, 349, 350, 351, 352, 353, 355,
+ 356, 357, 358, 359, 361, 362, 363, 364, 365, 367, 368,
+ 369, 370, 371, 373, 374, 375, 376, 377, 378, 380, 381,
+ 382, 383, 384, 385, 387, 388, 389, 390, 391, 392, 393,
+ 394, 396, 397, 398, 399, 400, 401, 402, 403, 405, 406,
+ 407, 408, 409, 410, 411, 412, 413, 415, 416, 417, 418,
+ 419, 420, 421, 422, 423, 424, 425, 426, 428, 429, 430,
+ 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441,
+ 442, 443, 445, 446, 447, 448, 449, 450, 451, 452, 453,
+ 454, 455, 456, 457, 458, 459, 460, 461, 462, 463, 464,
+ 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475,
+ 476, 477, 478, 479, 480, 481, 482, 483, 484, 485, 486,
+ 487, 488, 489, 490, 491, 492, 493, 494, 495, 496, 497,
+ 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 507,
+ 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, 518,
+ 519, 520, 521, 522, 523, 524, 525, 525, 526, 527, 528,
+ 529, 530, 531, 532, 533, 534, 535, 536, 537, 538, 538,
+ 539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 548,
+ 549, 550, 551, 552, 553, 554, 555, 556, 557, 558, 558,
+ 559, 560, 561, 562, 563, 564, 565, 566, 566, 567, 568,
+ 569, 570, 571, 572, 573, 574, 574, 575, 576, 577, 578,
+ 579, 580, 581, 581, 582, 583, 584, 585, 586, 587, 588,
+ 588, 589, 590, 591, 592, 593, 594, 594, 595, 596, 597,
+ 598, 599, 600, 601, 601, 602, 603, 604, 605, 606, 606,
+ 607, 608, 609, 610, 611, 612, 612, 613, 614, 615, 616,
+ 617, 617, 618, 619, 620, 621, 622, 622, 623, 624, 625,
+ 626, 627, 627, 628, 629, 630, 631, 632, 632, 633, 634,
+ 635, 636, 637, 637, 638, 639, 640, 641, 642, 642, 643,
+ 644, 645, 646, 646, 647, 648, 649, 650, 650, 651, 652,
+ 653, 654, 655, 655, 656, 657, 658, 659, 659, 660, 661,
+ 662, 663, 663, 664, 665, 666, 667, 667, 668, 669, 670,
+ 671, 671, 672, 673, 674, 675, 675, 676, 677, 678, 678,
+ 679, 680, 681, 682, 682, 683, 684, 685, 686, 686, 687,
+ 688, 689, 689, 690, 691, 692, 693, 693, 694, 695, 696,
+ 696, 697, 698, 699, 700, 700, 701, 702, 703, 703, 704,
+ 705, 706, 707, 707, 708, 709, 710, 710, 711, 712, 713,
+ 713, 714, 715, 716, 716, 717, 718, 719, 719, 720, 721,
+ 722, 723, 723, 724, 725, 726, 726, 727, 728, 729, 729,
+ 730, 731, 732, 732, 733, 734, 735, 735, 736, 737, 738,
+ 738, 739, 740, 741, 741, 742, 743, 743, 744, 745, 746,
+ 746, 747, 748, 749, 749, 750, 751, 752, 752, 753, 754,
+ 755, 755, 756, 757, 758, 758, 759, 760, 760, 761, 762,
+ 763, 763, 764, 765, 766, 766, 767, 768, 768, 769, 770,
+ 771, 771, 772, 773, 774, 774, 775, 776, 776, 777, 778,
+ 779, 779, 780, 781, 781, 782, 783, 784, 784, 785, 786,
+ 786, 787, 788, 789, 789, 790, 791, 791, 792, 793, 794,
+ 794, 795, 796, 796, 797, 798, 799, 799, 800, 801, 801,
+ 802, 803, 803, 804, 805, 806, 806, 807, 808, 808, 809,
+ 810, 811, 811, 812, 813, 813, 814, 815, 815, 816, 817,
+ 818, 818, 819, 820, 820, 821, 822, 822, 823, 824, 824,
+ 825, 826, 827, 827, 828, 829, 829, 830, 831, 831, 832,
+ 833, 833, 834, 835, 835, 836, 837, 838, 838, 839, 840,
+ 840, 841, 842, 842, 843, 844, 844, 845, 846, 846, 847,
+ 848, 848, 849, 850, 851, 851, 852, 853, 853, 854, 855,
+ 855, 856, 857, 857, 858, 859, 859, 860, 861, 861, 862,
+ 863, 863, 864, 865, 865, 866, 867, 867, 868, 869, 869,
+ 870, 871, 871, 872, 873, 873, 874, 875, 875, 876, 877,
+ 877, 878, 879, 879, 880, 881, 881, 882, 883, 883, 884,
+ 885, 885, 886, 887, 887, 888, 889, 889, 890, 891, 891,
+ 892, 893, 893, 894, 895, 895, 896, 897, 897, 898, 898,
+ 899, 900, 900, 901, 902, 902, 903, 904, 904, 905, 906,
+ 906, 907, 908, 908, 909, 910, 910, 911, 911, 912, 913,
+ 913, 914, 915, 915, 916, 917, 917, 918, 919, 919, 920,
+ 921, 921, 922, 922, 923, 924, 924, 925, 926, 926, 927,
+ 928, 928, 929, 930, 930, 931, 931, 932, 933, 933, 934,
+ 935, 935, 936, 937, 937, 938, 938, 939, 940, 940, 941,
+ 942, 942, 943, 944, 944, 945, 945, 946, 947, 947, 948,
+ 949, 949, 950, 950, 951, 952, 952, 953, 954, 954, 955,
+ 956, 956, 957, 957, 958, 959, 959, 960, 961, 961, 962,
+ 962, 963, 964, 964, 965, 966, 966, 967, 967, 968, 969,
+ 969, 970, 970, 971, 972, 972, 973, 974, 974, 975, 975,
+ 976, 977, 977, 978, 979, 979, 980, 980, 981, 982, 982,
+ 983, 983, 984, 985, 985, 986, 987, 987, 988, 988, 989,
+ 990, 990, 991, 991, 992, 993, 993, 994, 995, 995, 996,
+ 996, 997, 998, 998, 999, 999, 1000, 1001, 1001, 1002, 1002,
+ 1003, 1004, 1004, 1005, 1006, 1006, 1007, 1007, 1008, 1009, 1009,
+ 1010, 1010, 1011, 1012, 1012, 1013, 1013, 1014, 1015, 1015, 1016,
+ 1016, 1017, 1018, 1018, 1019, 1019, 1020, 1021, 1021, 1022, 1022,
+ 1023,
+};
+
+static const u16 xgamma10_07[GAMMA10_TABLE_LENGTH] = { /* 10-bit gamma LUT: 1024 entries mapping input 0..1023 to output 0..1023, monotonically non-decreasing; suffix _07 presumably encodes gamma = 0.7 -- TODO confirm */
+ 0, 8, 13, 17, 21, 25, 28, 31, 34, 37, 40,
+ 43, 46, 48, 51, 53, 56, 58, 60, 63, 65, 67,
+ 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90,
+ 92, 94, 96, 98, 100, 102, 104, 106, 108, 109, 111,
+ 113, 115, 117, 118, 120, 122, 124, 125, 127, 129, 131,
+ 132, 134, 136, 137, 139, 140, 142, 144, 145, 147, 149,
+ 150, 152, 153, 155, 157, 158, 160, 161, 163, 164, 166,
+ 167, 169, 170, 172, 173, 175, 176, 178, 179, 181, 182,
+ 184, 185, 187, 188, 190, 191, 192, 194, 195, 197, 198,
+ 199, 201, 202, 204, 205, 206, 208, 209, 211, 212, 213,
+ 215, 216, 217, 219, 220, 222, 223, 224, 226, 227, 228,
+ 230, 231, 232, 234, 235, 236, 237, 239, 240, 241, 243,
+ 244, 245, 247, 248, 249, 250, 252, 253, 254, 256, 257,
+ 258, 259, 261, 262, 263, 264, 266, 267, 268, 269, 271,
+ 272, 273, 274, 275, 277, 278, 279, 280, 282, 283, 284,
+ 285, 286, 288, 289, 290, 291, 292, 294, 295, 296, 297,
+ 298, 300, 301, 302, 303, 304, 306, 307, 308, 309, 310,
+ 311, 313, 314, 315, 316, 317, 318, 319, 321, 322, 323,
+ 324, 325, 326, 327, 329, 330, 331, 332, 333, 334, 335,
+ 337, 338, 339, 340, 341, 342, 343, 344, 346, 347, 348,
+ 349, 350, 351, 352, 353, 354, 355, 357, 358, 359, 360,
+ 361, 362, 363, 364, 365, 366, 368, 369, 370, 371, 372,
+ 373, 374, 375, 376, 377, 378, 379, 380, 382, 383, 384,
+ 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395,
+ 396, 397, 398, 400, 401, 402, 403, 404, 405, 406, 407,
+ 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418,
+ 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429,
+ 430, 431, 432, 433, 434, 435, 436, 437, 439, 440, 441,
+ 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452,
+ 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462,
+ 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473,
+ 474, 475, 476, 477, 478, 479, 480, 481, 482, 483, 484,
+ 485, 486, 487, 488, 489, 490, 491, 492, 492, 493, 494,
+ 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505,
+ 506, 507, 508, 509, 510, 511, 511, 512, 513, 514, 515,
+ 516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526,
+ 526, 527, 528, 529, 530, 531, 532, 533, 534, 535, 536,
+ 537, 538, 538, 539, 540, 541, 542, 543, 544, 545, 546,
+ 547, 548, 549, 549, 550, 551, 552, 553, 554, 555, 556,
+ 557, 558, 559, 560, 560, 561, 562, 563, 564, 565, 566,
+ 567, 568, 569, 569, 570, 571, 572, 573, 574, 575, 576,
+ 577, 578, 578, 579, 580, 581, 582, 583, 584, 585, 586,
+ 586, 587, 588, 589, 590, 591, 592, 593, 594, 594, 595,
+ 596, 597, 598, 599, 600, 601, 601, 602, 603, 604, 605,
+ 606, 607, 608, 608, 609, 610, 611, 612, 613, 614, 615,
+ 615, 616, 617, 618, 619, 620, 621, 622, 622, 623, 624,
+ 625, 626, 627, 628, 628, 629, 630, 631, 632, 633, 634,
+ 634, 635, 636, 637, 638, 639, 640, 640, 641, 642, 643,
+ 644, 645, 646, 646, 647, 648, 649, 650, 651, 652, 652,
+ 653, 654, 655, 656, 657, 657, 658, 659, 660, 661, 662,
+ 663, 663, 664, 665, 666, 667, 668, 668, 669, 670, 671,
+ 672, 673, 673, 674, 675, 676, 677, 678, 678, 679, 680,
+ 681, 682, 683, 683, 684, 685, 686, 687, 688, 688, 689,
+ 690, 691, 692, 693, 693, 694, 695, 696, 697, 698, 698,
+ 699, 700, 701, 702, 703, 703, 704, 705, 706, 707, 707,
+ 708, 709, 710, 711, 712, 712, 713, 714, 715, 716, 716,
+ 717, 718, 719, 720, 721, 721, 722, 723, 724, 725, 725,
+ 726, 727, 728, 729, 729, 730, 731, 732, 733, 733, 734,
+ 735, 736, 737, 738, 738, 739, 740, 741, 742, 742, 743,
+ 744, 745, 746, 746, 747, 748, 749, 750, 750, 751, 752,
+ 753, 754, 754, 755, 756, 757, 758, 758, 759, 760, 761,
+ 761, 762, 763, 764, 765, 765, 766, 767, 768, 769, 769,
+ 770, 771, 772, 773, 773, 774, 775, 776, 777, 777, 778,
+ 779, 780, 780, 781, 782, 783, 784, 784, 785, 786, 787,
+ 788, 788, 789, 790, 791, 791, 792, 793, 794, 795, 795,
+ 796, 797, 798, 798, 799, 800, 801, 802, 802, 803, 804,
+ 805, 805, 806, 807, 808, 809, 809, 810, 811, 812, 812,
+ 813, 814, 815, 816, 816, 817, 818, 819, 819, 820, 821,
+ 822, 822, 823, 824, 825, 826, 826, 827, 828, 829, 829,
+ 830, 831, 832, 832, 833, 834, 835, 835, 836, 837, 838,
+ 839, 839, 840, 841, 842, 842, 843, 844, 845, 845, 846,
+ 847, 848, 848, 849, 850, 851, 851, 852, 853, 854, 854,
+ 855, 856, 857, 857, 858, 859, 860, 860, 861, 862, 863,
+ 864, 864, 865, 866, 867, 867, 868, 869, 870, 870, 871,
+ 872, 873, 873, 874, 875, 876, 876, 877, 878, 879, 879,
+ 880, 881, 881, 882, 883, 884, 884, 885, 886, 887, 887,
+ 888, 889, 890, 890, 891, 892, 893, 893, 894, 895, 896,
+ 896, 897, 898, 899, 899, 900, 901, 902, 902, 903, 904,
+ 904, 905, 906, 907, 907, 908, 909, 910, 910, 911, 912,
+ 913, 913, 914, 915, 916, 916, 917, 918, 918, 919, 920,
+ 921, 921, 922, 923, 924, 924, 925, 926, 927, 927, 928,
+ 929, 929, 930, 931, 932, 932, 933, 934, 935, 935, 936,
+ 937, 937, 938, 939, 940, 940, 941, 942, 943, 943, 944,
+ 945, 945, 946, 947, 948, 948, 949, 950, 950, 951, 952,
+ 953, 953, 954, 955, 956, 956, 957, 958, 958, 959, 960,
+ 961, 961, 962, 963, 963, 964, 965, 966, 966, 967, 968,
+ 968, 969, 970, 971, 971, 972, 973, 973, 974, 975, 976,
+ 976, 977, 978, 978, 979, 980, 981, 981, 982, 983, 983,
+ 984, 985, 986, 986, 987, 988, 988, 989, 990, 991, 991,
+ 992, 993, 993, 994, 995, 996, 996, 997, 998, 998, 999,
+ 1000, 1000, 1001, 1002, 1003, 1003, 1004, 1005, 1005, 1006, 1007,
+ 1008, 1008, 1009, 1010, 1010, 1011, 1012, 1012, 1013, 1014, 1015,
+ 1015, 1016, 1017, 1017, 1018, 1019, 1019, 1020, 1021, 1022, 1022,
+ 1023,
+};
+
+static const u16 xgamma10_08[GAMMA10_TABLE_LENGTH] = { /* 10-bit gamma LUT: 1024 entries mapping input 0..1023 to output 0..1023, monotonically non-decreasing; suffix _08 presumably encodes gamma = 0.8 -- TODO confirm */
+ 0, 4, 7, 10, 12, 14, 17, 19, 21, 23, 25,
+ 27, 29, 31, 33, 35, 37, 39, 40, 42, 44, 46,
+ 47, 49, 51, 53, 54, 56, 58, 59, 61, 62, 64,
+ 66, 67, 69, 70, 72, 73, 75, 76, 78, 80, 81,
+ 83, 84, 86, 87, 89, 90, 91, 93, 94, 96, 97,
+ 99, 100, 102, 103, 104, 106, 107, 109, 110, 111, 113,
+ 114, 116, 117, 118, 120, 121, 122, 124, 125, 126, 128,
+ 129, 131, 132, 133, 135, 136, 137, 138, 140, 141, 142,
+ 144, 145, 146, 148, 149, 150, 152, 153, 154, 155, 157,
+ 158, 159, 160, 162, 163, 164, 166, 167, 168, 169, 171,
+ 172, 173, 174, 176, 177, 178, 179, 181, 182, 183, 184,
+ 185, 187, 188, 189, 190, 192, 193, 194, 195, 196, 198,
+ 199, 200, 201, 202, 204, 205, 206, 207, 208, 210, 211,
+ 212, 213, 214, 216, 217, 218, 219, 220, 221, 223, 224,
+ 225, 226, 227, 228, 230, 231, 232, 233, 234, 235, 237,
+ 238, 239, 240, 241, 242, 243, 245, 246, 247, 248, 249,
+ 250, 251, 253, 254, 255, 256, 257, 258, 259, 260, 262,
+ 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 281, 282, 283, 284, 285, 286,
+ 287, 288, 289, 290, 292, 293, 294, 295, 296, 297, 298,
+ 299, 300, 301, 302, 304, 305, 306, 307, 308, 309, 310,
+ 311, 312, 313, 314, 315, 316, 318, 319, 320, 321, 322,
+ 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333,
+ 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345,
+ 346, 347, 348, 349, 350, 351, 352, 353, 355, 356, 357,
+ 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368,
+ 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379,
+ 380, 381, 382, 383, 384, 385, 386, 388, 389, 390, 391,
+ 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402,
+ 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413,
+ 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424,
+ 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435,
+ 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446,
+ 447, 448, 449, 450, 451, 451, 452, 453, 454, 455, 456,
+ 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467,
+ 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478,
+ 479, 480, 481, 482, 483, 484, 485, 486, 486, 487, 488,
+ 489, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499,
+ 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 509,
+ 510, 511, 512, 513, 514, 515, 516, 517, 518, 519, 520,
+ 521, 522, 523, 524, 525, 526, 527, 527, 528, 529, 530,
+ 531, 532, 533, 534, 535, 536, 537, 538, 539, 540, 541,
+ 542, 543, 543, 544, 545, 546, 547, 548, 549, 550, 551,
+ 552, 553, 554, 555, 556, 557, 557, 558, 559, 560, 561,
+ 562, 563, 564, 565, 566, 567, 568, 569, 570, 570, 571,
+ 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582,
+ 582, 583, 584, 585, 586, 587, 588, 589, 590, 591, 592,
+ 593, 594, 594, 595, 596, 597, 598, 599, 600, 601, 602,
+ 603, 604, 604, 605, 606, 607, 608, 609, 610, 611, 612,
+ 613, 614, 615, 615, 616, 617, 618, 619, 620, 621, 622,
+ 623, 624, 624, 625, 626, 627, 628, 629, 630, 631, 632,
+ 633, 634, 634, 635, 636, 637, 638, 639, 640, 641, 642,
+ 643, 643, 644, 645, 646, 647, 648, 649, 650, 651, 651,
+ 652, 653, 654, 655, 656, 657, 658, 659, 660, 660, 661,
+ 662, 663, 664, 665, 666, 667, 668, 668, 669, 670, 671,
+ 672, 673, 674, 675, 676, 676, 677, 678, 679, 680, 681,
+ 682, 683, 684, 684, 685, 686, 687, 688, 689, 690, 691,
+ 691, 692, 693, 694, 695, 696, 697, 698, 699, 699, 700,
+ 701, 702, 703, 704, 705, 706, 706, 707, 708, 709, 710,
+ 711, 712, 713, 713, 714, 715, 716, 717, 718, 719, 720,
+ 720, 721, 722, 723, 724, 725, 726, 727, 727, 728, 729,
+ 730, 731, 732, 733, 734, 734, 735, 736, 737, 738, 739,
+ 740, 740, 741, 742, 743, 744, 745, 746, 747, 747, 748,
+ 749, 750, 751, 752, 753, 753, 754, 755, 756, 757, 758,
+ 759, 759, 760, 761, 762, 763, 764, 765, 766, 766, 767,
+ 768, 769, 770, 771, 772, 772, 773, 774, 775, 776, 777,
+ 778, 778, 779, 780, 781, 782, 783, 784, 784, 785, 786,
+ 787, 788, 789, 790, 790, 791, 792, 793, 794, 795, 795,
+ 796, 797, 798, 799, 800, 801, 801, 802, 803, 804, 805,
+ 806, 807, 807, 808, 809, 810, 811, 812, 812, 813, 814,
+ 815, 816, 817, 818, 818, 819, 820, 821, 822, 823, 823,
+ 824, 825, 826, 827, 828, 829, 829, 830, 831, 832, 833,
+ 834, 834, 835, 836, 837, 838, 839, 839, 840, 841, 842,
+ 843, 844, 845, 845, 846, 847, 848, 849, 850, 850, 851,
+ 852, 853, 854, 855, 855, 856, 857, 858, 859, 860, 860,
+ 861, 862, 863, 864, 865, 865, 866, 867, 868, 869, 870,
+ 870, 871, 872, 873, 874, 875, 875, 876, 877, 878, 879,
+ 880, 880, 881, 882, 883, 884, 885, 885, 886, 887, 888,
+ 889, 890, 890, 891, 892, 893, 894, 895, 895, 896, 897,
+ 898, 899, 899, 900, 901, 902, 903, 904, 904, 905, 906,
+ 907, 908, 909, 909, 910, 911, 912, 913, 913, 914, 915,
+ 916, 917, 918, 918, 919, 920, 921, 922, 923, 923, 924,
+ 925, 926, 927, 927, 928, 929, 930, 931, 932, 932, 933,
+ 934, 935, 936, 936, 937, 938, 939, 940, 941, 941, 942,
+ 943, 944, 945, 945, 946, 947, 948, 949, 950, 950, 951,
+ 952, 953, 954, 954, 955, 956, 957, 958, 958, 959, 960,
+ 961, 962, 963, 963, 964, 965, 966, 967, 967, 968, 969,
+ 970, 971, 971, 972, 973, 974, 975, 976, 976, 977, 978,
+ 979, 980, 980, 981, 982, 983, 984, 984, 985, 986, 987,
+ 988, 988, 989, 990, 991, 992, 992, 993, 994, 995, 996,
+ 997, 997, 998, 999, 1000, 1001, 1001, 1002, 1003, 1004, 1005,
+ 1005, 1006, 1007, 1008, 1009, 1009, 1010, 1011, 1012, 1013, 1013,
+ 1014, 1015, 1016, 1017, 1017, 1018, 1019, 1020, 1021, 1021, 1022,
+ 1023,
+};
+
+static const u16 xgamma10_09[GAMMA10_TABLE_LENGTH] = { /* 10-bit gamma LUT: 1024 entries mapping input 0..1023 to output 0..1023, monotonically non-decreasing; suffix _09 presumably encodes gamma = 0.9 -- TODO confirm */
+ 0, 2, 4, 5, 7, 9, 10, 12, 13, 14, 16,
+ 17, 19, 20, 22, 23, 24, 26, 27, 28, 30, 31,
+ 32, 34, 35, 36, 38, 39, 40, 41, 43, 44, 45,
+ 47, 48, 49, 50, 52, 53, 54, 55, 57, 58, 59,
+ 60, 62, 63, 64, 65, 66, 68, 69, 70, 71, 72,
+ 74, 75, 76, 77, 78, 80, 81, 82, 83, 84, 86,
+ 87, 88, 89, 90, 92, 93, 94, 95, 96, 97, 99,
+ 100, 101, 102, 103, 104, 106, 107, 108, 109, 110, 111,
+ 112, 114, 115, 116, 117, 118, 119, 120, 122, 123, 124,
+ 125, 126, 127, 128, 130, 131, 132, 133, 134, 135, 136,
+ 137, 139, 140, 141, 142, 143, 144, 145, 146, 148, 149,
+ 150, 151, 152, 153, 154, 155, 156, 158, 159, 160, 161,
+ 162, 163, 164, 165, 166, 168, 169, 170, 171, 172, 173,
+ 174, 175, 176, 177, 178, 180, 181, 182, 183, 184, 185,
+ 186, 187, 188, 189, 190, 192, 193, 194, 195, 196, 197,
+ 198, 199, 200, 201, 202, 203, 204, 206, 207, 208, 209,
+ 210, 211, 212, 213, 214, 215, 216, 217, 218, 220, 221,
+ 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232,
+ 233, 234, 235, 237, 238, 239, 240, 241, 242, 243, 244,
+ 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255,
+ 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267,
+ 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278,
+ 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290,
+ 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301,
+ 302, 303, 304, 305, 306, 307, 308, 309, 311, 312, 313,
+ 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324,
+ 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335,
+ 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346,
+ 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357,
+ 358, 359, 360, 361, 362, 363, 364, 365, 367, 368, 369,
+ 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380,
+ 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391,
+ 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402,
+ 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413,
+ 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424,
+ 425, 426, 427, 427, 428, 429, 430, 431, 432, 433, 434,
+ 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445,
+ 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456,
+ 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467,
+ 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478,
+ 479, 480, 481, 482, 483, 484, 485, 486, 487, 488, 489,
+ 489, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499,
+ 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510,
+ 511, 512, 513, 514, 515, 516, 517, 518, 519, 520, 521,
+ 522, 523, 524, 525, 525, 526, 527, 528, 529, 530, 531,
+ 532, 533, 534, 535, 536, 537, 538, 539, 540, 541, 542,
+ 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553,
+ 554, 554, 555, 556, 557, 558, 559, 560, 561, 562, 563,
+ 564, 565, 566, 567, 568, 569, 570, 571, 572, 573, 574,
+ 575, 576, 577, 578, 579, 579, 580, 581, 582, 583, 584,
+ 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, 595,
+ 596, 597, 598, 599, 600, 601, 601, 602, 603, 604, 605,
+ 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616,
+ 617, 618, 619, 620, 621, 621, 622, 623, 624, 625, 626,
+ 627, 628, 629, 630, 631, 632, 633, 634, 635, 636, 637,
+ 638, 639, 640, 640, 641, 642, 643, 644, 645, 646, 647,
+ 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, 658,
+ 658, 659, 660, 661, 662, 663, 664, 665, 666, 667, 668,
+ 669, 670, 671, 672, 673, 674, 675, 675, 676, 677, 678,
+ 679, 680, 681, 682, 683, 684, 685, 686, 687, 688, 689,
+ 690, 691, 691, 692, 693, 694, 695, 696, 697, 698, 699,
+ 700, 701, 702, 703, 704, 705, 706, 706, 707, 708, 709,
+ 710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 720,
+ 721, 721, 722, 723, 724, 725, 726, 727, 728, 729, 730,
+ 731, 732, 733, 734, 735, 735, 736, 737, 738, 739, 740,
+ 741, 742, 743, 744, 745, 746, 747, 748, 749, 749, 750,
+ 751, 752, 753, 754, 755, 756, 757, 758, 759, 760, 761,
+ 762, 762, 763, 764, 765, 766, 767, 768, 769, 770, 771,
+ 772, 773, 774, 775, 776, 776, 777, 778, 779, 780, 781,
+ 782, 783, 784, 785, 786, 787, 788, 788, 789, 790, 791,
+ 792, 793, 794, 795, 796, 797, 798, 799, 800, 801, 801,
+ 802, 803, 804, 805, 806, 807, 808, 809, 810, 811, 812,
+ 813, 813, 814, 815, 816, 817, 818, 819, 820, 821, 822,
+ 823, 824, 825, 825, 826, 827, 828, 829, 830, 831, 832,
+ 833, 834, 835, 836, 836, 837, 838, 839, 840, 841, 842,
+ 843, 844, 845, 846, 847, 848, 848, 849, 850, 851, 852,
+ 853, 854, 855, 856, 857, 858, 859, 859, 860, 861, 862,
+ 863, 864, 865, 866, 867, 868, 869, 870, 870, 871, 872,
+ 873, 874, 875, 876, 877, 878, 879, 880, 881, 881, 882,
+ 883, 884, 885, 886, 887, 888, 889, 890, 891, 892, 892,
+ 893, 894, 895, 896, 897, 898, 899, 900, 901, 902, 902,
+ 903, 904, 905, 906, 907, 908, 909, 910, 911, 912, 913,
+ 913, 914, 915, 916, 917, 918, 919, 920, 921, 922, 923,
+ 923, 924, 925, 926, 927, 928, 929, 930, 931, 932, 933,
+ 933, 934, 935, 936, 937, 938, 939, 940, 941, 942, 943,
+ 943, 944, 945, 946, 947, 948, 949, 950, 951, 952, 953,
+ 953, 954, 955, 956, 957, 958, 959, 960, 961, 962, 962,
+ 963, 964, 965, 966, 967, 968, 969, 970, 971, 972, 972,
+ 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, 982,
+ 983, 984, 985, 986, 987, 988, 989, 990, 991, 991, 992,
+ 993, 994, 995, 996, 997, 998, 999, 1000, 1000, 1001, 1002,
+ 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1009, 1010, 1011, 1012,
+ 1013, 1014, 1015, 1016, 1017, 1018, 1018, 1019, 1020, 1021, 1022,
+ 1023,
+};
+
+static const u16 xgamma10_10[GAMMA10_TABLE_LENGTH] = { /* identity LUT: entry i == i for all 0..1023 (gamma = 1.0 pass-through) */
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65,
+ 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76,
+ 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98,
+ 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
+ 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131,
+ 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
+ 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153,
+ 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
+ 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186,
+ 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197,
+ 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208,
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230,
+ 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241,
+ 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252,
+ 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263,
+ 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285,
+ 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296,
+ 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307,
+ 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318,
+ 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329,
+ 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340,
+ 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351,
+ 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362,
+ 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373,
+ 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384,
+ 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395,
+ 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406,
+ 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417,
+ 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428,
+ 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439,
+ 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450,
+ 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461,
+ 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472,
+ 473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 483,
+ 484, 485, 486, 487, 488, 489, 490, 491, 492, 493, 494,
+ 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505,
+ 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516,
+ 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527,
+ 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 538,
+ 539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 549,
+ 550, 551, 552, 553, 554, 555, 556, 557, 558, 559, 560,
+ 561, 562, 563, 564, 565, 566, 567, 568, 569, 570, 571,
+ 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582,
+ 583, 584, 585, 586, 587, 588, 589, 590, 591, 592, 593,
+ 594, 595, 596, 597, 598, 599, 600, 601, 602, 603, 604,
+ 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615,
+ 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626,
+ 627, 628, 629, 630, 631, 632, 633, 634, 635, 636, 637,
+ 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648,
+ 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, 659,
+ 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 670,
+ 671, 672, 673, 674, 675, 676, 677, 678, 679, 680, 681,
+ 682, 683, 684, 685, 686, 687, 688, 689, 690, 691, 692,
+ 693, 694, 695, 696, 697, 698, 699, 700, 701, 702, 703,
+ 704, 705, 706, 707, 708, 709, 710, 711, 712, 713, 714,
+ 715, 716, 717, 718, 719, 720, 721, 722, 723, 724, 725,
+ 726, 727, 728, 729, 730, 731, 732, 733, 734, 735, 736,
+ 737, 738, 739, 740, 741, 742, 743, 744, 745, 746, 747,
+ 748, 749, 750, 751, 752, 753, 754, 755, 756, 757, 758,
+ 759, 760, 761, 762, 763, 764, 765, 766, 767, 768, 769,
+ 770, 771, 772, 773, 774, 775, 776, 777, 778, 779, 780,
+ 781, 782, 783, 784, 785, 786, 787, 788, 789, 790, 791,
+ 792, 793, 794, 795, 796, 797, 798, 799, 800, 801, 802,
+ 803, 804, 805, 806, 807, 808, 809, 810, 811, 812, 813,
+ 814, 815, 816, 817, 818, 819, 820, 821, 822, 823, 824,
+ 825, 826, 827, 828, 829, 830, 831, 832, 833, 834, 835,
+ 836, 837, 838, 839, 840, 841, 842, 843, 844, 845, 846,
+ 847, 848, 849, 850, 851, 852, 853, 854, 855, 856, 857,
+ 858, 859, 860, 861, 862, 863, 864, 865, 866, 867, 868,
+ 869, 870, 871, 872, 873, 874, 875, 876, 877, 878, 879,
+ 880, 881, 882, 883, 884, 885, 886, 887, 888, 889, 890,
+ 891, 892, 893, 894, 895, 896, 897, 898, 899, 900, 901,
+ 902, 903, 904, 905, 906, 907, 908, 909, 910, 911, 912,
+ 913, 914, 915, 916, 917, 918, 919, 920, 921, 922, 923,
+ 924, 925, 926, 927, 928, 929, 930, 931, 932, 933, 934,
+ 935, 936, 937, 938, 939, 940, 941, 942, 943, 944, 945,
+ 946, 947, 948, 949, 950, 951, 952, 953, 954, 955, 956,
+ 957, 958, 959, 960, 961, 962, 963, 964, 965, 966, 967,
+ 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, 978,
+ 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, 989,
+ 990, 991, 992, 993, 994, 995, 996, 997, 998, 999, 1000,
+ 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010, 1011,
+ 1012, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020, 1021, 1022,
+ 1023,
+};
+
+static const u16 xgamma10_11[GAMMA10_TABLE_LENGTH] = {
+ 0, 1, 1, 2, 2, 3, 4, 4, 5, 6, 6,
+ 7, 8, 8, 9, 10, 11, 11, 12, 13, 13, 14,
+ 15, 16, 16, 17, 18, 19, 20, 20, 21, 22, 23,
+ 23, 24, 25, 26, 27, 27, 28, 29, 30, 31, 31,
+ 32, 33, 34, 35, 35, 36, 37, 38, 39, 39, 40,
+ 41, 42, 43, 44, 44, 45, 46, 47, 48, 49, 49,
+ 50, 51, 52, 53, 54, 54, 55, 56, 57, 58, 59,
+ 59, 60, 61, 62, 63, 64, 65, 65, 66, 67, 68,
+ 69, 70, 71, 71, 72, 73, 74, 75, 76, 77, 78,
+ 78, 79, 80, 81, 82, 83, 84, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 92, 93, 94, 95, 96, 97,
+ 98, 99, 100, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117,
+ 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127,
+ 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137,
+ 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
+ 148, 149, 149, 150, 151, 152, 153, 154, 155, 156, 157,
+ 158, 159, 160, 161, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175, 175, 176, 177,
+ 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188,
+ 189, 190, 191, 191, 192, 193, 194, 195, 196, 197, 198,
+ 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
+ 210, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230,
+ 231, 232, 232, 233, 234, 235, 236, 237, 238, 239, 240,
+ 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
+ 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 261,
+ 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283,
+ 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294,
+ 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305,
+ 306, 307, 307, 308, 309, 310, 311, 312, 313, 314, 315,
+ 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326,
+ 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337,
+ 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348,
+ 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359,
+ 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370,
+ 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381,
+ 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392,
+ 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403,
+ 404, 405, 406, 407, 408, 409, 410, 411, 412, 414, 415,
+ 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426,
+ 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437,
+ 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448,
+ 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459,
+ 460, 461, 462, 463, 464, 465, 466, 468, 469, 470, 471,
+ 472, 473, 474, 475, 476, 477, 478, 479, 480, 481, 482,
+ 483, 484, 485, 486, 487, 488, 489, 490, 491, 492, 493,
+ 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 505,
+ 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516,
+ 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527,
+ 528, 529, 530, 531, 532, 533, 535, 536, 537, 538, 539,
+ 540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550,
+ 551, 552, 553, 554, 555, 556, 557, 558, 559, 560, 562,
+ 563, 564, 565, 566, 567, 568, 569, 570, 571, 572, 573,
+ 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, 584,
+ 586, 587, 588, 589, 590, 591, 592, 593, 594, 595, 596,
+ 597, 598, 599, 600, 601, 602, 603, 604, 605, 606, 608,
+ 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619,
+ 620, 621, 622, 623, 624, 625, 626, 627, 629, 630, 631,
+ 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, 642,
+ 643, 644, 645, 646, 648, 649, 650, 651, 652, 653, 654,
+ 655, 656, 657, 658, 659, 660, 661, 662, 663, 664, 665,
+ 667, 668, 669, 670, 671, 672, 673, 674, 675, 676, 677,
+ 678, 679, 680, 681, 682, 683, 685, 686, 687, 688, 689,
+ 690, 691, 692, 693, 694, 695, 696, 697, 698, 699, 700,
+ 702, 703, 704, 705, 706, 707, 708, 709, 710, 711, 712,
+ 713, 714, 715, 716, 717, 719, 720, 721, 722, 723, 724,
+ 725, 726, 727, 728, 729, 730, 731, 732, 733, 735, 736,
+ 737, 738, 739, 740, 741, 742, 743, 744, 745, 746, 747,
+ 748, 750, 751, 752, 753, 754, 755, 756, 757, 758, 759,
+ 760, 761, 762, 763, 764, 766, 767, 768, 769, 770, 771,
+ 772, 773, 774, 775, 776, 777, 778, 779, 781, 782, 783,
+ 784, 785, 786, 787, 788, 789, 790, 791, 792, 793, 795,
+ 796, 797, 798, 799, 800, 801, 802, 803, 804, 805, 806,
+ 807, 809, 810, 811, 812, 813, 814, 815, 816, 817, 818,
+ 819, 820, 821, 823, 824, 825, 826, 827, 828, 829, 830,
+ 831, 832, 833, 834, 835, 837, 838, 839, 840, 841, 842,
+ 843, 844, 845, 846, 847, 848, 850, 851, 852, 853, 854,
+ 855, 856, 857, 858, 859, 860, 861, 863, 864, 865, 866,
+ 867, 868, 869, 870, 871, 872, 873, 874, 876, 877, 878,
+ 879, 880, 881, 882, 883, 884, 885, 886, 887, 889, 890,
+ 891, 892, 893, 894, 895, 896, 897, 898, 899, 900, 902,
+ 903, 904, 905, 906, 907, 908, 909, 910, 911, 912, 914,
+ 915, 916, 917, 918, 919, 920, 921, 922, 923, 924, 926,
+ 927, 928, 929, 930, 931, 932, 933, 934, 935, 936, 938,
+ 939, 940, 941, 942, 943, 944, 945, 946, 947, 948, 950,
+ 951, 952, 953, 954, 955, 956, 957, 958, 959, 960, 962,
+ 963, 964, 965, 966, 967, 968, 969, 970, 971, 973, 974,
+ 975, 976, 977, 978, 979, 980, 981, 982, 983, 985, 986,
+ 987, 988, 989, 990, 991, 992, 993, 994, 996, 997, 998,
+ 999, 1000, 1001, 1002, 1003, 1004, 1005, 1007, 1008, 1009, 1010,
+ 1011, 1012, 1013, 1014, 1015, 1016, 1018, 1019, 1020, 1021, 1022,
+ 1023,
+};
+
+static const u16 xgamma10_12[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 4,
+ 4, 5, 5, 6, 6, 7, 7, 8, 9, 9, 10,
+ 10, 11, 11, 12, 12, 13, 14, 14, 15, 15, 16,
+ 17, 17, 18, 18, 19, 20, 20, 21, 22, 22, 23,
+ 23, 24, 25, 25, 26, 27, 27, 28, 29, 29, 30,
+ 31, 31, 32, 33, 33, 34, 35, 35, 36, 37, 37,
+ 38, 39, 40, 40, 41, 42, 42, 43, 44, 44, 45,
+ 46, 47, 47, 48, 49, 49, 50, 51, 52, 52, 53,
+ 54, 55, 55, 56, 57, 58, 58, 59, 60, 61, 61,
+ 62, 63, 64, 64, 65, 66, 67, 67, 68, 69, 70,
+ 70, 71, 72, 73, 74, 74, 75, 76, 77, 77, 78,
+ 79, 80, 81, 81, 82, 83, 84, 84, 85, 86, 87,
+ 88, 88, 89, 90, 91, 92, 92, 93, 94, 95, 96,
+ 96, 97, 98, 99, 100, 101, 101, 102, 103, 104, 105,
+ 105, 106, 107, 108, 109, 110, 110, 111, 112, 113, 114,
+ 115, 115, 116, 117, 118, 119, 120, 120, 121, 122, 123,
+ 124, 125, 125, 126, 127, 128, 129, 130, 131, 131, 132,
+ 133, 134, 135, 136, 137, 137, 138, 139, 140, 141, 142,
+ 143, 143, 144, 145, 146, 147, 148, 149, 150, 150, 151,
+ 152, 153, 154, 155, 156, 157, 157, 158, 159, 160, 161,
+ 162, 163, 164, 164, 165, 166, 167, 168, 169, 170, 171,
+ 172, 172, 173, 174, 175, 176, 177, 178, 179, 180, 180,
+ 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190,
+ 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 200,
+ 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211,
+ 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221,
+ 222, 223, 224, 224, 225, 226, 227, 228, 229, 230, 231,
+ 232, 233, 234, 235, 236, 237, 238, 238, 239, 240, 241,
+ 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252,
+ 253, 254, 255, 256, 256, 257, 258, 259, 260, 261, 262,
+ 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273,
+ 274, 275, 276, 277, 278, 279, 280, 280, 281, 282, 283,
+ 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294,
+ 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305,
+ 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316,
+ 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327,
+ 328, 329, 330, 331, 332, 333, 334, 334, 335, 336, 337,
+ 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348,
+ 349, 350, 351, 353, 354, 355, 356, 357, 358, 359, 360,
+ 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371,
+ 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382,
+ 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393,
+ 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404,
+ 405, 406, 407, 408, 409, 411, 412, 413, 414, 415, 416,
+ 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427,
+ 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 439,
+ 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450,
+ 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 462,
+ 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473,
+ 474, 475, 476, 477, 478, 479, 481, 482, 483, 484, 485,
+ 486, 487, 488, 489, 490, 491, 492, 493, 494, 495, 496,
+ 497, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508,
+ 509, 510, 511, 512, 513, 515, 516, 517, 518, 519, 520,
+ 521, 522, 523, 524, 525, 526, 527, 529, 530, 531, 532,
+ 533, 534, 535, 536, 537, 538, 539, 540, 541, 543, 544,
+ 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555,
+ 557, 558, 559, 560, 561, 562, 563, 564, 565, 566, 567,
+ 569, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579,
+ 581, 582, 583, 584, 585, 586, 587, 588, 589, 590, 591,
+ 593, 594, 595, 596, 597, 598, 599, 600, 601, 602, 604,
+ 605, 606, 607, 608, 609, 610, 611, 612, 613, 615, 616,
+ 617, 618, 619, 620, 621, 622, 623, 624, 626, 627, 628,
+ 629, 630, 631, 632, 633, 634, 636, 637, 638, 639, 640,
+ 641, 642, 643, 644, 646, 647, 648, 649, 650, 651, 652,
+ 653, 654, 656, 657, 658, 659, 660, 661, 662, 663, 664,
+ 666, 667, 668, 669, 670, 671, 672, 673, 675, 676, 677,
+ 678, 679, 680, 681, 682, 683, 685, 686, 687, 688, 689,
+ 690, 691, 692, 694, 695, 696, 697, 698, 699, 700, 701,
+ 703, 704, 705, 706, 707, 708, 709, 710, 712, 713, 714,
+ 715, 716, 717, 718, 720, 721, 722, 723, 724, 725, 726,
+ 727, 729, 730, 731, 732, 733, 734, 735, 737, 738, 739,
+ 740, 741, 742, 743, 745, 746, 747, 748, 749, 750, 751,
+ 752, 754, 755, 756, 757, 758, 759, 760, 762, 763, 764,
+ 765, 766, 767, 768, 770, 771, 772, 773, 774, 775, 776,
+ 778, 779, 780, 781, 782, 783, 785, 786, 787, 788, 789,
+ 790, 791, 793, 794, 795, 796, 797, 798, 799, 801, 802,
+ 803, 804, 805, 806, 808, 809, 810, 811, 812, 813, 814,
+ 816, 817, 818, 819, 820, 821, 823, 824, 825, 826, 827,
+ 828, 830, 831, 832, 833, 834, 835, 836, 838, 839, 840,
+ 841, 842, 843, 845, 846, 847, 848, 849, 850, 852, 853,
+ 854, 855, 856, 857, 859, 860, 861, 862, 863, 864, 866,
+ 867, 868, 869, 870, 871, 873, 874, 875, 876, 877, 878,
+ 880, 881, 882, 883, 884, 885, 887, 888, 889, 890, 891,
+ 892, 894, 895, 896, 897, 898, 900, 901, 902, 903, 904,
+ 905, 907, 908, 909, 910, 911, 912, 914, 915, 916, 917,
+ 918, 920, 921, 922, 923, 924, 925, 927, 928, 929, 930,
+ 931, 932, 934, 935, 936, 937, 938, 940, 941, 942, 943,
+ 944, 946, 947, 948, 949, 950, 951, 953, 954, 955, 956,
+ 957, 959, 960, 961, 962, 963, 964, 966, 967, 968, 969,
+ 970, 972, 973, 974, 975, 976, 978, 979, 980, 981, 982,
+ 984, 985, 986, 987, 988, 989, 991, 992, 993, 994, 995,
+ 997, 998, 999, 1000, 1001, 1003, 1004, 1005, 1006, 1007, 1009,
+ 1010, 1011, 1012, 1013, 1015, 1016, 1017, 1018, 1019, 1021, 1022,
+ 1023,
+};
+
+static const u16 xgamma10_13[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
+ 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 7,
+ 7, 7, 8, 8, 9, 9, 10, 10, 10, 11, 11,
+ 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17,
+ 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22,
+ 23, 23, 24, 25, 25, 26, 26, 27, 27, 28, 28,
+ 29, 30, 30, 31, 31, 32, 32, 33, 34, 34, 35,
+ 35, 36, 37, 37, 38, 38, 39, 40, 40, 41, 42,
+ 42, 43, 43, 44, 45, 45, 46, 47, 47, 48, 48,
+ 49, 50, 50, 51, 52, 52, 53, 54, 54, 55, 56,
+ 56, 57, 58, 58, 59, 60, 60, 61, 62, 62, 63,
+ 64, 64, 65, 66, 67, 67, 68, 69, 69, 70, 71,
+ 71, 72, 73, 74, 74, 75, 76, 76, 77, 78, 79,
+ 79, 80, 81, 81, 82, 83, 84, 84, 85, 86, 87,
+ 87, 88, 89, 89, 90, 91, 92, 92, 93, 94, 95,
+ 95, 96, 97, 98, 98, 99, 100, 101, 102, 102, 103,
+ 104, 105, 105, 106, 107, 108, 108, 109, 110, 111, 112,
+ 112, 113, 114, 115, 115, 116, 117, 118, 119, 119, 120,
+ 121, 122, 123, 123, 124, 125, 126, 127, 127, 128, 129,
+ 130, 131, 131, 132, 133, 134, 135, 135, 136, 137, 138,
+ 139, 140, 140, 141, 142, 143, 144, 145, 145, 146, 147,
+ 148, 149, 149, 150, 151, 152, 153, 154, 155, 155, 156,
+ 157, 158, 159, 160, 160, 161, 162, 163, 164, 165, 166,
+ 166, 167, 168, 169, 170, 171, 172, 172, 173, 174, 175,
+ 176, 177, 178, 178, 179, 180, 181, 182, 183, 184, 185,
+ 185, 186, 187, 188, 189, 190, 191, 192, 192, 193, 194,
+ 195, 196, 197, 198, 199, 200, 200, 201, 202, 203, 204,
+ 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 214,
+ 215, 216, 217, 218, 218, 219, 220, 221, 222, 223, 224,
+ 225, 226, 227, 228, 229, 229, 230, 231, 232, 233, 234,
+ 235, 236, 237, 238, 239, 240, 241, 242, 242, 243, 244,
+ 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255,
+ 256, 257, 257, 258, 259, 260, 261, 262, 263, 264, 265,
+ 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276,
+ 277, 278, 278, 279, 280, 281, 282, 283, 284, 285, 286,
+ 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297,
+ 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308,
+ 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319,
+ 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330,
+ 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341,
+ 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352,
+ 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363,
+ 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374,
+ 375, 376, 377, 378, 379, 380, 381, 383, 384, 385, 386,
+ 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397,
+ 398, 399, 400, 401, 402, 403, 404, 405, 407, 408, 409,
+ 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420,
+ 421, 422, 423, 424, 426, 427, 428, 429, 430, 431, 432,
+ 433, 434, 435, 436, 437, 438, 439, 440, 442, 443, 444,
+ 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455,
+ 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467,
+ 468, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479,
+ 480, 482, 483, 484, 485, 486, 487, 488, 489, 490, 491,
+ 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 504,
+ 505, 506, 507, 508, 509, 510, 511, 512, 513, 515, 516,
+ 517, 518, 519, 520, 521, 522, 523, 525, 526, 527, 528,
+ 529, 530, 531, 532, 534, 535, 536, 537, 538, 539, 540,
+ 541, 542, 544, 545, 546, 547, 548, 549, 550, 551, 553,
+ 554, 555, 556, 557, 558, 559, 561, 562, 563, 564, 565,
+ 566, 567, 568, 570, 571, 572, 573, 574, 575, 576, 578,
+ 579, 580, 581, 582, 583, 584, 586, 587, 588, 589, 590,
+ 591, 592, 594, 595, 596, 597, 598, 599, 600, 602, 603,
+ 604, 605, 606, 607, 608, 610, 611, 612, 613, 614, 615,
+ 617, 618, 619, 620, 621, 622, 624, 625, 626, 627, 628,
+ 629, 630, 632, 633, 634, 635, 636, 637, 639, 640, 641,
+ 642, 643, 644, 646, 647, 648, 649, 650, 652, 653, 654,
+ 655, 656, 657, 659, 660, 661, 662, 663, 664, 666, 667,
+ 668, 669, 670, 671, 673, 674, 675, 676, 677, 679, 680,
+ 681, 682, 683, 684, 686, 687, 688, 689, 690, 692, 693,
+ 694, 695, 696, 698, 699, 700, 701, 702, 704, 705, 706,
+ 707, 708, 709, 711, 712, 713, 714, 715, 717, 718, 719,
+ 720, 721, 723, 724, 725, 726, 727, 729, 730, 731, 732,
+ 733, 735, 736, 737, 738, 739, 741, 742, 743, 744, 746,
+ 747, 748, 749, 750, 752, 753, 754, 755, 756, 758, 759,
+ 760, 761, 762, 764, 765, 766, 767, 769, 770, 771, 772,
+ 773, 775, 776, 777, 778, 780, 781, 782, 783, 784, 786,
+ 787, 788, 789, 791, 792, 793, 794, 795, 797, 798, 799,
+ 800, 802, 803, 804, 805, 807, 808, 809, 810, 811, 813,
+ 814, 815, 816, 818, 819, 820, 821, 823, 824, 825, 826,
+ 827, 829, 830, 831, 832, 834, 835, 836, 837, 839, 840,
+ 841, 842, 844, 845, 846, 847, 849, 850, 851, 852, 854,
+ 855, 856, 857, 859, 860, 861, 862, 864, 865, 866, 867,
+ 869, 870, 871, 872, 874, 875, 876, 877, 879, 880, 881,
+ 882, 884, 885, 886, 887, 889, 890, 891, 892, 894, 895,
+ 896, 897, 899, 900, 901, 903, 904, 905, 906, 908, 909,
+ 910, 911, 913, 914, 915, 916, 918, 919, 920, 922, 923,
+ 924, 925, 927, 928, 929, 930, 932, 933, 934, 935, 937,
+ 938, 939, 941, 942, 943, 944, 946, 947, 948, 950, 951,
+ 952, 953, 955, 956, 957, 958, 960, 961, 962, 964, 965,
+ 966, 967, 969, 970, 971, 973, 974, 975, 976, 978, 979,
+ 980, 982, 983, 984, 985, 987, 988, 989, 991, 992, 993,
+ 994, 996, 997, 998, 1000, 1001, 1002, 1004, 1005, 1006, 1007,
+ 1009, 1010, 1011, 1013, 1014, 1015, 1017, 1018, 1019, 1020, 1022,
+ 1023,
+};
+
+static const u16 xgamma10_14[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2,
+ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4,
+ 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8,
+ 8, 9, 9, 9, 10, 10, 11, 11, 11, 12, 12,
+ 12, 13, 13, 14, 14, 15, 15, 15, 16, 16, 17,
+ 17, 18, 18, 18, 19, 19, 20, 20, 21, 21, 22,
+ 22, 23, 23, 23, 24, 24, 25, 25, 26, 26, 27,
+ 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 32,
+ 33, 34, 34, 35, 35, 36, 36, 37, 37, 38, 38,
+ 39, 39, 40, 41, 41, 42, 42, 43, 43, 44, 45,
+ 45, 46, 46, 47, 47, 48, 49, 49, 50, 50, 51,
+ 52, 52, 53, 53, 54, 55, 55, 56, 56, 57, 58,
+ 58, 59, 59, 60, 61, 61, 62, 63, 63, 64, 64,
+ 65, 66, 66, 67, 68, 68, 69, 70, 70, 71, 72,
+ 72, 73, 74, 74, 75, 76, 76, 77, 78, 78, 79,
+ 80, 80, 81, 82, 82, 83, 84, 84, 85, 86, 86,
+ 87, 88, 88, 89, 90, 91, 91, 92, 93, 93, 94,
+ 95, 95, 96, 97, 98, 98, 99, 100, 100, 101, 102,
+ 103, 103, 104, 105, 106, 106, 107, 108, 109, 109, 110,
+ 111, 111, 112, 113, 114, 114, 115, 116, 117, 117, 118,
+ 119, 120, 120, 121, 122, 123, 124, 124, 125, 126, 127,
+ 127, 128, 129, 130, 130, 131, 132, 133, 134, 134, 135,
+ 136, 137, 138, 138, 139, 140, 141, 141, 142, 143, 144,
+ 145, 145, 146, 147, 148, 149, 150, 150, 151, 152, 153,
+ 154, 154, 155, 156, 157, 158, 158, 159, 160, 161, 162,
+ 163, 163, 164, 165, 166, 167, 168, 168, 169, 170, 171,
+ 172, 173, 173, 174, 175, 176, 177, 178, 179, 179, 180,
+ 181, 182, 183, 184, 185, 185, 186, 187, 188, 189, 190,
+ 191, 191, 192, 193, 194, 195, 196, 197, 198, 198, 199,
+ 200, 201, 202, 203, 204, 205, 205, 206, 207, 208, 209,
+ 210, 211, 212, 213, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 222, 222, 223, 224, 225, 226, 227, 228, 229,
+ 230, 231, 232, 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 241, 242, 243, 244, 244, 245, 246, 247, 248, 249,
+ 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 259,
+ 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270,
+ 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281,
+ 282, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291,
+ 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302,
+ 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313,
+ 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324,
+ 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335,
+ 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346,
+ 347, 348, 349, 351, 352, 353, 354, 355, 356, 357, 358,
+ 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369,
+ 370, 371, 372, 373, 374, 375, 377, 378, 379, 380, 381,
+ 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392,
+ 393, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404,
+ 405, 406, 407, 408, 410, 411, 412, 413, 414, 415, 416,
+ 417, 418, 419, 420, 421, 423, 424, 425, 426, 427, 428,
+ 429, 430, 431, 432, 433, 435, 436, 437, 438, 439, 440,
+ 441, 442, 443, 444, 446, 447, 448, 449, 450, 451, 452,
+ 453, 454, 456, 457, 458, 459, 460, 461, 462, 463, 464,
+ 466, 467, 468, 469, 470, 471, 472, 473, 475, 476, 477,
+ 478, 479, 480, 481, 482, 484, 485, 486, 487, 488, 489,
+ 490, 491, 493, 494, 495, 496, 497, 498, 499, 501, 502,
+ 503, 504, 505, 506, 507, 509, 510, 511, 512, 513, 514,
+ 515, 517, 518, 519, 520, 521, 522, 524, 525, 526, 527,
+ 528, 529, 531, 532, 533, 534, 535, 536, 537, 539, 540,
+ 541, 542, 543, 544, 546, 547, 548, 549, 550, 552, 553,
+ 554, 555, 556, 557, 559, 560, 561, 562, 563, 564, 566,
+ 567, 568, 569, 570, 572, 573, 574, 575, 576, 578, 579,
+ 580, 581, 582, 583, 585, 586, 587, 588, 589, 591, 592,
+ 593, 594, 595, 597, 598, 599, 600, 601, 603, 604, 605,
+ 606, 607, 609, 610, 611, 612, 613, 615, 616, 617, 618,
+ 620, 621, 622, 623, 624, 626, 627, 628, 629, 630, 632,
+ 633, 634, 635, 637, 638, 639, 640, 641, 643, 644, 645,
+ 646, 648, 649, 650, 651, 653, 654, 655, 656, 657, 659,
+ 660, 661, 662, 664, 665, 666, 667, 669, 670, 671, 672,
+ 674, 675, 676, 677, 679, 680, 681, 682, 684, 685, 686,
+ 687, 689, 690, 691, 692, 694, 695, 696, 697, 699, 700,
+ 701, 702, 704, 705, 706, 707, 709, 710, 711, 712, 714,
+ 715, 716, 717, 719, 720, 721, 723, 724, 725, 726, 728,
+ 729, 730, 731, 733, 734, 735, 737, 738, 739, 740, 742,
+ 743, 744, 745, 747, 748, 749, 751, 752, 753, 754, 756,
+ 757, 758, 760, 761, 762, 763, 765, 766, 767, 769, 770,
+ 771, 772, 774, 775, 776, 778, 779, 780, 782, 783, 784,
+ 785, 787, 788, 789, 791, 792, 793, 794, 796, 797, 798,
+ 800, 801, 802, 804, 805, 806, 808, 809, 810, 811, 813,
+ 814, 815, 817, 818, 819, 821, 822, 823, 825, 826, 827,
+ 829, 830, 831, 833, 834, 835, 836, 838, 839, 840, 842,
+ 843, 844, 846, 847, 848, 850, 851, 852, 854, 855, 856,
+ 858, 859, 860, 862, 863, 864, 866, 867, 868, 870, 871,
+ 872, 874, 875, 876, 878, 879, 880, 882, 883, 884, 886,
+ 887, 888, 890, 891, 893, 894, 895, 897, 898, 899, 901,
+ 902, 903, 905, 906, 907, 909, 910, 911, 913, 914, 915,
+ 917, 918, 920, 921, 922, 924, 925, 926, 928, 929, 930,
+ 932, 933, 935, 936, 937, 939, 940, 941, 943, 944, 945,
+ 947, 948, 950, 951, 952, 954, 955, 956, 958, 959, 961,
+ 962, 963, 965, 966, 967, 969, 970, 972, 973, 974, 976,
+ 977, 978, 980, 981, 983, 984, 985, 987, 988, 990, 991,
+ 992, 994, 995, 996, 998, 999, 1001, 1002, 1003, 1005, 1006,
+ 1008, 1009, 1010, 1012, 1013, 1015, 1016, 1017, 1019, 1020, 1022,
+ 1023,
+};
+
+static const u16 xgamma10_15[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
+ 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3,
+ 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6,
+ 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9,
+ 9, 9, 10, 10, 10, 11, 11, 11, 12, 12, 12,
+ 13, 13, 13, 14, 14, 15, 15, 15, 16, 16, 16,
+ 17, 17, 18, 18, 18, 19, 19, 20, 20, 20, 21,
+ 21, 22, 22, 22, 23, 23, 24, 24, 25, 25, 25,
+ 26, 26, 27, 27, 28, 28, 28, 29, 29, 30, 30,
+ 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36,
+ 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41,
+ 42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47,
+ 47, 48, 48, 49, 50, 50, 51, 51, 52, 52, 53,
+ 53, 54, 55, 55, 56, 56, 57, 57, 58, 59, 59,
+ 60, 60, 61, 62, 62, 63, 63, 64, 64, 65, 66,
+ 66, 67, 67, 68, 69, 69, 70, 71, 71, 72, 72,
+ 73, 74, 74, 75, 76, 76, 77, 77, 78, 79, 79,
+ 80, 81, 81, 82, 83, 83, 84, 84, 85, 86, 86,
+ 87, 88, 88, 89, 90, 90, 91, 92, 92, 93, 94,
+ 94, 95, 96, 97, 97, 98, 99, 99, 100, 101, 101,
+ 102, 103, 103, 104, 105, 106, 106, 107, 108, 108, 109,
+ 110, 110, 111, 112, 113, 113, 114, 115, 116, 116, 117,
+ 118, 118, 119, 120, 121, 121, 122, 123, 124, 124, 125,
+ 126, 127, 127, 128, 129, 130, 130, 131, 132, 133, 133,
+ 134, 135, 136, 136, 137, 138, 139, 139, 140, 141, 142,
+ 143, 143, 144, 145, 146, 146, 147, 148, 149, 150, 150,
+ 151, 152, 153, 154, 154, 155, 156, 157, 158, 158, 159,
+ 160, 161, 162, 162, 163, 164, 165, 166, 167, 167, 168,
+ 169, 170, 171, 171, 172, 173, 174, 175, 176, 176, 177,
+ 178, 179, 180, 181, 181, 182, 183, 184, 185, 186, 187,
+ 187, 188, 189, 190, 191, 192, 193, 193, 194, 195, 196,
+ 197, 198, 199, 199, 200, 201, 202, 203, 204, 205, 206,
+ 206, 207, 208, 209, 210, 211, 212, 213, 214, 214, 215,
+ 216, 217, 218, 219, 220, 221, 222, 223, 223, 224, 225,
+ 226, 227, 228, 229, 230, 231, 232, 233, 233, 234, 235,
+ 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 245,
+ 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256,
+ 257, 258, 259, 260, 261, 261, 262, 263, 264, 265, 266,
+ 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277,
+ 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288,
+ 289, 290, 291, 292, 293, 293, 294, 295, 296, 297, 298,
+ 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309,
+ 310, 311, 312, 314, 315, 316, 317, 318, 319, 320, 321,
+ 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332,
+ 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343,
+ 344, 345, 346, 347, 349, 350, 351, 352, 353, 354, 355,
+ 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366,
+ 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378,
+ 379, 380, 381, 383, 384, 385, 386, 387, 388, 389, 390,
+ 391, 392, 393, 395, 396, 397, 398, 399, 400, 401, 402,
+ 403, 404, 405, 407, 408, 409, 410, 411, 412, 413, 414,
+ 415, 417, 418, 419, 420, 421, 422, 423, 424, 425, 427,
+ 428, 429, 430, 431, 432, 433, 434, 436, 437, 438, 439,
+ 440, 441, 442, 444, 445, 446, 447, 448, 449, 450, 451,
+ 453, 454, 455, 456, 457, 458, 460, 461, 462, 463, 464,
+ 465, 466, 468, 469, 470, 471, 472, 473, 475, 476, 477,
+ 478, 479, 480, 482, 483, 484, 485, 486, 487, 489, 490,
+ 491, 492, 493, 494, 496, 497, 498, 499, 500, 501, 503,
+ 504, 505, 506, 507, 509, 510, 511, 512, 513, 515, 516,
+ 517, 518, 519, 521, 522, 523, 524, 525, 527, 528, 529,
+ 530, 531, 533, 534, 535, 536, 537, 539, 540, 541, 542,
+ 543, 545, 546, 547, 548, 550, 551, 552, 553, 554, 556,
+ 557, 558, 559, 561, 562, 563, 564, 565, 567, 568, 569,
+ 570, 572, 573, 574, 575, 577, 578, 579, 580, 582, 583,
+ 584, 585, 587, 588, 589, 590, 591, 593, 594, 595, 596,
+ 598, 599, 600, 602, 603, 604, 605, 607, 608, 609, 610,
+ 612, 613, 614, 615, 617, 618, 619, 620, 622, 623, 624,
+ 626, 627, 628, 629, 631, 632, 633, 634, 636, 637, 638,
+ 640, 641, 642, 643, 645, 646, 647, 649, 650, 651, 652,
+ 654, 655, 656, 658, 659, 660, 662, 663, 664, 665, 667,
+ 668, 669, 671, 672, 673, 675, 676, 677, 678, 680, 681,
+ 682, 684, 685, 686, 688, 689, 690, 692, 693, 694, 696,
+ 697, 698, 700, 701, 702, 703, 705, 706, 707, 709, 710,
+ 711, 713, 714, 715, 717, 718, 719, 721, 722, 723, 725,
+ 726, 727, 729, 730, 731, 733, 734, 735, 737, 738, 740,
+ 741, 742, 744, 745, 746, 748, 749, 750, 752, 753, 754,
+ 756, 757, 758, 760, 761, 763, 764, 765, 767, 768, 769,
+ 771, 772, 773, 775, 776, 778, 779, 780, 782, 783, 784,
+ 786, 787, 789, 790, 791, 793, 794, 795, 797, 798, 800,
+ 801, 802, 804, 805, 806, 808, 809, 811, 812, 813, 815,
+ 816, 818, 819, 820, 822, 823, 825, 826, 827, 829, 830,
+ 832, 833, 834, 836, 837, 839, 840, 841, 843, 844, 846,
+ 847, 848, 850, 851, 853, 854, 855, 857, 858, 860, 861,
+ 863, 864, 865, 867, 868, 870, 871, 872, 874, 875, 877,
+ 878, 880, 881, 882, 884, 885, 887, 888, 890, 891, 892,
+ 894, 895, 897, 898, 900, 901, 902, 904, 905, 907, 908,
+ 910, 911, 913, 914, 915, 917, 918, 920, 921, 923, 924,
+ 926, 927, 929, 930, 931, 933, 934, 936, 937, 939, 940,
+ 942, 943, 945, 946, 947, 949, 950, 952, 953, 955, 956,
+ 958, 959, 961, 962, 964, 965, 967, 968, 969, 971, 972,
+ 974, 975, 977, 978, 980, 981, 983, 984, 986, 987, 989,
+ 990, 992, 993, 995, 996, 998, 999, 1001, 1002, 1004, 1005,
+ 1007, 1008, 1010, 1011, 1013, 1014, 1016, 1017, 1019, 1020, 1022,
+ 1023,
+};
+
+static const u16 xgamma10_16[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4,
+ 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6,
+ 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9,
+ 10, 10, 10, 10, 11, 11, 11, 12, 12, 12, 12,
+ 13, 13, 13, 14, 14, 14, 15, 15, 15, 16, 16,
+ 16, 17, 17, 17, 18, 18, 18, 19, 19, 19, 20,
+ 20, 21, 21, 21, 22, 22, 22, 23, 23, 24, 24,
+ 24, 25, 25, 26, 26, 26, 27, 27, 28, 28, 28,
+ 29, 29, 30, 30, 31, 31, 31, 32, 32, 33, 33,
+ 34, 34, 35, 35, 35, 36, 36, 37, 37, 38, 38,
+ 39, 39, 40, 40, 41, 41, 41, 42, 42, 43, 43,
+ 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49,
+ 49, 50, 50, 51, 52, 52, 53, 53, 54, 54, 55,
+ 55, 56, 56, 57, 57, 58, 58, 59, 60, 60, 61,
+ 61, 62, 62, 63, 63, 64, 65, 65, 66, 66, 67,
+ 67, 68, 69, 69, 70, 70, 71, 72, 72, 73, 73,
+ 74, 75, 75, 76, 76, 77, 78, 78, 79, 79, 80,
+ 81, 81, 82, 82, 83, 84, 84, 85, 86, 86, 87,
+ 87, 88, 89, 89, 90, 91, 91, 92, 93, 93, 94,
+ 95, 95, 96, 97, 97, 98, 99, 99, 100, 101, 101,
+ 102, 103, 103, 104, 105, 105, 106, 107, 107, 108, 109,
+ 109, 110, 111, 111, 112, 113, 114, 114, 115, 116, 116,
+ 117, 118, 119, 119, 120, 121, 121, 122, 123, 124, 124,
+ 125, 126, 126, 127, 128, 129, 129, 130, 131, 132, 132,
+ 133, 134, 135, 135, 136, 137, 138, 138, 139, 140, 141,
+ 141, 142, 143, 144, 144, 145, 146, 147, 148, 148, 149,
+ 150, 151, 151, 152, 153, 154, 155, 155, 156, 157, 158,
+ 159, 159, 160, 161, 162, 163, 163, 164, 165, 166, 167,
+ 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176,
+ 176, 177, 178, 179, 180, 181, 181, 182, 183, 184, 185,
+ 186, 186, 187, 188, 189, 190, 191, 192, 192, 193, 194,
+ 195, 196, 197, 198, 198, 199, 200, 201, 202, 203, 204,
+ 204, 205, 206, 207, 208, 209, 210, 211, 212, 212, 213,
+ 214, 215, 216, 217, 218, 219, 220, 220, 221, 222, 223,
+ 224, 225, 226, 227, 228, 229, 230, 230, 231, 232, 233,
+ 234, 235, 236, 237, 238, 239, 240, 241, 242, 242, 243,
+ 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256, 257, 258, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
+ 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
+ 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297,
+ 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308,
+ 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319,
+ 320, 321, 322, 323, 324, 325, 326, 327, 329, 330, 331,
+ 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342,
+ 343, 344, 345, 346, 348, 349, 350, 351, 352, 353, 354,
+ 355, 356, 357, 358, 359, 360, 362, 363, 364, 365, 366,
+ 367, 368, 369, 370, 371, 372, 374, 375, 376, 377, 378,
+ 379, 380, 381, 382, 383, 385, 386, 387, 388, 389, 390,
+ 391, 392, 393, 395, 396, 397, 398, 399, 400, 401, 402,
+ 404, 405, 406, 407, 408, 409, 410, 411, 413, 414, 415,
+ 416, 417, 418, 419, 421, 422, 423, 424, 425, 426, 428,
+ 429, 430, 431, 432, 433, 434, 436, 437, 438, 439, 440,
+ 441, 443, 444, 445, 446, 447, 448, 450, 451, 452, 453,
+ 454, 456, 457, 458, 459, 460, 461, 463, 464, 465, 466,
+ 467, 469, 470, 471, 472, 473, 475, 476, 477, 478, 479,
+ 481, 482, 483, 484, 485, 487, 488, 489, 490, 491, 493,
+ 494, 495, 496, 498, 499, 500, 501, 502, 504, 505, 506,
+ 507, 509, 510, 511, 512, 514, 515, 516, 517, 519, 520,
+ 521, 522, 523, 525, 526, 527, 528, 530, 531, 532, 533,
+ 535, 536, 537, 538, 540, 541, 542, 544, 545, 546, 547,
+ 549, 550, 551, 552, 554, 555, 556, 557, 559, 560, 561,
+ 563, 564, 565, 566, 568, 569, 570, 572, 573, 574, 575,
+ 577, 578, 579, 581, 582, 583, 584, 586, 587, 588, 590,
+ 591, 592, 594, 595, 596, 598, 599, 600, 601, 603, 604,
+ 605, 607, 608, 609, 611, 612, 613, 615, 616, 617, 619,
+ 620, 621, 623, 624, 625, 627, 628, 629, 631, 632, 633,
+ 635, 636, 637, 639, 640, 641, 643, 644, 645, 647, 648,
+ 649, 651, 652, 653, 655, 656, 657, 659, 660, 662, 663,
+ 664, 666, 667, 668, 670, 671, 672, 674, 675, 677, 678,
+ 679, 681, 682, 683, 685, 686, 688, 689, 690, 692, 693,
+ 694, 696, 697, 699, 700, 701, 703, 704, 706, 707, 708,
+ 710, 711, 712, 714, 715, 717, 718, 719, 721, 722, 724,
+ 725, 727, 728, 729, 731, 732, 734, 735, 736, 738, 739,
+ 741, 742, 743, 745, 746, 748, 749, 751, 752, 753, 755,
+ 756, 758, 759, 761, 762, 763, 765, 766, 768, 769, 771,
+ 772, 774, 775, 776, 778, 779, 781, 782, 784, 785, 787,
+ 788, 789, 791, 792, 794, 795, 797, 798, 800, 801, 803,
+ 804, 805, 807, 808, 810, 811, 813, 814, 816, 817, 819,
+ 820, 822, 823, 825, 826, 827, 829, 830, 832, 833, 835,
+ 836, 838, 839, 841, 842, 844, 845, 847, 848, 850, 851,
+ 853, 854, 856, 857, 859, 860, 862, 863, 865, 866, 868,
+ 869, 871, 872, 874, 875, 877, 878, 880, 881, 883, 884,
+ 886, 887, 889, 890, 892, 893, 895, 897, 898, 900, 901,
+ 903, 904, 906, 907, 909, 910, 912, 913, 915, 916, 918,
+ 919, 921, 923, 924, 926, 927, 929, 930, 932, 933, 935,
+ 936, 938, 940, 941, 943, 944, 946, 947, 949, 950, 952,
+ 954, 955, 957, 958, 960, 961, 963, 964, 966, 968, 969,
+ 971, 972, 974, 975, 977, 979, 980, 982, 983, 985, 986,
+ 988, 990, 991, 993, 994, 996, 998, 999, 1001, 1002, 1004,
+ 1005, 1007, 1009, 1010, 1012, 1013, 1015, 1017, 1018, 1020, 1021,
+ 1023,
+};
+
+static const u16 xgamma10_17[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3,
+ 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5,
+ 5, 5, 5, 5, 6, 6, 6, 6, 6, 7, 7,
+ 7, 7, 8, 8, 8, 8, 8, 9, 9, 9, 9,
+ 10, 10, 10, 10, 11, 11, 11, 12, 12, 12, 12,
+ 13, 13, 13, 13, 14, 14, 14, 15, 15, 15, 15,
+ 16, 16, 16, 17, 17, 17, 18, 18, 18, 19, 19,
+ 19, 20, 20, 20, 21, 21, 21, 22, 22, 22, 23,
+ 23, 23, 24, 24, 25, 25, 25, 26, 26, 26, 27,
+ 27, 28, 28, 28, 29, 29, 29, 30, 30, 31, 31,
+ 31, 32, 32, 33, 33, 34, 34, 34, 35, 35, 36,
+ 36, 37, 37, 37, 38, 38, 39, 39, 40, 40, 40,
+ 41, 41, 42, 42, 43, 43, 44, 44, 45, 45, 46,
+ 46, 46, 47, 47, 48, 48, 49, 49, 50, 50, 51,
+ 51, 52, 52, 53, 53, 54, 54, 55, 55, 56, 56,
+ 57, 57, 58, 58, 59, 60, 60, 61, 61, 62, 62,
+ 63, 63, 64, 64, 65, 65, 66, 67, 67, 68, 68,
+ 69, 69, 70, 70, 71, 72, 72, 73, 73, 74, 74,
+ 75, 76, 76, 77, 77, 78, 79, 79, 80, 80, 81,
+ 82, 82, 83, 83, 84, 85, 85, 86, 86, 87, 88,
+ 88, 89, 89, 90, 91, 91, 92, 93, 93, 94, 95,
+ 95, 96, 96, 97, 98, 98, 99, 100, 100, 101, 102,
+ 102, 103, 104, 104, 105, 106, 106, 107, 108, 108, 109,
+ 110, 110, 111, 112, 112, 113, 114, 114, 115, 116, 116,
+ 117, 118, 119, 119, 120, 121, 121, 122, 123, 124, 124,
+ 125, 126, 126, 127, 128, 129, 129, 130, 131, 131, 132,
+ 133, 134, 134, 135, 136, 137, 137, 138, 139, 140, 140,
+ 141, 142, 143, 143, 144, 145, 146, 146, 147, 148, 149,
+ 149, 150, 151, 152, 153, 153, 154, 155, 156, 156, 157,
+ 158, 159, 160, 160, 161, 162, 163, 164, 164, 165, 166,
+ 167, 168, 168, 169, 170, 171, 172, 172, 173, 174, 175,
+ 176, 177, 177, 178, 179, 180, 181, 182, 182, 183, 184,
+ 185, 186, 187, 187, 188, 189, 190, 191, 192, 193, 193,
+ 194, 195, 196, 197, 198, 199, 199, 200, 201, 202, 203,
+ 204, 205, 206, 206, 207, 208, 209, 210, 211, 212, 213,
+ 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 222,
+ 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
+ 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243,
+ 244, 245, 246, 247, 248, 248, 249, 250, 251, 252, 253,
+ 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
+ 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
+ 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297,
+ 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308,
+ 309, 310, 311, 312, 313, 314, 315, 316, 317, 319, 320,
+ 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331,
+ 332, 333, 334, 336, 337, 338, 339, 340, 341, 342, 343,
+ 344, 345, 346, 347, 349, 350, 351, 352, 353, 354, 355,
+ 356, 357, 358, 360, 361, 362, 363, 364, 365, 366, 367,
+ 368, 370, 371, 372, 373, 374, 375, 376, 377, 379, 380,
+ 381, 382, 383, 384, 385, 386, 388, 389, 390, 391, 392,
+ 393, 394, 396, 397, 398, 399, 400, 401, 403, 404, 405,
+ 406, 407, 408, 409, 411, 412, 413, 414, 415, 417, 418,
+ 419, 420, 421, 422, 424, 425, 426, 427, 428, 430, 431,
+ 432, 433, 434, 435, 437, 438, 439, 440, 441, 443, 444,
+ 445, 446, 448, 449, 450, 451, 452, 454, 455, 456, 457,
+ 458, 460, 461, 462, 463, 465, 466, 467, 468, 469, 471,
+ 472, 473, 474, 476, 477, 478, 479, 481, 482, 483, 484,
+ 486, 487, 488, 489, 491, 492, 493, 494, 496, 497, 498,
+ 499, 501, 502, 503, 505, 506, 507, 508, 510, 511, 512,
+ 513, 515, 516, 517, 519, 520, 521, 522, 524, 525, 526,
+ 528, 529, 530, 532, 533, 534, 535, 537, 538, 539, 541,
+ 542, 543, 545, 546, 547, 549, 550, 551, 552, 554, 555,
+ 556, 558, 559, 560, 562, 563, 564, 566, 567, 568, 570,
+ 571, 572, 574, 575, 576, 578, 579, 580, 582, 583, 584,
+ 586, 587, 589, 590, 591, 593, 594, 595, 597, 598, 599,
+ 601, 602, 604, 605, 606, 608, 609, 610, 612, 613, 615,
+ 616, 617, 619, 620, 621, 623, 624, 626, 627, 628, 630,
+ 631, 633, 634, 635, 637, 638, 640, 641, 642, 644, 645,
+ 647, 648, 649, 651, 652, 654, 655, 656, 658, 659, 661,
+ 662, 664, 665, 666, 668, 669, 671, 672, 674, 675, 676,
+ 678, 679, 681, 682, 684, 685, 686, 688, 689, 691, 692,
+ 694, 695, 697, 698, 699, 701, 702, 704, 705, 707, 708,
+ 710, 711, 713, 714, 716, 717, 718, 720, 721, 723, 724,
+ 726, 727, 729, 730, 732, 733, 735, 736, 738, 739, 741,
+ 742, 744, 745, 747, 748, 750, 751, 753, 754, 756, 757,
+ 759, 760, 762, 763, 765, 766, 768, 769, 771, 772, 774,
+ 775, 777, 778, 780, 781, 783, 784, 786, 787, 789, 790,
+ 792, 793, 795, 797, 798, 800, 801, 803, 804, 806, 807,
+ 809, 810, 812, 814, 815, 817, 818, 820, 821, 823, 824,
+ 826, 827, 829, 831, 832, 834, 835, 837, 838, 840, 842,
+ 843, 845, 846, 848, 849, 851, 853, 854, 856, 857, 859,
+ 860, 862, 864, 865, 867, 868, 870, 872, 873, 875, 876,
+ 878, 880, 881, 883, 884, 886, 888, 889, 891, 892, 894,
+ 896, 897, 899, 900, 902, 904, 905, 907, 908, 910, 912,
+ 913, 915, 917, 918, 920, 921, 923, 925, 926, 928, 930,
+ 931, 933, 935, 936, 938, 939, 941, 943, 944, 946, 948,
+ 949, 951, 953, 954, 956, 958, 959, 961, 963, 964, 966,
+ 968, 969, 971, 973, 974, 976, 978, 979, 981, 983, 984,
+ 986, 988, 989, 991, 993, 994, 996, 998, 999, 1001, 1003,
+ 1004, 1006, 1008, 1009, 1011, 1013, 1015, 1016, 1018, 1020, 1021,
+ 1023,
+};
+
+static const u16 xgamma10_18[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5,
+ 5, 5, 6, 6, 6, 6, 6, 7, 7, 7, 7,
+ 7, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9,
+ 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12,
+ 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15,
+ 15, 16, 16, 16, 16, 17, 17, 17, 18, 18, 18,
+ 18, 19, 19, 19, 20, 20, 20, 21, 21, 21, 22,
+ 22, 22, 23, 23, 23, 24, 24, 24, 25, 25, 25,
+ 26, 26, 26, 27, 27, 27, 28, 28, 29, 29, 29,
+ 30, 30, 30, 31, 31, 32, 32, 32, 33, 33, 33,
+ 34, 34, 35, 35, 35, 36, 36, 37, 37, 38, 38,
+ 38, 39, 39, 40, 40, 40, 41, 41, 42, 42, 43,
+ 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48,
+ 48, 48, 49, 49, 50, 50, 51, 51, 52, 52, 53,
+ 53, 54, 54, 55, 55, 56, 56, 57, 57, 58, 58,
+ 59, 59, 60, 60, 61, 61, 62, 62, 63, 63, 64,
+ 64, 65, 65, 66, 66, 67, 68, 68, 69, 69, 70,
+ 70, 71, 71, 72, 72, 73, 74, 74, 75, 75, 76,
+ 76, 77, 78, 78, 79, 79, 80, 80, 81, 82, 82,
+ 83, 83, 84, 85, 85, 86, 86, 87, 88, 88, 89,
+ 89, 90, 91, 91, 92, 92, 93, 94, 94, 95, 96,
+ 96, 97, 97, 98, 99, 99, 100, 101, 101, 102, 103,
+ 103, 104, 104, 105, 106, 106, 107, 108, 108, 109, 110,
+ 110, 111, 112, 112, 113, 114, 114, 115, 116, 117, 117,
+ 118, 119, 119, 120, 121, 121, 122, 123, 123, 124, 125,
+ 126, 126, 127, 128, 128, 129, 130, 131, 131, 132, 133,
+ 133, 134, 135, 136, 136, 137, 138, 139, 139, 140, 141,
+ 142, 142, 143, 144, 145, 145, 146, 147, 148, 148, 149,
+ 150, 151, 151, 152, 153, 154, 155, 155, 156, 157, 158,
+ 158, 159, 160, 161, 162, 162, 163, 164, 165, 166, 166,
+ 167, 168, 169, 170, 170, 171, 172, 173, 174, 175, 175,
+ 176, 177, 178, 179, 179, 180, 181, 182, 183, 184, 184,
+ 185, 186, 187, 188, 189, 190, 190, 191, 192, 193, 194,
+ 195, 196, 196, 197, 198, 199, 200, 201, 202, 203, 203,
+ 204, 205, 206, 207, 208, 209, 210, 210, 211, 212, 213,
+ 214, 215, 216, 217, 218, 219, 219, 220, 221, 222, 223,
+ 224, 225, 226, 227, 228, 229, 230, 230, 231, 232, 233,
+ 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244,
+ 245, 246, 247, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265,
+ 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276,
+ 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287,
+ 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298,
+ 299, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310,
+ 311, 312, 313, 314, 315, 316, 317, 319, 320, 321, 322,
+ 323, 324, 325, 326, 327, 328, 329, 330, 331, 333, 334,
+ 335, 336, 337, 338, 339, 340, 341, 342, 344, 345, 346,
+ 347, 348, 349, 350, 351, 353, 354, 355, 356, 357, 358,
+ 359, 360, 362, 363, 364, 365, 366, 367, 368, 370, 371,
+ 372, 373, 374, 375, 376, 378, 379, 380, 381, 382, 383,
+ 385, 386, 387, 388, 389, 390, 392, 393, 394, 395, 396,
+ 397, 399, 400, 401, 402, 403, 405, 406, 407, 408, 409,
+ 411, 412, 413, 414, 415, 417, 418, 419, 420, 421, 423,
+ 424, 425, 426, 427, 429, 430, 431, 432, 434, 435, 436,
+ 437, 439, 440, 441, 442, 443, 445, 446, 447, 448, 450,
+ 451, 452, 453, 455, 456, 457, 458, 460, 461, 462, 464,
+ 465, 466, 467, 469, 470, 471, 472, 474, 475, 476, 478,
+ 479, 480, 481, 483, 484, 485, 487, 488, 489, 490, 492,
+ 493, 494, 496, 497, 498, 500, 501, 502, 504, 505, 506,
+ 507, 509, 510, 511, 513, 514, 515, 517, 518, 519, 521,
+ 522, 523, 525, 526, 527, 529, 530, 531, 533, 534, 535,
+ 537, 538, 540, 541, 542, 544, 545, 546, 548, 549, 550,
+ 552, 553, 555, 556, 557, 559, 560, 561, 563, 564, 566,
+ 567, 568, 570, 571, 572, 574, 575, 577, 578, 579, 581,
+ 582, 584, 585, 586, 588, 589, 591, 592, 594, 595, 596,
+ 598, 599, 601, 602, 603, 605, 606, 608, 609, 611, 612,
+ 613, 615, 616, 618, 619, 621, 622, 624, 625, 626, 628,
+ 629, 631, 632, 634, 635, 637, 638, 640, 641, 642, 644,
+ 645, 647, 648, 650, 651, 653, 654, 656, 657, 659, 660,
+ 662, 663, 665, 666, 668, 669, 671, 672, 673, 675, 676,
+ 678, 679, 681, 682, 684, 686, 687, 689, 690, 692, 693,
+ 695, 696, 698, 699, 701, 702, 704, 705, 707, 708, 710,
+ 711, 713, 714, 716, 717, 719, 721, 722, 724, 725, 727,
+ 728, 730, 731, 733, 734, 736, 738, 739, 741, 742, 744,
+ 745, 747, 749, 750, 752, 753, 755, 756, 758, 760, 761,
+ 763, 764, 766, 767, 769, 771, 772, 774, 775, 777, 779,
+ 780, 782, 783, 785, 787, 788, 790, 791, 793, 795, 796,
+ 798, 799, 801, 803, 804, 806, 807, 809, 811, 812, 814,
+ 816, 817, 819, 820, 822, 824, 825, 827, 829, 830, 832,
+ 834, 835, 837, 839, 840, 842, 843, 845, 847, 848, 850,
+ 852, 853, 855, 857, 858, 860, 862, 863, 865, 867, 868,
+ 870, 872, 873, 875, 877, 878, 880, 882, 884, 885, 887,
+ 889, 890, 892, 894, 895, 897, 899, 900, 902, 904, 906,
+ 907, 909, 911, 912, 914, 916, 918, 919, 921, 923, 924,
+ 926, 928, 930, 931, 933, 935, 936, 938, 940, 942, 943,
+ 945, 947, 949, 950, 952, 954, 956, 957, 959, 961, 963,
+ 964, 966, 968, 970, 971, 973, 975, 977, 978, 980, 982,
+ 984, 986, 987, 989, 991, 993, 994, 996, 998, 1000, 1002,
+ 1003, 1005, 1007, 1009, 1010, 1012, 1014, 1016, 1018, 1019, 1021,
+ 1023,
+};
+
+static const u16 xgamma10_19[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4,
+ 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5,
+ 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9,
+ 10, 10, 10, 10, 11, 11, 11, 11, 11, 12, 12,
+ 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15,
+ 15, 15, 15, 16, 16, 16, 16, 17, 17, 17, 17,
+ 18, 18, 18, 19, 19, 19, 19, 20, 20, 20, 21,
+ 21, 21, 22, 22, 22, 22, 23, 23, 23, 24, 24,
+ 24, 25, 25, 25, 26, 26, 26, 27, 27, 27, 28,
+ 28, 28, 29, 29, 29, 30, 30, 30, 31, 31, 32,
+ 32, 32, 33, 33, 33, 34, 34, 35, 35, 35, 36,
+ 36, 36, 37, 37, 38, 38, 38, 39, 39, 40, 40,
+ 41, 41, 41, 42, 42, 43, 43, 43, 44, 44, 45,
+ 45, 46, 46, 46, 47, 47, 48, 48, 49, 49, 50,
+ 50, 51, 51, 51, 52, 52, 53, 53, 54, 54, 55,
+ 55, 56, 56, 57, 57, 58, 58, 59, 59, 60, 60,
+ 61, 61, 62, 62, 63, 63, 64, 64, 65, 65, 66,
+ 66, 67, 67, 68, 68, 69, 69, 70, 70, 71, 71,
+ 72, 72, 73, 74, 74, 75, 75, 76, 76, 77, 77,
+ 78, 79, 79, 80, 80, 81, 81, 82, 83, 83, 84,
+ 84, 85, 85, 86, 87, 87, 88, 88, 89, 90, 90,
+ 91, 91, 92, 93, 93, 94, 94, 95, 96, 96, 97,
+ 98, 98, 99, 99, 100, 101, 101, 102, 103, 103, 104,
+ 105, 105, 106, 107, 107, 108, 108, 109, 110, 110, 111,
+ 112, 112, 113, 114, 114, 115, 116, 116, 117, 118, 119,
+ 119, 120, 121, 121, 122, 123, 123, 124, 125, 125, 126,
+ 127, 128, 128, 129, 130, 130, 131, 132, 133, 133, 134,
+ 135, 135, 136, 137, 138, 138, 139, 140, 141, 141, 142,
+ 143, 144, 144, 145, 146, 147, 147, 148, 149, 150, 150,
+ 151, 152, 153, 154, 154, 155, 156, 157, 157, 158, 159,
+ 160, 161, 161, 162, 163, 164, 165, 165, 166, 167, 168,
+ 169, 169, 170, 171, 172, 173, 173, 174, 175, 176, 177,
+ 178, 178, 179, 180, 181, 182, 183, 183, 184, 185, 186,
+ 187, 188, 188, 189, 190, 191, 192, 193, 194, 195, 195,
+ 196, 197, 198, 199, 200, 201, 201, 202, 203, 204, 205,
+ 206, 207, 208, 209, 209, 210, 211, 212, 213, 214, 215,
+ 216, 217, 218, 219, 219, 220, 221, 222, 223, 224, 225,
+ 226, 227, 228, 229, 230, 231, 232, 232, 233, 234, 235,
+ 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246,
+ 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257,
+ 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268,
+ 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279,
+ 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290,
+ 291, 292, 293, 294, 295, 296, 297, 299, 300, 301, 302,
+ 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 314,
+ 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 326,
+ 327, 328, 329, 330, 331, 332, 333, 334, 336, 337, 338,
+ 339, 340, 341, 342, 343, 345, 346, 347, 348, 349, 350,
+ 351, 353, 354, 355, 356, 357, 358, 360, 361, 362, 363,
+ 364, 365, 367, 368, 369, 370, 371, 372, 374, 375, 376,
+ 377, 378, 379, 381, 382, 383, 384, 385, 387, 388, 389,
+ 390, 391, 393, 394, 395, 396, 397, 399, 400, 401, 402,
+ 404, 405, 406, 407, 408, 410, 411, 412, 413, 415, 416,
+ 417, 418, 420, 421, 422, 423, 425, 426, 427, 428, 430,
+ 431, 432, 433, 435, 436, 437, 439, 440, 441, 442, 444,
+ 445, 446, 447, 449, 450, 451, 453, 454, 455, 456, 458,
+ 459, 460, 462, 463, 464, 466, 467, 468, 470, 471, 472,
+ 473, 475, 476, 477, 479, 480, 481, 483, 484, 485, 487,
+ 488, 489, 491, 492, 493, 495, 496, 498, 499, 500, 502,
+ 503, 504, 506, 507, 508, 510, 511, 512, 514, 515, 517,
+ 518, 519, 521, 522, 523, 525, 526, 528, 529, 530, 532,
+ 533, 535, 536, 537, 539, 540, 542, 543, 544, 546, 547,
+ 549, 550, 551, 553, 554, 556, 557, 559, 560, 561, 563,
+ 564, 566, 567, 569, 570, 572, 573, 574, 576, 577, 579,
+ 580, 582, 583, 585, 586, 587, 589, 590, 592, 593, 595,
+ 596, 598, 599, 601, 602, 604, 605, 607, 608, 610, 611,
+ 613, 614, 616, 617, 619, 620, 622, 623, 625, 626, 628,
+ 629, 631, 632, 634, 635, 637, 638, 640, 641, 643, 644,
+ 646, 647, 649, 650, 652, 653, 655, 656, 658, 660, 661,
+ 663, 664, 666, 667, 669, 670, 672, 674, 675, 677, 678,
+ 680, 681, 683, 684, 686, 688, 689, 691, 692, 694, 696,
+ 697, 699, 700, 702, 703, 705, 707, 708, 710, 711, 713,
+ 715, 716, 718, 719, 721, 723, 724, 726, 728, 729, 731,
+ 732, 734, 736, 737, 739, 741, 742, 744, 745, 747, 749,
+ 750, 752, 754, 755, 757, 759, 760, 762, 764, 765, 767,
+ 768, 770, 772, 773, 775, 777, 778, 780, 782, 783, 785,
+ 787, 789, 790, 792, 794, 795, 797, 799, 800, 802, 804,
+ 805, 807, 809, 810, 812, 814, 816, 817, 819, 821, 822,
+ 824, 826, 828, 829, 831, 833, 834, 836, 838, 840, 841,
+ 843, 845, 847, 848, 850, 852, 854, 855, 857, 859, 861,
+ 862, 864, 866, 868, 869, 871, 873, 875, 876, 878, 880,
+ 882, 883, 885, 887, 889, 891, 892, 894, 896, 898, 899,
+ 901, 903, 905, 907, 908, 910, 912, 914, 916, 917, 919,
+ 921, 923, 925, 926, 928, 930, 932, 934, 936, 937, 939,
+ 941, 943, 945, 947, 948, 950, 952, 954, 956, 958, 959,
+ 961, 963, 965, 967, 969, 970, 972, 974, 976, 978, 980,
+ 982, 983, 985, 987, 989, 991, 993, 995, 997, 998, 1000,
+ 1002, 1004, 1006, 1008, 1010, 1012, 1014, 1015, 1017, 1019, 1021,
+ 1023,
+};
+
+static const u16 xgamma10_20[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4,
+ 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 6,
+ 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9,
+ 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 12,
+ 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14,
+ 14, 15, 15, 15, 15, 16, 16, 16, 16, 17, 17,
+ 17, 17, 18, 18, 18, 18, 19, 19, 19, 19, 20,
+ 20, 20, 21, 21, 21, 21, 22, 22, 22, 23, 23,
+ 23, 23, 24, 24, 24, 25, 25, 25, 26, 26, 26,
+ 27, 27, 27, 28, 28, 28, 29, 29, 29, 30, 30,
+ 30, 31, 31, 31, 32, 32, 32, 33, 33, 33, 34,
+ 34, 35, 35, 35, 36, 36, 36, 37, 37, 38, 38,
+ 38, 39, 39, 39, 40, 40, 41, 41, 41, 42, 42,
+ 43, 43, 44, 44, 44, 45, 45, 46, 46, 46, 47,
+ 47, 48, 48, 49, 49, 49, 50, 50, 51, 51, 52,
+ 52, 53, 53, 54, 54, 54, 55, 55, 56, 56, 57,
+ 57, 58, 58, 59, 59, 60, 60, 61, 61, 62, 62,
+ 63, 63, 64, 64, 65, 65, 66, 66, 67, 67, 68,
+ 68, 69, 69, 70, 70, 71, 71, 72, 72, 73, 73,
+ 74, 74, 75, 76, 76, 77, 77, 78, 78, 79, 79,
+ 80, 81, 81, 82, 82, 83, 83, 84, 84, 85, 86,
+ 86, 87, 87, 88, 89, 89, 90, 90, 91, 92, 92,
+ 93, 93, 94, 95, 95, 96, 96, 97, 98, 98, 99,
+ 99, 100, 101, 101, 102, 103, 103, 104, 105, 105, 106,
+ 106, 107, 108, 108, 109, 110, 110, 111, 112, 112, 113,
+ 114, 114, 115, 116, 116, 117, 118, 118, 119, 120, 120,
+ 121, 122, 122, 123, 124, 125, 125, 126, 127, 127, 128,
+ 129, 130, 130, 131, 132, 132, 133, 134, 135, 135, 136,
+ 137, 137, 138, 139, 140, 140, 141, 142, 143, 143, 144,
+ 145, 146, 146, 147, 148, 149, 149, 150, 151, 152, 153,
+ 153, 154, 155, 156, 156, 157, 158, 159, 160, 160, 161,
+ 162, 163, 164, 164, 165, 166, 167, 168, 168, 169, 170,
+ 171, 172, 172, 173, 174, 175, 176, 177, 177, 178, 179,
+ 180, 181, 182, 182, 183, 184, 185, 186, 187, 188, 188,
+ 189, 190, 191, 192, 193, 194, 194, 195, 196, 197, 198,
+ 199, 200, 201, 201, 202, 203, 204, 205, 206, 207, 208,
+ 209, 210, 210, 211, 212, 213, 214, 215, 216, 217, 218,
+ 219, 220, 221, 221, 222, 223, 224, 225, 226, 227, 228,
+ 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249,
+ 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260,
+ 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283,
+ 284, 285, 286, 287, 288, 289, 290, 291, 292, 294, 295,
+ 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 307,
+ 308, 309, 310, 311, 312, 313, 314, 315, 316, 318, 319,
+ 320, 321, 322, 323, 324, 325, 327, 328, 329, 330, 331,
+ 332, 333, 335, 336, 337, 338, 339, 340, 341, 343, 344,
+ 345, 346, 347, 348, 350, 351, 352, 353, 354, 355, 357,
+ 358, 359, 360, 361, 363, 364, 365, 366, 367, 369, 370,
+ 371, 372, 373, 375, 376, 377, 378, 379, 381, 382, 383,
+ 384, 386, 387, 388, 389, 390, 392, 393, 394, 395, 397,
+ 398, 399, 400, 402, 403, 404, 405, 407, 408, 409, 410,
+ 412, 413, 414, 416, 417, 418, 419, 421, 422, 423, 425,
+ 426, 427, 428, 430, 431, 432, 434, 435, 436, 437, 439,
+ 440, 441, 443, 444, 445, 447, 448, 449, 451, 452, 453,
+ 455, 456, 457, 459, 460, 461, 463, 464, 465, 467, 468,
+ 469, 471, 472, 474, 475, 476, 478, 479, 480, 482, 483,
+ 484, 486, 487, 489, 490, 491, 493, 494, 496, 497, 498,
+ 500, 501, 503, 504, 505, 507, 508, 510, 511, 512, 514,
+ 515, 517, 518, 519, 521, 522, 524, 525, 527, 528, 530,
+ 531, 532, 534, 535, 537, 538, 540, 541, 543, 544, 545,
+ 547, 548, 550, 551, 553, 554, 556, 557, 559, 560, 562,
+ 563, 565, 566, 568, 569, 571, 572, 574, 575, 577, 578,
+ 580, 581, 583, 584, 586, 587, 589, 590, 592, 593, 595,
+ 596, 598, 599, 601, 602, 604, 605, 607, 609, 610, 612,
+ 613, 615, 616, 618, 619, 621, 622, 624, 626, 627, 629,
+ 630, 632, 633, 635, 637, 638, 640, 641, 643, 645, 646,
+ 648, 649, 651, 652, 654, 656, 657, 659, 660, 662, 664,
+ 665, 667, 669, 670, 672, 673, 675, 677, 678, 680, 682,
+ 683, 685, 686, 688, 690, 691, 693, 695, 696, 698, 700,
+ 701, 703, 705, 706, 708, 710, 711, 713, 715, 716, 718,
+ 720, 721, 723, 725, 726, 728, 730, 731, 733, 735, 736,
+ 738, 740, 742, 743, 745, 747, 748, 750, 752, 754, 755,
+ 757, 759, 760, 762, 764, 766, 767, 769, 771, 773, 774,
+ 776, 778, 780, 781, 783, 785, 787, 788, 790, 792, 794,
+ 795, 797, 799, 801, 802, 804, 806, 808, 809, 811, 813,
+ 815, 817, 818, 820, 822, 824, 826, 827, 829, 831, 833,
+ 835, 836, 838, 840, 842, 844, 845, 847, 849, 851, 853,
+ 855, 856, 858, 860, 862, 864, 866, 867, 869, 871, 873,
+ 875, 877, 878, 880, 882, 884, 886, 888, 890, 892, 893,
+ 895, 897, 899, 901, 903, 905, 907, 908, 910, 912, 914,
+ 916, 918, 920, 922, 924, 925, 927, 929, 931, 933, 935,
+ 937, 939, 941, 943, 945, 946, 948, 950, 952, 954, 956,
+ 958, 960, 962, 964, 966, 968, 970, 972, 974, 976, 978,
+ 979, 981, 983, 985, 987, 989, 991, 993, 995, 997, 999,
+ 1001, 1003, 1005, 1007, 1009, 1011, 1013, 1015, 1017, 1019, 1021,
+ 1023,
+};
+
+static const u16 xgamma10_21[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6,
+ 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9,
+ 9, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11,
+ 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 14,
+ 14, 14, 14, 15, 15, 15, 15, 15, 16, 16, 16,
+ 16, 17, 17, 17, 17, 18, 18, 18, 18, 19, 19,
+ 19, 19, 20, 20, 20, 21, 21, 21, 21, 22, 22,
+ 22, 22, 23, 23, 23, 24, 24, 24, 24, 25, 25,
+ 25, 26, 26, 26, 27, 27, 27, 28, 28, 28, 29,
+ 29, 29, 29, 30, 30, 30, 31, 31, 31, 32, 32,
+ 33, 33, 33, 34, 34, 34, 35, 35, 35, 36, 36,
+ 36, 37, 37, 38, 38, 38, 39, 39, 39, 40, 40,
+ 41, 41, 41, 42, 42, 43, 43, 43, 44, 44, 45,
+ 45, 45, 46, 46, 47, 47, 47, 48, 48, 49, 49,
+ 50, 50, 50, 51, 51, 52, 52, 53, 53, 54, 54,
+ 54, 55, 55, 56, 56, 57, 57, 58, 58, 59, 59,
+ 59, 60, 60, 61, 61, 62, 62, 63, 63, 64, 64,
+ 65, 65, 66, 66, 67, 67, 68, 68, 69, 69, 70,
+ 70, 71, 71, 72, 72, 73, 74, 74, 75, 75, 76,
+ 76, 77, 77, 78, 78, 79, 79, 80, 81, 81, 82,
+ 82, 83, 83, 84, 85, 85, 86, 86, 87, 87, 88,
+ 89, 89, 90, 90, 91, 91, 92, 93, 93, 94, 94,
+ 95, 96, 96, 97, 97, 98, 99, 99, 100, 101, 101,
+ 102, 102, 103, 104, 104, 105, 106, 106, 107, 108, 108,
+ 109, 110, 110, 111, 111, 112, 113, 113, 114, 115, 115,
+ 116, 117, 117, 118, 119, 120, 120, 121, 122, 122, 123,
+ 124, 124, 125, 126, 126, 127, 128, 129, 129, 130, 131,
+ 131, 132, 133, 134, 134, 135, 136, 136, 137, 138, 139,
+ 139, 140, 141, 142, 142, 143, 144, 145, 145, 146, 147,
+ 148, 148, 149, 150, 151, 152, 152, 153, 154, 155, 155,
+ 156, 157, 158, 159, 159, 160, 161, 162, 163, 163, 164,
+ 165, 166, 167, 167, 168, 169, 170, 171, 171, 172, 173,
+ 174, 175, 176, 176, 177, 178, 179, 180, 181, 181, 182,
+ 183, 184, 185, 186, 187, 187, 188, 189, 190, 191, 192,
+ 193, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202,
+ 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224, 225, 226, 227, 227, 228, 229, 230, 231, 232,
+ 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243,
+ 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+ 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265,
+ 266, 267, 268, 269, 271, 272, 273, 274, 275, 276, 277,
+ 278, 279, 280, 281, 282, 283, 284, 285, 286, 288, 289,
+ 290, 291, 292, 293, 294, 295, 296, 297, 298, 300, 301,
+ 302, 303, 304, 305, 306, 307, 308, 310, 311, 312, 313,
+ 314, 315, 316, 317, 319, 320, 321, 322, 323, 324, 326,
+ 327, 328, 329, 330, 331, 332, 334, 335, 336, 337, 338,
+ 339, 341, 342, 343, 344, 345, 347, 348, 349, 350, 351,
+ 353, 354, 355, 356, 357, 359, 360, 361, 362, 363, 365,
+ 366, 367, 368, 370, 371, 372, 373, 375, 376, 377, 378,
+ 380, 381, 382, 383, 385, 386, 387, 388, 390, 391, 392,
+ 393, 395, 396, 397, 399, 400, 401, 402, 404, 405, 406,
+ 408, 409, 410, 411, 413, 414, 415, 417, 418, 419, 421,
+ 422, 423, 425, 426, 427, 429, 430, 431, 433, 434, 435,
+ 437, 438, 439, 441, 442, 443, 445, 446, 447, 449, 450,
+ 452, 453, 454, 456, 457, 458, 460, 461, 463, 464, 465,
+ 467, 468, 469, 471, 472, 474, 475, 477, 478, 479, 481,
+ 482, 484, 485, 486, 488, 489, 491, 492, 494, 495, 496,
+ 498, 499, 501, 502, 504, 505, 507, 508, 509, 511, 512,
+ 514, 515, 517, 518, 520, 521, 523, 524, 526, 527, 529,
+ 530, 532, 533, 535, 536, 538, 539, 541, 542, 544, 545,
+ 547, 548, 550, 551, 553, 554, 556, 557, 559, 560, 562,
+ 563, 565, 566, 568, 569, 571, 573, 574, 576, 577, 579,
+ 580, 582, 583, 585, 587, 588, 590, 591, 593, 595, 596,
+ 598, 599, 601, 602, 604, 606, 607, 609, 610, 612, 614,
+ 615, 617, 618, 620, 622, 623, 625, 627, 628, 630, 631,
+ 633, 635, 636, 638, 640, 641, 643, 645, 646, 648, 650,
+ 651, 653, 654, 656, 658, 659, 661, 663, 664, 666, 668,
+ 670, 671, 673, 675, 676, 678, 680, 681, 683, 685, 686,
+ 688, 690, 692, 693, 695, 697, 698, 700, 702, 704, 705,
+ 707, 709, 711, 712, 714, 716, 717, 719, 721, 723, 724,
+ 726, 728, 730, 732, 733, 735, 737, 739, 740, 742, 744,
+ 746, 747, 749, 751, 753, 755, 756, 758, 760, 762, 764,
+ 765, 767, 769, 771, 773, 774, 776, 778, 780, 782, 784,
+ 785, 787, 789, 791, 793, 795, 796, 798, 800, 802, 804,
+ 806, 807, 809, 811, 813, 815, 817, 819, 821, 822, 824,
+ 826, 828, 830, 832, 834, 836, 837, 839, 841, 843, 845,
+ 847, 849, 851, 853, 855, 856, 858, 860, 862, 864, 866,
+ 868, 870, 872, 874, 876, 878, 880, 882, 883, 885, 887,
+ 889, 891, 893, 895, 897, 899, 901, 903, 905, 907, 909,
+ 911, 913, 915, 917, 919, 921, 923, 925, 927, 929, 931,
+ 933, 935, 937, 939, 941, 943, 945, 947, 949, 951, 953,
+ 955, 957, 959, 961, 963, 965, 967, 969, 971, 973, 975,
+ 977, 979, 981, 984, 986, 988, 990, 992, 994, 996, 998,
+ 1000, 1002, 1004, 1006, 1008, 1010, 1013, 1015, 1017, 1019, 1021,
+ 1023,
+};
+
+static const u16 xgamma10_22[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6,
+ 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9,
+ 9, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11,
+ 11, 11, 12, 12, 12, 12, 12, 13, 13, 13, 13,
+ 13, 14, 14, 14, 14, 15, 15, 15, 15, 15, 16,
+ 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18,
+ 18, 19, 19, 19, 19, 20, 20, 20, 21, 21, 21,
+ 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24,
+ 24, 25, 25, 25, 25, 26, 26, 26, 27, 27, 27,
+ 28, 28, 28, 29, 29, 29, 29, 30, 30, 30, 31,
+ 31, 31, 32, 32, 32, 33, 33, 33, 34, 34, 34,
+ 35, 35, 35, 36, 36, 37, 37, 37, 38, 38, 38,
+ 39, 39, 39, 40, 40, 41, 41, 41, 42, 42, 43,
+ 43, 43, 44, 44, 44, 45, 45, 46, 46, 46, 47,
+ 47, 48, 48, 49, 49, 49, 50, 50, 51, 51, 52,
+ 52, 52, 53, 53, 54, 54, 55, 55, 55, 56, 56,
+ 57, 57, 58, 58, 59, 59, 60, 60, 61, 61, 61,
+ 62, 62, 63, 63, 64, 64, 65, 65, 66, 66, 67,
+ 67, 68, 68, 69, 69, 70, 70, 71, 71, 72, 72,
+ 73, 73, 74, 75, 75, 76, 76, 77, 77, 78, 78,
+ 79, 79, 80, 80, 81, 82, 82, 83, 83, 84, 84,
+ 85, 85, 86, 87, 87, 88, 88, 89, 89, 90, 91,
+ 91, 92, 92, 93, 94, 94, 95, 95, 96, 97, 97,
+ 98, 98, 99, 100, 100, 101, 102, 102, 103, 103, 104,
+ 105, 105, 106, 107, 107, 108, 109, 109, 110, 110, 111,
+ 112, 112, 113, 114, 114, 115, 116, 116, 117, 118, 118,
+ 119, 120, 121, 121, 122, 123, 123, 124, 125, 125, 126,
+ 127, 127, 128, 129, 130, 130, 131, 132, 132, 133, 134,
+ 135, 135, 136, 137, 138, 138, 139, 140, 141, 141, 142,
+ 143, 144, 144, 145, 146, 147, 147, 148, 149, 150, 150,
+ 151, 152, 153, 154, 154, 155, 156, 157, 157, 158, 159,
+ 160, 161, 161, 162, 163, 164, 165, 166, 166, 167, 168,
+ 169, 170, 170, 171, 172, 173, 174, 175, 175, 176, 177,
+ 178, 179, 180, 181, 181, 182, 183, 184, 185, 186, 187,
+ 187, 188, 189, 190, 191, 192, 193, 194, 194, 195, 196,
+ 197, 198, 199, 200, 201, 202, 203, 203, 204, 205, 206,
+ 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 216,
+ 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227,
+ 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238,
+ 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249,
+ 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260,
+ 261, 262, 263, 264, 265, 266, 267, 269, 270, 271, 272,
+ 273, 274, 275, 276, 277, 278, 279, 280, 281, 283, 284,
+ 285, 286, 287, 288, 289, 290, 291, 292, 294, 295, 296,
+ 297, 298, 299, 300, 301, 303, 304, 305, 306, 307, 308,
+ 309, 311, 312, 313, 314, 315, 316, 317, 319, 320, 321,
+ 322, 323, 324, 326, 327, 328, 329, 330, 332, 333, 334,
+ 335, 336, 338, 339, 340, 341, 342, 344, 345, 346, 347,
+ 348, 350, 351, 352, 353, 355, 356, 357, 358, 360, 361,
+ 362, 363, 365, 366, 367, 368, 370, 371, 372, 373, 375,
+ 376, 377, 378, 380, 381, 382, 384, 385, 386, 387, 389,
+ 390, 391, 393, 394, 395, 397, 398, 399, 401, 402, 403,
+ 405, 406, 407, 409, 410, 411, 413, 414, 415, 417, 418,
+ 419, 421, 422, 423, 425, 426, 427, 429, 430, 432, 433,
+ 434, 436, 437, 438, 440, 441, 443, 444, 445, 447, 448,
+ 450, 451, 452, 454, 455, 457, 458, 459, 461, 462, 464,
+ 465, 467, 468, 469, 471, 472, 474, 475, 477, 478, 480,
+ 481, 483, 484, 485, 487, 488, 490, 491, 493, 494, 496,
+ 497, 499, 500, 502, 503, 505, 506, 508, 509, 511, 512,
+ 514, 515, 517, 518, 520, 521, 523, 524, 526, 527, 529,
+ 530, 532, 534, 535, 537, 538, 540, 541, 543, 544, 546,
+ 548, 549, 551, 552, 554, 555, 557, 559, 560, 562, 563,
+ 565, 567, 568, 570, 571, 573, 575, 576, 578, 579, 581,
+ 583, 584, 586, 587, 589, 591, 592, 594, 596, 597, 599,
+ 601, 602, 604, 605, 607, 609, 610, 612, 614, 615, 617,
+ 619, 620, 622, 624, 625, 627, 629, 631, 632, 634, 636,
+ 637, 639, 641, 642, 644, 646, 648, 649, 651, 653, 654,
+ 656, 658, 660, 661, 663, 665, 667, 668, 670, 672, 674,
+ 675, 677, 679, 681, 682, 684, 686, 688, 689, 691, 693,
+ 695, 697, 698, 700, 702, 704, 705, 707, 709, 711, 713,
+ 714, 716, 718, 720, 722, 724, 725, 727, 729, 731, 733,
+ 735, 736, 738, 740, 742, 744, 746, 747, 749, 751, 753,
+ 755, 757, 759, 760, 762, 764, 766, 768, 770, 772, 774,
+ 776, 777, 779, 781, 783, 785, 787, 789, 791, 793, 795,
+ 796, 798, 800, 802, 804, 806, 808, 810, 812, 814, 816,
+ 818, 820, 822, 824, 826, 828, 829, 831, 833, 835, 837,
+ 839, 841, 843, 845, 847, 849, 851, 853, 855, 857, 859,
+ 861, 863, 865, 867, 869, 871, 873, 875, 877, 879, 881,
+ 883, 885, 887, 889, 892, 894, 896, 898, 900, 902, 904,
+ 906, 908, 910, 912, 914, 916, 918, 920, 922, 925, 927,
+ 929, 931, 933, 935, 937, 939, 941, 943, 945, 948, 950,
+ 952, 954, 956, 958, 960, 962, 965, 967, 969, 971, 973,
+ 975, 977, 980, 982, 984, 986, 988, 990, 992, 995, 997,
+ 999, 1001, 1003, 1005, 1008, 1010, 1012, 1014, 1016, 1019, 1021,
+ 1023,
+};
+
+static const u16 xgamma10_23[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6,
+ 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9,
+ 9, 9, 10, 10, 10, 10, 10, 10, 11, 11, 11,
+ 11, 11, 11, 12, 12, 12, 12, 12, 13, 13, 13,
+ 13, 13, 14, 14, 14, 14, 14, 15, 15, 15, 15,
+ 15, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18,
+ 18, 18, 18, 19, 19, 19, 19, 20, 20, 20, 20,
+ 21, 21, 21, 21, 22, 22, 22, 22, 23, 23, 23,
+ 23, 24, 24, 24, 25, 25, 25, 25, 26, 26, 26,
+ 27, 27, 27, 27, 28, 28, 28, 29, 29, 29, 30,
+ 30, 30, 30, 31, 31, 31, 32, 32, 32, 33, 33,
+ 33, 34, 34, 34, 35, 35, 35, 36, 36, 36, 37,
+ 37, 38, 38, 38, 39, 39, 39, 40, 40, 40, 41,
+ 41, 42, 42, 42, 43, 43, 43, 44, 44, 45, 45,
+ 45, 46, 46, 47, 47, 47, 48, 48, 49, 49, 49,
+ 50, 50, 51, 51, 52, 52, 52, 53, 53, 54, 54,
+ 55, 55, 55, 56, 56, 57, 57, 58, 58, 59, 59,
+ 59, 60, 60, 61, 61, 62, 62, 63, 63, 64, 64,
+ 65, 65, 66, 66, 67, 67, 68, 68, 69, 69, 70,
+ 70, 71, 71, 72, 72, 73, 73, 74, 74, 75, 75,
+ 76, 76, 77, 77, 78, 78, 79, 80, 80, 81, 81,
+ 82, 82, 83, 83, 84, 85, 85, 86, 86, 87, 87,
+ 88, 89, 89, 90, 90, 91, 91, 92, 93, 93, 94,
+ 94, 95, 96, 96, 97, 97, 98, 99, 99, 100, 100,
+ 101, 102, 102, 103, 104, 104, 105, 106, 106, 107, 107,
+ 108, 109, 109, 110, 111, 111, 112, 113, 113, 114, 115,
+ 115, 116, 117, 117, 118, 119, 119, 120, 121, 121, 122,
+ 123, 124, 124, 125, 126, 126, 127, 128, 128, 129, 130,
+ 131, 131, 132, 133, 133, 134, 135, 136, 136, 137, 138,
+ 139, 139, 140, 141, 142, 142, 143, 144, 145, 145, 146,
+ 147, 148, 148, 149, 150, 151, 152, 152, 153, 154, 155,
+ 156, 156, 157, 158, 159, 160, 160, 161, 162, 163, 164,
+ 164, 165, 166, 167, 168, 168, 169, 170, 171, 172, 173,
+ 174, 174, 175, 176, 177, 178, 179, 179, 180, 181, 182,
+ 183, 184, 185, 186, 186, 187, 188, 189, 190, 191, 192,
+ 193, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202,
+ 203, 204, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 222,
+ 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
+ 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244,
+ 245, 246, 248, 249, 250, 251, 252, 253, 254, 255, 256,
+ 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 268,
+ 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 280,
+ 281, 282, 283, 284, 285, 286, 287, 288, 290, 291, 292,
+ 293, 294, 295, 296, 298, 299, 300, 301, 302, 303, 304,
+ 306, 307, 308, 309, 310, 311, 313, 314, 315, 316, 317,
+ 319, 320, 321, 322, 323, 325, 326, 327, 328, 329, 331,
+ 332, 333, 334, 335, 337, 338, 339, 340, 342, 343, 344,
+ 345, 347, 348, 349, 350, 352, 353, 354, 355, 357, 358,
+ 359, 360, 362, 363, 364, 366, 367, 368, 369, 371, 372,
+ 373, 375, 376, 377, 379, 380, 381, 383, 384, 385, 386,
+ 388, 389, 390, 392, 393, 394, 396, 397, 399, 400, 401,
+ 403, 404, 405, 407, 408, 409, 411, 412, 414, 415, 416,
+ 418, 419, 420, 422, 423, 425, 426, 427, 429, 430, 432,
+ 433, 435, 436, 437, 439, 440, 442, 443, 444, 446, 447,
+ 449, 450, 452, 453, 455, 456, 458, 459, 460, 462, 463,
+ 465, 466, 468, 469, 471, 472, 474, 475, 477, 478, 480,
+ 481, 483, 484, 486, 487, 489, 490, 492, 493, 495, 496,
+ 498, 499, 501, 502, 504, 506, 507, 509, 510, 512, 513,
+ 515, 516, 518, 520, 521, 523, 524, 526, 527, 529, 531,
+ 532, 534, 535, 537, 539, 540, 542, 543, 545, 547, 548,
+ 550, 551, 553, 555, 556, 558, 560, 561, 563, 565, 566,
+ 568, 569, 571, 573, 574, 576, 578, 579, 581, 583, 584,
+ 586, 588, 590, 591, 593, 595, 596, 598, 600, 601, 603,
+ 605, 606, 608, 610, 612, 613, 615, 617, 619, 620, 622,
+ 624, 625, 627, 629, 631, 632, 634, 636, 638, 640, 641,
+ 643, 645, 647, 648, 650, 652, 654, 655, 657, 659, 661,
+ 663, 664, 666, 668, 670, 672, 674, 675, 677, 679, 681,
+ 683, 684, 686, 688, 690, 692, 694, 696, 697, 699, 701,
+ 703, 705, 707, 709, 710, 712, 714, 716, 718, 720, 722,
+ 724, 725, 727, 729, 731, 733, 735, 737, 739, 741, 743,
+ 745, 746, 748, 750, 752, 754, 756, 758, 760, 762, 764,
+ 766, 768, 770, 772, 774, 776, 778, 780, 782, 784, 786,
+ 787, 789, 791, 793, 795, 797, 799, 801, 803, 805, 807,
+ 809, 811, 814, 816, 818, 820, 822, 824, 826, 828, 830,
+ 832, 834, 836, 838, 840, 842, 844, 846, 848, 850, 852,
+ 854, 857, 859, 861, 863, 865, 867, 869, 871, 873, 875,
+ 878, 880, 882, 884, 886, 888, 890, 892, 894, 897, 899,
+ 901, 903, 905, 907, 909, 912, 914, 916, 918, 920, 922,
+ 925, 927, 929, 931, 933, 936, 938, 940, 942, 944, 946,
+ 949, 951, 953, 955, 958, 960, 962, 964, 966, 969, 971,
+ 973, 975, 978, 980, 982, 984, 987, 989, 991, 993, 996,
+ 998, 1000, 1002, 1005, 1007, 1009, 1012, 1014, 1016, 1018, 1021,
+ 1023,
+};
+
+/*
+ * 10-bit gamma-encoding LUT for gamma = 2.4: entry i appears to be
+ * round(1023 * (i / 1023)^2.4), mapping a linear 10-bit code (index)
+ * to a gamma-corrected 10-bit code (value 0..1023). Monotonic,
+ * endpoints 0 -> 0 and 1023 -> 1023.
+ * NOTE(review): the exponent is inferred from the "_24" suffix and
+ * spot-checked against the table endpoints — confirm against the
+ * generator used to produce these tables.
+ */
+static const u16 xgamma10_24[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9,
+ 9, 9, 9, 10, 10, 10, 10, 10, 10, 11, 11,
+ 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 13,
+ 13, 13, 13, 13, 14, 14, 14, 14, 14, 15, 15,
+ 15, 15, 15, 16, 16, 16, 16, 16, 17, 17, 17,
+ 17, 18, 18, 18, 18, 18, 19, 19, 19, 19, 20,
+ 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22,
+ 23, 23, 23, 23, 24, 24, 24, 24, 25, 25, 25,
+ 26, 26, 26, 26, 27, 27, 27, 28, 28, 28, 28,
+ 29, 29, 29, 30, 30, 30, 31, 31, 31, 32, 32,
+ 32, 32, 33, 33, 33, 34, 34, 34, 35, 35, 35,
+ 36, 36, 36, 37, 37, 38, 38, 38, 39, 39, 39,
+ 40, 40, 40, 41, 41, 41, 42, 42, 43, 43, 43,
+ 44, 44, 44, 45, 45, 46, 46, 46, 47, 47, 48,
+ 48, 48, 49, 49, 50, 50, 50, 51, 51, 52, 52,
+ 53, 53, 53, 54, 54, 55, 55, 56, 56, 56, 57,
+ 57, 58, 58, 59, 59, 60, 60, 61, 61, 61, 62,
+ 62, 63, 63, 64, 64, 65, 65, 66, 66, 67, 67,
+ 68, 68, 69, 69, 70, 70, 71, 71, 72, 72, 73,
+ 73, 74, 74, 75, 75, 76, 76, 77, 77, 78, 79,
+ 79, 80, 80, 81, 81, 82, 82, 83, 83, 84, 85,
+ 85, 86, 86, 87, 87, 88, 89, 89, 90, 90, 91,
+ 91, 92, 93, 93, 94, 94, 95, 96, 96, 97, 97,
+ 98, 99, 99, 100, 100, 101, 102, 102, 103, 104, 104,
+ 105, 106, 106, 107, 107, 108, 109, 109, 110, 111, 111,
+ 112, 113, 113, 114, 115, 115, 116, 117, 117, 118, 119,
+ 119, 120, 121, 121, 122, 123, 124, 124, 125, 126, 126,
+ 127, 128, 129, 129, 130, 131, 131, 132, 133, 134, 134,
+ 135, 136, 137, 137, 138, 139, 140, 140, 141, 142, 143,
+ 143, 144, 145, 146, 146, 147, 148, 149, 149, 150, 151,
+ 152, 153, 153, 154, 155, 156, 157, 157, 158, 159, 160,
+ 161, 161, 162, 163, 164, 165, 166, 166, 167, 168, 169,
+ 170, 171, 171, 172, 173, 174, 175, 176, 177, 177, 178,
+ 179, 180, 181, 182, 183, 184, 184, 185, 186, 187, 188,
+ 189, 190, 191, 192, 192, 193, 194, 195, 196, 197, 198,
+ 199, 200, 201, 202, 203, 204, 204, 205, 206, 207, 208,
+ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
+ 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230,
+ 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241,
+ 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252,
+ 253, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 268, 269, 270, 271, 272, 273, 274, 275, 276,
+ 277, 279, 280, 281, 282, 283, 284, 285, 287, 288, 289,
+ 290, 291, 292, 293, 295, 296, 297, 298, 299, 300, 302,
+ 303, 304, 305, 306, 308, 309, 310, 311, 312, 314, 315,
+ 316, 317, 318, 320, 321, 322, 323, 324, 326, 327, 328,
+ 329, 331, 332, 333, 334, 336, 337, 338, 339, 341, 342,
+ 343, 344, 346, 347, 348, 350, 351, 352, 353, 355, 356,
+ 357, 359, 360, 361, 363, 364, 365, 367, 368, 369, 370,
+ 372, 373, 374, 376, 377, 378, 380, 381, 383, 384, 385,
+ 387, 388, 389, 391, 392, 393, 395, 396, 398, 399, 400,
+ 402, 403, 405, 406, 407, 409, 410, 412, 413, 414, 416,
+ 417, 419, 420, 421, 423, 424, 426, 427, 429, 430, 432,
+ 433, 434, 436, 437, 439, 440, 442, 443, 445, 446, 448,
+ 449, 451, 452, 454, 455, 457, 458, 460, 461, 463, 464,
+ 466, 467, 469, 470, 472, 473, 475, 476, 478, 479, 481,
+ 483, 484, 486, 487, 489, 490, 492, 493, 495, 497, 498,
+ 500, 501, 503, 505, 506, 508, 509, 511, 512, 514, 516,
+ 517, 519, 521, 522, 524, 525, 527, 529, 530, 532, 534,
+ 535, 537, 539, 540, 542, 543, 545, 547, 548, 550, 552,
+ 553, 555, 557, 559, 560, 562, 564, 565, 567, 569, 570,
+ 572, 574, 576, 577, 579, 581, 582, 584, 586, 588, 589,
+ 591, 593, 595, 596, 598, 600, 602, 603, 605, 607, 609,
+ 610, 612, 614, 616, 618, 619, 621, 623, 625, 627, 628,
+ 630, 632, 634, 636, 637, 639, 641, 643, 645, 647, 648,
+ 650, 652, 654, 656, 658, 660, 661, 663, 665, 667, 669,
+ 671, 673, 674, 676, 678, 680, 682, 684, 686, 688, 690,
+ 692, 693, 695, 697, 699, 701, 703, 705, 707, 709, 711,
+ 713, 715, 717, 719, 721, 723, 724, 726, 728, 730, 732,
+ 734, 736, 738, 740, 742, 744, 746, 748, 750, 752, 754,
+ 756, 758, 760, 762, 764, 766, 768, 770, 772, 774, 777,
+ 779, 781, 783, 785, 787, 789, 791, 793, 795, 797, 799,
+ 801, 803, 805, 808, 810, 812, 814, 816, 818, 820, 822,
+ 824, 826, 829, 831, 833, 835, 837, 839, 841, 844, 846,
+ 848, 850, 852, 854, 856, 859, 861, 863, 865, 867, 870,
+ 872, 874, 876, 878, 880, 883, 885, 887, 889, 891, 894,
+ 896, 898, 900, 903, 905, 907, 909, 912, 914, 916, 918,
+ 921, 923, 925, 927, 930, 932, 934, 936, 939, 941, 943,
+ 946, 948, 950, 952, 955, 957, 959, 962, 964, 966, 969,
+ 971, 973, 976, 978, 980, 983, 985, 987, 990, 992, 994,
+ 997, 999, 1002, 1004, 1006, 1009, 1011, 1013, 1016, 1018, 1021,
+ 1023,
+};
+
+/*
+ * 10-bit gamma-encoding LUT for gamma = 2.5: entry i appears to be
+ * round(1023 * (i / 1023)^2.5); linear 10-bit index in, gamma-corrected
+ * 10-bit value out (0..1023), monotonic with endpoints 0 and 1023.
+ * NOTE(review): exponent inferred from the "_25" suffix — confirm
+ * against the table generator.
+ */
+static const u16 xgamma10_25[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7,
+ 7, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9,
+ 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 11,
+ 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12,
+ 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14,
+ 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 17,
+ 17, 17, 17, 18, 18, 18, 18, 18, 19, 19, 19,
+ 19, 20, 20, 20, 20, 20, 21, 21, 21, 21, 22,
+ 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 25,
+ 25, 25, 25, 26, 26, 26, 26, 27, 27, 27, 28,
+ 28, 28, 28, 29, 29, 29, 30, 30, 30, 31, 31,
+ 31, 31, 32, 32, 32, 33, 33, 33, 34, 34, 34,
+ 35, 35, 35, 36, 36, 36, 37, 37, 37, 38, 38,
+ 38, 39, 39, 39, 40, 40, 40, 41, 41, 42, 42,
+ 42, 43, 43, 43, 44, 44, 45, 45, 45, 46, 46,
+ 46, 47, 47, 48, 48, 48, 49, 49, 50, 50, 50,
+ 51, 51, 52, 52, 53, 53, 53, 54, 54, 55, 55,
+ 56, 56, 56, 57, 57, 58, 58, 59, 59, 60, 60,
+ 60, 61, 61, 62, 62, 63, 63, 64, 64, 65, 65,
+ 66, 66, 67, 67, 68, 68, 69, 69, 70, 70, 71,
+ 71, 72, 72, 73, 73, 74, 74, 75, 75, 76, 76,
+ 77, 77, 78, 78, 79, 79, 80, 80, 81, 82, 82,
+ 83, 83, 84, 84, 85, 85, 86, 87, 87, 88, 88,
+ 89, 89, 90, 91, 91, 92, 92, 93, 94, 94, 95,
+ 95, 96, 97, 97, 98, 98, 99, 100, 100, 101, 102,
+ 102, 103, 103, 104, 105, 105, 106, 107, 107, 108, 109,
+ 109, 110, 110, 111, 112, 112, 113, 114, 114, 115, 116,
+ 117, 117, 118, 119, 119, 120, 121, 121, 122, 123, 123,
+ 124, 125, 126, 126, 127, 128, 128, 129, 130, 131, 131,
+ 132, 133, 133, 134, 135, 136, 136, 137, 138, 139, 139,
+ 140, 141, 142, 143, 143, 144, 145, 146, 146, 147, 148,
+ 149, 149, 150, 151, 152, 153, 153, 154, 155, 156, 157,
+ 158, 158, 159, 160, 161, 162, 162, 163, 164, 165, 166,
+ 167, 167, 168, 169, 170, 171, 172, 173, 173, 174, 175,
+ 176, 177, 178, 179, 180, 180, 181, 182, 183, 184, 185,
+ 186, 187, 188, 188, 189, 190, 191, 192, 193, 194, 195,
+ 196, 197, 198, 199, 200, 200, 201, 202, 203, 204, 205,
+ 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216,
+ 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227,
+ 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238,
+ 239, 240, 241, 242, 243, 244, 245, 247, 248, 249, 250,
+ 251, 252, 253, 254, 255, 256, 257, 258, 260, 261, 262,
+ 263, 264, 265, 266, 267, 268, 270, 271, 272, 273, 274,
+ 275, 276, 277, 279, 280, 281, 282, 283, 284, 286, 287,
+ 288, 289, 290, 291, 293, 294, 295, 296, 297, 298, 300,
+ 301, 302, 303, 304, 306, 307, 308, 309, 311, 312, 313,
+ 314, 315, 317, 318, 319, 320, 322, 323, 324, 325, 327,
+ 328, 329, 330, 332, 333, 334, 336, 337, 338, 339, 341,
+ 342, 343, 345, 346, 347, 349, 350, 351, 352, 354, 355,
+ 356, 358, 359, 360, 362, 363, 364, 366, 367, 369, 370,
+ 371, 373, 374, 375, 377, 378, 379, 381, 382, 384, 385,
+ 386, 388, 389, 391, 392, 393, 395, 396, 398, 399, 400,
+ 402, 403, 405, 406, 408, 409, 411, 412, 413, 415, 416,
+ 418, 419, 421, 422, 424, 425, 427, 428, 430, 431, 433,
+ 434, 436, 437, 439, 440, 442, 443, 445, 446, 448, 449,
+ 451, 452, 454, 455, 457, 458, 460, 461, 463, 465, 466,
+ 468, 469, 471, 472, 474, 476, 477, 479, 480, 482, 483,
+ 485, 487, 488, 490, 491, 493, 495, 496, 498, 500, 501,
+ 503, 504, 506, 508, 509, 511, 513, 514, 516, 518, 519,
+ 521, 523, 524, 526, 528, 529, 531, 533, 534, 536, 538,
+ 540, 541, 543, 545, 546, 548, 550, 552, 553, 555, 557,
+ 558, 560, 562, 564, 565, 567, 569, 571, 572, 574, 576,
+ 578, 580, 581, 583, 585, 587, 588, 590, 592, 594, 596,
+ 597, 599, 601, 603, 605, 607, 608, 610, 612, 614, 616,
+ 618, 619, 621, 623, 625, 627, 629, 631, 632, 634, 636,
+ 638, 640, 642, 644, 646, 648, 649, 651, 653, 655, 657,
+ 659, 661, 663, 665, 667, 669, 671, 673, 674, 676, 678,
+ 680, 682, 684, 686, 688, 690, 692, 694, 696, 698, 700,
+ 702, 704, 706, 708, 710, 712, 714, 716, 718, 720, 722,
+ 724, 726, 728, 730, 732, 734, 736, 739, 741, 743, 745,
+ 747, 749, 751, 753, 755, 757, 759, 761, 763, 766, 768,
+ 770, 772, 774, 776, 778, 780, 782, 785, 787, 789, 791,
+ 793, 795, 797, 800, 802, 804, 806, 808, 810, 813, 815,
+ 817, 819, 821, 824, 826, 828, 830, 832, 835, 837, 839,
+ 841, 843, 846, 848, 850, 852, 855, 857, 859, 861, 864,
+ 866, 868, 870, 873, 875, 877, 880, 882, 884, 886, 889,
+ 891, 893, 896, 898, 900, 903, 905, 907, 910, 912, 914,
+ 917, 919, 921, 924, 926, 928, 931, 933, 935, 938, 940,
+ 942, 945, 947, 950, 952, 954, 957, 959, 962, 964, 966,
+ 969, 971, 974, 976, 979, 981, 983, 986, 988, 991, 993,
+ 996, 998, 1001, 1003, 1006, 1008, 1011, 1013, 1016, 1018, 1021,
+ 1023,
+};
+
+/*
+ * 10-bit gamma-encoding LUT for gamma = 2.6: entry i appears to be
+ * round(1023 * (i / 1023)^2.6); linear 10-bit index in, gamma-corrected
+ * 10-bit value out (0..1023), monotonic with endpoints 0 and 1023.
+ * NOTE(review): exponent inferred from the "_26" suffix — confirm
+ * against the table generator.
+ */
+static const u16 xgamma10_26[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7,
+ 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9,
+ 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10,
+ 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12,
+ 12, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14,
+ 14, 14, 15, 15, 15, 15, 15, 16, 16, 16, 16,
+ 16, 17, 17, 17, 17, 18, 18, 18, 18, 18, 19,
+ 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21,
+ 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24,
+ 24, 24, 25, 25, 25, 25, 26, 26, 26, 27, 27,
+ 27, 27, 28, 28, 28, 28, 29, 29, 29, 30, 30,
+ 30, 31, 31, 31, 31, 32, 32, 32, 33, 33, 33,
+ 34, 34, 34, 35, 35, 35, 36, 36, 36, 37, 37,
+ 37, 38, 38, 38, 39, 39, 39, 40, 40, 40, 41,
+ 41, 41, 42, 42, 43, 43, 43, 44, 44, 44, 45,
+ 45, 46, 46, 46, 47, 47, 47, 48, 48, 49, 49,
+ 49, 50, 50, 51, 51, 51, 52, 52, 53, 53, 54,
+ 54, 54, 55, 55, 56, 56, 57, 57, 57, 58, 58,
+ 59, 59, 60, 60, 61, 61, 62, 62, 62, 63, 63,
+ 64, 64, 65, 65, 66, 66, 67, 67, 68, 68, 69,
+ 69, 70, 70, 71, 71, 72, 72, 73, 73, 74, 74,
+ 75, 75, 76, 76, 77, 77, 78, 78, 79, 80, 80,
+ 81, 81, 82, 82, 83, 83, 84, 84, 85, 86, 86,
+ 87, 87, 88, 88, 89, 90, 90, 91, 91, 92, 93,
+ 93, 94, 94, 95, 96, 96, 97, 97, 98, 99, 99,
+ 100, 100, 101, 102, 102, 103, 104, 104, 105, 106, 106,
+ 107, 107, 108, 109, 109, 110, 111, 111, 112, 113, 113,
+ 114, 115, 115, 116, 117, 117, 118, 119, 120, 120, 121,
+ 122, 122, 123, 124, 124, 125, 126, 127, 127, 128, 129,
+ 129, 130, 131, 132, 132, 133, 134, 135, 135, 136, 137,
+ 138, 138, 139, 140, 141, 141, 142, 143, 144, 145, 145,
+ 146, 147, 148, 149, 149, 150, 151, 152, 153, 153, 154,
+ 155, 156, 157, 157, 158, 159, 160, 161, 162, 162, 163,
+ 164, 165, 166, 167, 167, 168, 169, 170, 171, 172, 173,
+ 173, 174, 175, 176, 177, 178, 179, 180, 181, 181, 182,
+ 183, 184, 185, 186, 187, 188, 189, 190, 191, 191, 192,
+ 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203,
+ 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+ 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225,
+ 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236,
+ 237, 238, 239, 240, 241, 242, 243, 245, 246, 247, 248,
+ 249, 250, 251, 252, 253, 254, 256, 257, 258, 259, 260,
+ 261, 262, 263, 264, 266, 267, 268, 269, 270, 271, 272,
+ 274, 275, 276, 277, 278, 279, 281, 282, 283, 284, 285,
+ 286, 288, 289, 290, 291, 292, 294, 295, 296, 297, 299,
+ 300, 301, 302, 303, 305, 306, 307, 308, 310, 311, 312,
+ 313, 315, 316, 317, 318, 320, 321, 322, 323, 325, 326,
+ 327, 329, 330, 331, 333, 334, 335, 336, 338, 339, 340,
+ 342, 343, 344, 346, 347, 348, 350, 351, 352, 354, 355,
+ 356, 358, 359, 361, 362, 363, 365, 366, 367, 369, 370,
+ 372, 373, 374, 376, 377, 379, 380, 381, 383, 384, 386,
+ 387, 389, 390, 391, 393, 394, 396, 397, 399, 400, 402,
+ 403, 405, 406, 407, 409, 410, 412, 413, 415, 416, 418,
+ 419, 421, 422, 424, 425, 427, 428, 430, 432, 433, 435,
+ 436, 438, 439, 441, 442, 444, 445, 447, 449, 450, 452,
+ 453, 455, 456, 458, 460, 461, 463, 464, 466, 468, 469,
+ 471, 472, 474, 476, 477, 479, 481, 482, 484, 485, 487,
+ 489, 490, 492, 494, 495, 497, 499, 500, 502, 504, 505,
+ 507, 509, 510, 512, 514, 516, 517, 519, 521, 522, 524,
+ 526, 528, 529, 531, 533, 535, 536, 538, 540, 542, 543,
+ 545, 547, 549, 550, 552, 554, 556, 558, 559, 561, 563,
+ 565, 567, 568, 570, 572, 574, 576, 577, 579, 581, 583,
+ 585, 587, 588, 590, 592, 594, 596, 598, 600, 601, 603,
+ 605, 607, 609, 611, 613, 615, 617, 619, 620, 622, 624,
+ 626, 628, 630, 632, 634, 636, 638, 640, 642, 644, 646,
+ 648, 650, 651, 653, 655, 657, 659, 661, 663, 665, 667,
+ 669, 671, 673, 675, 677, 679, 681, 683, 685, 688, 690,
+ 692, 694, 696, 698, 700, 702, 704, 706, 708, 710, 712,
+ 714, 716, 718, 721, 723, 725, 727, 729, 731, 733, 735,
+ 737, 740, 742, 744, 746, 748, 750, 752, 755, 757, 759,
+ 761, 763, 765, 768, 770, 772, 774, 776, 779, 781, 783,
+ 785, 787, 790, 792, 794, 796, 798, 801, 803, 805, 807,
+ 810, 812, 814, 816, 819, 821, 823, 826, 828, 830, 832,
+ 835, 837, 839, 842, 844, 846, 849, 851, 853, 855, 858,
+ 860, 862, 865, 867, 870, 872, 874, 877, 879, 881, 884,
+ 886, 888, 891, 893, 896, 898, 900, 903, 905, 908, 910,
+ 913, 915, 917, 920, 922, 925, 927, 930, 932, 934, 937,
+ 939, 942, 944, 947, 949, 952, 954, 957, 959, 962, 964,
+ 967, 969, 972, 974, 977, 979, 982, 984, 987, 990, 992,
+ 995, 997, 1000, 1002, 1005, 1007, 1010, 1013, 1015, 1018, 1020,
+ 1023,
+};
+
+/*
+ * 10-bit gamma-encoding LUT for gamma = 2.7: entry i appears to be
+ * round(1023 * (i / 1023)^2.7); linear 10-bit index in, gamma-corrected
+ * 10-bit value out (0..1023), monotonic with endpoints 0 and 1023.
+ * NOTE(review): exponent inferred from the "_27" suffix — confirm
+ * against the table generator.
+ */
+static const u16 xgamma10_27[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7,
+ 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9,
+ 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10,
+ 10, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12,
+ 12, 12, 12, 13, 13, 13, 13, 13, 14, 14, 14,
+ 14, 14, 14, 15, 15, 15, 15, 15, 16, 16, 16,
+ 16, 16, 17, 17, 17, 17, 17, 18, 18, 18, 18,
+ 18, 19, 19, 19, 19, 20, 20, 20, 20, 20, 21,
+ 21, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23,
+ 24, 24, 24, 24, 25, 25, 25, 25, 26, 26, 26,
+ 26, 27, 27, 27, 27, 28, 28, 28, 29, 29, 29,
+ 29, 30, 30, 30, 31, 31, 31, 32, 32, 32, 32,
+ 33, 33, 33, 34, 34, 34, 35, 35, 35, 36, 36,
+ 36, 37, 37, 37, 38, 38, 38, 39, 39, 39, 40,
+ 40, 40, 41, 41, 41, 42, 42, 43, 43, 43, 44,
+ 44, 44, 45, 45, 46, 46, 46, 47, 47, 47, 48,
+ 48, 49, 49, 49, 50, 50, 51, 51, 51, 52, 52,
+ 53, 53, 54, 54, 54, 55, 55, 56, 56, 57, 57,
+ 57, 58, 58, 59, 59, 60, 60, 61, 61, 61, 62,
+ 62, 63, 63, 64, 64, 65, 65, 66, 66, 67, 67,
+ 68, 68, 69, 69, 70, 70, 71, 71, 72, 72, 73,
+ 73, 74, 74, 75, 75, 76, 76, 77, 77, 78, 78,
+ 79, 79, 80, 81, 81, 82, 82, 83, 83, 84, 84,
+ 85, 86, 86, 87, 87, 88, 88, 89, 90, 90, 91,
+ 91, 92, 92, 93, 94, 94, 95, 95, 96, 97, 97,
+ 98, 99, 99, 100, 100, 101, 102, 102, 103, 104, 104,
+ 105, 105, 106, 107, 107, 108, 109, 109, 110, 111, 111,
+ 112, 113, 113, 114, 115, 115, 116, 117, 118, 118, 119,
+ 120, 120, 121, 122, 122, 123, 124, 125, 125, 126, 127,
+ 127, 128, 129, 130, 130, 131, 132, 133, 133, 134, 135,
+ 136, 136, 137, 138, 139, 139, 140, 141, 142, 143, 143,
+ 144, 145, 146, 146, 147, 148, 149, 150, 150, 151, 152,
+ 153, 154, 155, 155, 156, 157, 158, 159, 160, 160, 161,
+ 162, 163, 164, 165, 165, 166, 167, 168, 169, 170, 171,
+ 172, 172, 173, 174, 175, 176, 177, 178, 179, 180, 180,
+ 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
+ 192, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201,
+ 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223,
+ 224, 225, 226, 227, 228, 229, 230, 231, 233, 234, 235,
+ 236, 237, 238, 239, 240, 241, 242, 243, 244, 246, 247,
+ 248, 249, 250, 251, 252, 253, 254, 256, 257, 258, 259,
+ 260, 261, 262, 263, 265, 266, 267, 268, 269, 270, 272,
+ 273, 274, 275, 276, 278, 279, 280, 281, 282, 283, 285,
+ 286, 287, 288, 290, 291, 292, 293, 294, 296, 297, 298,
+ 299, 301, 302, 303, 304, 306, 307, 308, 309, 311, 312,
+ 313, 315, 316, 317, 318, 320, 321, 322, 324, 325, 326,
+ 328, 329, 330, 332, 333, 334, 336, 337, 338, 340, 341,
+ 342, 344, 345, 346, 348, 349, 351, 352, 353, 355, 356,
+ 357, 359, 360, 362, 363, 364, 366, 367, 369, 370, 372,
+ 373, 374, 376, 377, 379, 380, 382, 383, 385, 386, 387,
+ 389, 390, 392, 393, 395, 396, 398, 399, 401, 402, 404,
+ 405, 407, 408, 410, 411, 413, 414, 416, 417, 419, 421,
+ 422, 424, 425, 427, 428, 430, 431, 433, 435, 436, 438,
+ 439, 441, 442, 444, 446, 447, 449, 450, 452, 454, 455,
+ 457, 459, 460, 462, 463, 465, 467, 468, 470, 472, 473,
+ 475, 477, 478, 480, 482, 483, 485, 487, 488, 490, 492,
+ 494, 495, 497, 499, 500, 502, 504, 506, 507, 509, 511,
+ 513, 514, 516, 518, 520, 521, 523, 525, 527, 528, 530,
+ 532, 534, 536, 537, 539, 541, 543, 545, 546, 548, 550,
+ 552, 554, 556, 557, 559, 561, 563, 565, 567, 569, 570,
+ 572, 574, 576, 578, 580, 582, 584, 586, 587, 589, 591,
+ 593, 595, 597, 599, 601, 603, 605, 607, 609, 611, 613,
+ 614, 616, 618, 620, 622, 624, 626, 628, 630, 632, 634,
+ 636, 638, 640, 642, 644, 646, 648, 650, 652, 654, 656,
+ 659, 661, 663, 665, 667, 669, 671, 673, 675, 677, 679,
+ 681, 683, 685, 688, 690, 692, 694, 696, 698, 700, 702,
+ 705, 707, 709, 711, 713, 715, 717, 720, 722, 724, 726,
+ 728, 730, 733, 735, 737, 739, 741, 744, 746, 748, 750,
+ 752, 755, 757, 759, 761, 764, 766, 768, 770, 773, 775,
+ 777, 779, 782, 784, 786, 789, 791, 793, 795, 798, 800,
+ 802, 805, 807, 809, 812, 814, 816, 819, 821, 823, 826,
+ 828, 831, 833, 835, 838, 840, 842, 845, 847, 850, 852,
+ 854, 857, 859, 862, 864, 867, 869, 871, 874, 876, 879,
+ 881, 884, 886, 889, 891, 894, 896, 899, 901, 903, 906,
+ 908, 911, 914, 916, 919, 921, 924, 926, 929, 931, 934,
+ 936, 939, 941, 944, 947, 949, 952, 954, 957, 959, 962,
+ 965, 967, 970, 973, 975, 978, 980, 983, 986, 988, 991,
+ 994, 996, 999, 1002, 1004, 1007, 1010, 1012, 1015, 1018, 1020,
+ 1023,
+};
+
+/*
+ * 10-bit gamma-encoding LUT for gamma = 2.8: entry i appears to be
+ * round(1023 * (i / 1023)^2.8); linear 10-bit index in, gamma-corrected
+ * 10-bit value out (0..1023), monotonic with endpoints 0 and 1023.
+ * NOTE(review): exponent inferred from the "_28" suffix — confirm
+ * against the table generator.
+ */
+static const u16 xgamma10_28[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9,
+ 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10,
+ 10, 10, 11, 11, 11, 11, 11, 11, 12, 12, 12,
+ 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 14,
+ 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 16,
+ 16, 16, 16, 16, 17, 17, 17, 17, 17, 18, 18,
+ 18, 18, 18, 19, 19, 19, 19, 20, 20, 20, 20,
+ 20, 21, 21, 21, 21, 22, 22, 22, 22, 23, 23,
+ 23, 23, 24, 24, 24, 24, 25, 25, 25, 25, 26,
+ 26, 26, 26, 27, 27, 27, 27, 28, 28, 28, 29,
+ 29, 29, 29, 30, 30, 30, 31, 31, 31, 31, 32,
+ 32, 32, 33, 33, 33, 34, 34, 34, 35, 35, 35,
+ 35, 36, 36, 36, 37, 37, 37, 38, 38, 38, 39,
+ 39, 40, 40, 40, 41, 41, 41, 42, 42, 42, 43,
+ 43, 43, 44, 44, 45, 45, 45, 46, 46, 46, 47,
+ 47, 48, 48, 48, 49, 49, 50, 50, 50, 51, 51,
+ 52, 52, 52, 53, 53, 54, 54, 55, 55, 55, 56,
+ 56, 57, 57, 58, 58, 58, 59, 59, 60, 60, 61,
+ 61, 62, 62, 63, 63, 63, 64, 64, 65, 65, 66,
+ 66, 67, 67, 68, 68, 69, 69, 70, 70, 71, 71,
+ 72, 72, 73, 73, 74, 74, 75, 75, 76, 76, 77,
+ 77, 78, 79, 79, 80, 80, 81, 81, 82, 82, 83,
+ 83, 84, 85, 85, 86, 86, 87, 87, 88, 89, 89,
+ 90, 90, 91, 92, 92, 93, 93, 94, 95, 95, 96,
+ 96, 97, 98, 98, 99, 99, 100, 101, 101, 102, 103,
+ 103, 104, 105, 105, 106, 106, 107, 108, 108, 109, 110,
+ 110, 111, 112, 112, 113, 114, 115, 115, 116, 117, 117,
+ 118, 119, 119, 120, 121, 122, 122, 123, 124, 124, 125,
+ 126, 127, 127, 128, 129, 130, 130, 131, 132, 132, 133,
+ 134, 135, 136, 136, 137, 138, 139, 139, 140, 141, 142,
+ 143, 143, 144, 145, 146, 146, 147, 148, 149, 150, 151,
+ 151, 152, 153, 154, 155, 155, 156, 157, 158, 159, 160,
+ 161, 161, 162, 163, 164, 165, 166, 167, 167, 168, 169,
+ 170, 171, 172, 173, 174, 175, 175, 176, 177, 178, 179,
+ 180, 181, 182, 183, 184, 185, 186, 186, 187, 188, 189,
+ 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200,
+ 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211,
+ 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224, 225, 226, 228, 229, 230, 231, 232, 233, 234,
+ 235, 236, 237, 238, 239, 241, 242, 243, 244, 245, 246,
+ 247, 248, 249, 251, 252, 253, 254, 255, 256, 257, 259,
+ 260, 261, 262, 263, 264, 266, 267, 268, 269, 270, 272,
+ 273, 274, 275, 276, 278, 279, 280, 281, 282, 284, 285,
+ 286, 287, 289, 290, 291, 292, 294, 295, 296, 297, 299,
+ 300, 301, 302, 304, 305, 306, 308, 309, 310, 311, 313,
+ 314, 315, 317, 318, 319, 321, 322, 323, 325, 326, 327,
+ 329, 330, 331, 333, 334, 336, 337, 338, 340, 341, 342,
+ 344, 345, 347, 348, 349, 351, 352, 354, 355, 356, 358,
+ 359, 361, 362, 364, 365, 366, 368, 369, 371, 372, 374,
+ 375, 377, 378, 380, 381, 383, 384, 386, 387, 389, 390,
+ 392, 393, 395, 396, 398, 399, 401, 402, 404, 405, 407,
+ 408, 410, 412, 413, 415, 416, 418, 419, 421, 423, 424,
+ 426, 427, 429, 431, 432, 434, 435, 437, 439, 440, 442,
+ 444, 445, 447, 448, 450, 452, 453, 455, 457, 458, 460,
+ 462, 463, 465, 467, 468, 470, 472, 474, 475, 477, 479,
+ 480, 482, 484, 486, 487, 489, 491, 493, 494, 496, 498,
+ 500, 501, 503, 505, 507, 509, 510, 512, 514, 516, 518,
+ 519, 521, 523, 525, 527, 528, 530, 532, 534, 536, 538,
+ 539, 541, 543, 545, 547, 549, 551, 553, 554, 556, 558,
+ 560, 562, 564, 566, 568, 570, 572, 574, 575, 577, 579,
+ 581, 583, 585, 587, 589, 591, 593, 595, 597, 599, 601,
+ 603, 605, 607, 609, 611, 613, 615, 617, 619, 621, 623,
+ 625, 627, 629, 631, 633, 635, 637, 640, 642, 644, 646,
+ 648, 650, 652, 654, 656, 658, 660, 663, 665, 667, 669,
+ 671, 673, 675, 678, 680, 682, 684, 686, 688, 690, 693,
+ 695, 697, 699, 701, 704, 706, 708, 710, 712, 715, 717,
+ 719, 721, 724, 726, 728, 730, 733, 735, 737, 739, 742,
+ 744, 746, 749, 751, 753, 755, 758, 760, 762, 765, 767,
+ 769, 772, 774, 776, 779, 781, 783, 786, 788, 790, 793,
+ 795, 798, 800, 802, 805, 807, 810, 812, 814, 817, 819,
+ 822, 824, 827, 829, 831, 834, 836, 839, 841, 844, 846,
+ 849, 851, 854, 856, 859, 861, 864, 866, 869, 871, 874,
+ 876, 879, 881, 884, 887, 889, 892, 894, 897, 899, 902,
+ 905, 907, 910, 912, 915, 918, 920, 923, 925, 928, 931,
+ 933, 936, 939, 941, 944, 947, 949, 952, 955, 957, 960,
+ 963, 965, 968, 971, 973, 976, 979, 982, 984, 987, 990,
+ 992, 995, 998, 1001, 1004, 1006, 1009, 1012, 1015, 1017, 1020,
+ 1023,
+};
+
+/*
+ * 10-bit gamma-encoding LUT for gamma = 2.9: entry i appears to be
+ * round(1023 * (i / 1023)^2.9); linear 10-bit index in, gamma-corrected
+ * 10-bit value out (0..1023), monotonic with endpoints 0 and 1023.
+ * NOTE(review): exponent inferred from the "_29" suffix — confirm
+ * against the table generator.
+ */
+static const u16 xgamma10_29[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9,
+ 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10,
+ 10, 10, 11, 11, 11, 11, 11, 11, 11, 12, 12,
+ 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13,
+ 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15,
+ 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 18,
+ 18, 18, 18, 18, 19, 19, 19, 19, 19, 20, 20,
+ 20, 20, 21, 21, 21, 21, 21, 22, 22, 22, 22,
+ 23, 23, 23, 23, 24, 24, 24, 24, 25, 25, 25,
+ 25, 26, 26, 26, 26, 27, 27, 27, 28, 28, 28,
+ 28, 29, 29, 29, 29, 30, 30, 30, 31, 31, 31,
+ 31, 32, 32, 32, 33, 33, 33, 34, 34, 34, 35,
+ 35, 35, 35, 36, 36, 36, 37, 37, 37, 38, 38,
+ 38, 39, 39, 39, 40, 40, 41, 41, 41, 42, 42,
+ 42, 43, 43, 43, 44, 44, 44, 45, 45, 46, 46,
+ 46, 47, 47, 48, 48, 48, 49, 49, 49, 50, 50,
+ 51, 51, 52, 52, 52, 53, 53, 54, 54, 54, 55,
+ 55, 56, 56, 57, 57, 57, 58, 58, 59, 59, 60,
+ 60, 61, 61, 61, 62, 62, 63, 63, 64, 64, 65,
+ 65, 66, 66, 67, 67, 68, 68, 69, 69, 70, 70,
+ 71, 71, 72, 72, 73, 73, 74, 74, 75, 75, 76,
+ 76, 77, 77, 78, 78, 79, 80, 80, 81, 81, 82,
+ 82, 83, 83, 84, 85, 85, 86, 86, 87, 87, 88,
+ 89, 89, 90, 90, 91, 92, 92, 93, 93, 94, 95,
+ 95, 96, 96, 97, 98, 98, 99, 99, 100, 101, 101,
+ 102, 103, 103, 104, 105, 105, 106, 107, 107, 108, 109,
+ 109, 110, 111, 111, 112, 113, 113, 114, 115, 115, 116,
+ 117, 117, 118, 119, 120, 120, 121, 122, 122, 123, 124,
+ 125, 125, 126, 127, 128, 128, 129, 130, 131, 131, 132,
+ 133, 134, 134, 135, 136, 137, 137, 138, 139, 140, 141,
+ 141, 142, 143, 144, 145, 145, 146, 147, 148, 149, 149,
+ 150, 151, 152, 153, 154, 154, 155, 156, 157, 158, 159,
+ 160, 160, 161, 162, 163, 164, 165, 166, 166, 167, 168,
+ 169, 170, 171, 172, 173, 174, 175, 175, 176, 177, 178,
+ 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189,
+ 190, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210,
+ 211, 212, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224, 225, 226, 227, 228, 229, 231, 232, 233, 234,
+ 235, 236, 237, 238, 239, 241, 242, 243, 244, 245, 246,
+ 247, 248, 250, 251, 252, 253, 254, 255, 257, 258, 259,
+ 260, 261, 263, 264, 265, 266, 267, 269, 270, 271, 272,
+ 273, 275, 276, 277, 278, 280, 281, 282, 283, 285, 286,
+ 287, 288, 290, 291, 292, 293, 295, 296, 297, 299, 300,
+ 301, 302, 304, 305, 306, 308, 309, 310, 312, 313, 314,
+ 316, 317, 318, 320, 321, 322, 324, 325, 327, 328, 329,
+ 331, 332, 333, 335, 336, 338, 339, 340, 342, 343, 345,
+ 346, 348, 349, 350, 352, 353, 355, 356, 358, 359, 361,
+ 362, 363, 365, 366, 368, 369, 371, 372, 374, 375, 377,
+ 378, 380, 381, 383, 384, 386, 388, 389, 391, 392, 394,
+ 395, 397, 398, 400, 402, 403, 405, 406, 408, 409, 411,
+ 413, 414, 416, 417, 419, 421, 422, 424, 426, 427, 429,
+ 430, 432, 434, 435, 437, 439, 440, 442, 444, 445, 447,
+ 449, 450, 452, 454, 456, 457, 459, 461, 462, 464, 466,
+ 468, 469, 471, 473, 475, 476, 478, 480, 482, 483, 485,
+ 487, 489, 491, 492, 494, 496, 498, 500, 501, 503, 505,
+ 507, 509, 511, 512, 514, 516, 518, 520, 522, 524, 525,
+ 527, 529, 531, 533, 535, 537, 539, 541, 542, 544, 546,
+ 548, 550, 552, 554, 556, 558, 560, 562, 564, 566, 568,
+ 570, 572, 574, 576, 578, 580, 582, 584, 586, 588, 590,
+ 592, 594, 596, 598, 600, 602, 604, 606, 608, 610, 612,
+ 614, 616, 618, 621, 623, 625, 627, 629, 631, 633, 635,
+ 637, 640, 642, 644, 646, 648, 650, 652, 655, 657, 659,
+ 661, 663, 665, 668, 670, 672, 674, 676, 679, 681, 683,
+ 685, 688, 690, 692, 694, 697, 699, 701, 703, 706, 708,
+ 710, 712, 715, 717, 719, 722, 724, 726, 729, 731, 733,
+ 736, 738, 740, 743, 745, 747, 750, 752, 754, 757, 759,
+ 762, 764, 766, 769, 771, 774, 776, 778, 781, 783, 786,
+ 788, 791, 793, 795, 798, 800, 803, 805, 808, 810, 813,
+ 815, 818, 820, 823, 825, 828, 830, 833, 835, 838, 841,
+ 843, 846, 848, 851, 853, 856, 859, 861, 864, 866, 869,
+ 872, 874, 877, 879, 882, 885, 887, 890, 893, 895, 898,
+ 901, 903, 906, 909, 911, 914, 917, 919, 922, 925, 927,
+ 930, 933, 936, 938, 941, 944, 947, 949, 952, 955, 958,
+ 960, 963, 966, 969, 972, 974, 977, 980, 983, 986, 989,
+ 991, 994, 997, 1000, 1003, 1006, 1009, 1011, 1014, 1017, 1020,
+ 1023,
+};
+
+/*
+ * 10-bit gamma-encoding LUT for gamma = 3.0: entry i appears to be
+ * round(1023 * (i / 1023)^3.0); linear 10-bit index in, gamma-corrected
+ * 10-bit value out (0..1023), monotonic with endpoints 0 and 1023.
+ * NOTE(review): exponent inferred from the "_30" suffix — confirm
+ * against the table generator.
+ */
+static const u16 xgamma10_30[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9,
+ 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10,
+ 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 12,
+ 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13,
+ 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15,
+ 15, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17,
+ 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 20,
+ 20, 20, 20, 21, 21, 21, 21, 21, 22, 22, 22,
+ 22, 23, 23, 23, 23, 24, 24, 24, 24, 25, 25,
+ 25, 25, 26, 26, 26, 26, 27, 27, 27, 27, 28,
+ 28, 28, 28, 29, 29, 29, 30, 30, 30, 30, 31,
+ 31, 31, 32, 32, 32, 33, 33, 33, 33, 34, 34,
+ 34, 35, 35, 35, 36, 36, 36, 37, 37, 37, 38,
+ 38, 38, 39, 39, 39, 40, 40, 40, 41, 41, 41,
+ 42, 42, 42, 43, 43, 43, 44, 44, 45, 45, 45,
+ 46, 46, 46, 47, 47, 48, 48, 48, 49, 49, 50,
+ 50, 50, 51, 51, 52, 52, 52, 53, 53, 54, 54,
+ 55, 55, 55, 56, 56, 57, 57, 58, 58, 58, 59,
+ 59, 60, 60, 61, 61, 62, 62, 63, 63, 63, 64,
+ 64, 65, 65, 66, 66, 67, 67, 68, 68, 69, 69,
+ 70, 70, 71, 71, 72, 72, 73, 73, 74, 74, 75,
+ 75, 76, 77, 77, 78, 78, 79, 79, 80, 80, 81,
+ 81, 82, 83, 83, 84, 84, 85, 85, 86, 86, 87,
+ 88, 88, 89, 89, 90, 91, 91, 92, 92, 93, 94,
+ 94, 95, 95, 96, 97, 97, 98, 99, 99, 100, 100,
+ 101, 102, 102, 103, 104, 104, 105, 106, 106, 107, 108,
+ 108, 109, 110, 110, 111, 112, 112, 113, 114, 114, 115,
+ 116, 117, 117, 118, 119, 119, 120, 121, 122, 122, 123,
+ 124, 125, 125, 126, 127, 128, 128, 129, 130, 131, 131,
+ 132, 133, 134, 134, 135, 136, 137, 137, 138, 139, 140,
+ 141, 141, 142, 143, 144, 145, 146, 146, 147, 148, 149,
+ 150, 150, 151, 152, 153, 154, 155, 156, 156, 157, 158,
+ 159, 160, 161, 162, 162, 163, 164, 165, 166, 167, 168,
+ 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178,
+ 179, 180, 181, 182, 183, 184, 185, 185, 186, 187, 188,
+ 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207, 208, 210, 211,
+ 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ 223, 224, 226, 227, 228, 229, 230, 231, 232, 233, 234,
+ 236, 237, 238, 239, 240, 241, 242, 244, 245, 246, 247,
+ 248, 249, 250, 252, 253, 254, 255, 256, 258, 259, 260,
+ 261, 262, 264, 265, 266, 267, 269, 270, 271, 272, 273,
+ 275, 276, 277, 278, 280, 281, 282, 284, 285, 286, 287,
+ 289, 290, 291, 293, 294, 295, 296, 298, 299, 300, 302,
+ 303, 304, 306, 307, 308, 310, 311, 313, 314, 315, 317,
+ 318, 319, 321, 322, 324, 325, 326, 328, 329, 331, 332,
+ 333, 335, 336, 338, 339, 341, 342, 343, 345, 346, 348,
+ 349, 351, 352, 354, 355, 357, 358, 360, 361, 363, 364,
+ 366, 367, 369, 370, 372, 373, 375, 376, 378, 379, 381,
+ 383, 384, 386, 387, 389, 390, 392, 394, 395, 397, 398,
+ 400, 402, 403, 405, 406, 408, 410, 411, 413, 415, 416,
+ 418, 419, 421, 423, 424, 426, 428, 429, 431, 433, 435,
+ 436, 438, 440, 441, 443, 445, 447, 448, 450, 452, 453,
+ 455, 457, 459, 460, 462, 464, 466, 468, 469, 471, 473,
+ 475, 477, 478, 480, 482, 484, 486, 487, 489, 491, 493,
+ 495, 497, 498, 500, 502, 504, 506, 508, 510, 512, 513,
+ 515, 517, 519, 521, 523, 525, 527, 529, 531, 533, 535,
+ 537, 539, 540, 542, 544, 546, 548, 550, 552, 554, 556,
+ 558, 560, 562, 564, 566, 568, 570, 572, 574, 577, 579,
+ 581, 583, 585, 587, 589, 591, 593, 595, 597, 599, 601,
+ 604, 606, 608, 610, 612, 614, 616, 618, 621, 623, 625,
+ 627, 629, 631, 634, 636, 638, 640, 642, 645, 647, 649,
+ 651, 653, 656, 658, 660, 662, 665, 667, 669, 671, 674,
+ 676, 678, 680, 683, 685, 687, 690, 692, 694, 697, 699,
+ 701, 704, 706, 708, 711, 713, 715, 718, 720, 722, 725,
+ 727, 730, 732, 734, 737, 739, 742, 744, 746, 749, 751,
+ 754, 756, 759, 761, 764, 766, 769, 771, 774, 776, 779,
+ 781, 784, 786, 789, 791, 794, 796, 799, 801, 804, 806,
+ 809, 812, 814, 817, 819, 822, 824, 827, 830, 832, 835,
+ 837, 840, 843, 845, 848, 851, 853, 856, 859, 861, 864,
+ 867, 869, 872, 875, 878, 880, 883, 886, 888, 891, 894,
+ 897, 899, 902, 905, 908, 910, 913, 916, 919, 922, 924,
+ 927, 930, 933, 936, 938, 941, 944, 947, 950, 953, 956,
+ 958, 961, 964, 967, 970, 973, 976, 979, 982, 984, 987,
+ 990, 993, 996, 999, 1002, 1005, 1008, 1011, 1014, 1017, 1020,
+ 1023,
+};
+
+/*
+ * 10-bit gamma look-up table, gamma = 3.1 (from the "_31" name suffix).
+ * 1024 entries mapping a 10-bit input code (0..1023) to a 10-bit output;
+ * monotonic non-decreasing, with 0 -> 0 and 1023 -> 1023.
+ * NOTE(review): values appear to follow round(1023 * (in/1023)^3.1) --
+ * confirm against the table generator before relying on the formula.
+ */
+static const u16 xgamma10_31[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9,
+ 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10,
+ 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 12,
+ 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13,
+ 13, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15,
+ 15, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17,
+ 17, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19,
+ 20, 20, 20, 20, 21, 21, 21, 21, 21, 22, 22,
+ 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 25,
+ 25, 25, 25, 26, 26, 26, 26, 27, 27, 27, 27,
+ 28, 28, 28, 28, 29, 29, 29, 30, 30, 30, 30,
+ 31, 31, 31, 32, 32, 32, 32, 33, 33, 33, 34,
+ 34, 34, 35, 35, 35, 36, 36, 36, 36, 37, 37,
+ 37, 38, 38, 38, 39, 39, 39, 40, 40, 41, 41,
+ 41, 42, 42, 42, 43, 43, 43, 44, 44, 44, 45,
+ 45, 46, 46, 46, 47, 47, 47, 48, 48, 49, 49,
+ 49, 50, 50, 51, 51, 51, 52, 52, 53, 53, 54,
+ 54, 54, 55, 55, 56, 56, 57, 57, 57, 58, 58,
+ 59, 59, 60, 60, 61, 61, 61, 62, 62, 63, 63,
+ 64, 64, 65, 65, 66, 66, 67, 67, 68, 68, 69,
+ 69, 70, 70, 71, 71, 72, 72, 73, 73, 74, 74,
+ 75, 75, 76, 76, 77, 77, 78, 79, 79, 80, 80,
+ 81, 81, 82, 82, 83, 84, 84, 85, 85, 86, 86,
+ 87, 88, 88, 89, 89, 90, 91, 91, 92, 92, 93,
+ 94, 94, 95, 95, 96, 97, 97, 98, 99, 99, 100,
+ 101, 101, 102, 102, 103, 104, 104, 105, 106, 106, 107,
+ 108, 108, 109, 110, 111, 111, 112, 113, 113, 114, 115,
+ 115, 116, 117, 118, 118, 119, 120, 120, 121, 122, 123,
+ 123, 124, 125, 126, 126, 127, 128, 129, 129, 130, 131,
+ 132, 132, 133, 134, 135, 136, 136, 137, 138, 139, 140,
+ 140, 141, 142, 143, 144, 144, 145, 146, 147, 148, 149,
+ 149, 150, 151, 152, 153, 154, 155, 155, 156, 157, 158,
+ 159, 160, 161, 162, 162, 163, 164, 165, 166, 167, 168,
+ 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178,
+ 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189,
+ 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200,
+ 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211,
+ 212, 213, 214, 216, 217, 218, 219, 220, 221, 222, 223,
+ 224, 225, 227, 228, 229, 230, 231, 232, 233, 234, 236,
+ 237, 238, 239, 240, 241, 243, 244, 245, 246, 247, 248,
+ 250, 251, 252, 253, 254, 256, 257, 258, 259, 260, 262,
+ 263, 264, 265, 267, 268, 269, 270, 272, 273, 274, 275,
+ 277, 278, 279, 281, 282, 283, 285, 286, 287, 288, 290,
+ 291, 292, 294, 295, 296, 298, 299, 300, 302, 303, 305,
+ 306, 307, 309, 310, 311, 313, 314, 316, 317, 318, 320,
+ 321, 323, 324, 325, 327, 328, 330, 331, 333, 334, 336,
+ 337, 338, 340, 341, 343, 344, 346, 347, 349, 350, 352,
+ 353, 355, 356, 358, 359, 361, 362, 364, 366, 367, 369,
+ 370, 372, 373, 375, 376, 378, 380, 381, 383, 384, 386,
+ 388, 389, 391, 392, 394, 396, 397, 399, 401, 402, 404,
+ 406, 407, 409, 411, 412, 414, 416, 417, 419, 421, 422,
+ 424, 426, 427, 429, 431, 433, 434, 436, 438, 440, 441,
+ 443, 445, 447, 448, 450, 452, 454, 456, 457, 459, 461,
+ 463, 465, 466, 468, 470, 472, 474, 476, 477, 479, 481,
+ 483, 485, 487, 489, 490, 492, 494, 496, 498, 500, 502,
+ 504, 506, 508, 510, 511, 513, 515, 517, 519, 521, 523,
+ 525, 527, 529, 531, 533, 535, 537, 539, 541, 543, 545,
+ 547, 549, 551, 553, 555, 557, 559, 561, 564, 566, 568,
+ 570, 572, 574, 576, 578, 580, 582, 584, 587, 589, 591,
+ 593, 595, 597, 599, 602, 604, 606, 608, 610, 613, 615,
+ 617, 619, 621, 624, 626, 628, 630, 632, 635, 637, 639,
+ 641, 644, 646, 648, 651, 653, 655, 657, 660, 662, 664,
+ 667, 669, 671, 674, 676, 678, 681, 683, 685, 688, 690,
+ 692, 695, 697, 700, 702, 704, 707, 709, 712, 714, 717,
+ 719, 721, 724, 726, 729, 731, 734, 736, 739, 741, 744,
+ 746, 749, 751, 754, 756, 759, 761, 764, 766, 769, 772,
+ 774, 777, 779, 782, 784, 787, 790, 792, 795, 797, 800,
+ 803, 805, 808, 811, 813, 816, 819, 821, 824, 827, 829,
+ 832, 835, 837, 840, 843, 845, 848, 851, 854, 856, 859,
+ 862, 865, 867, 870, 873, 876, 879, 881, 884, 887, 890,
+ 893, 895, 898, 901, 904, 907, 910, 913, 915, 918, 921,
+ 924, 927, 930, 933, 936, 939, 942, 945, 947, 950, 953,
+ 956, 959, 962, 965, 968, 971, 974, 977, 980, 983, 986,
+ 989, 992, 995, 998, 1001, 1005, 1008, 1011, 1014, 1017, 1020,
+ 1023,
+};
+
+/*
+ * 10-bit gamma look-up table, gamma = 3.2 (from the "_32" name suffix).
+ * 1024 entries mapping a 10-bit input code (0..1023) to a 10-bit output;
+ * monotonic non-decreasing, with 0 -> 0 and 1023 -> 1023.
+ * NOTE(review): values appear to follow round(1023 * (in/1023)^3.2) --
+ * confirm against the table generator before relying on the formula.
+ */
+static const u16 xgamma10_32[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10,
+ 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 12,
+ 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13,
+ 13, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15,
+ 15, 15, 16, 16, 16, 16, 16, 17, 17, 17, 17,
+ 17, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19,
+ 20, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22,
+ 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24,
+ 25, 25, 25, 25, 26, 26, 26, 26, 27, 27, 27,
+ 27, 28, 28, 28, 28, 29, 29, 29, 30, 30, 30,
+ 30, 31, 31, 31, 32, 32, 32, 32, 33, 33, 33,
+ 34, 34, 34, 35, 35, 35, 36, 36, 36, 37, 37,
+ 37, 37, 38, 38, 38, 39, 39, 39, 40, 40, 41,
+ 41, 41, 42, 42, 42, 43, 43, 43, 44, 44, 44,
+ 45, 45, 46, 46, 46, 47, 47, 48, 48, 48, 49,
+ 49, 49, 50, 50, 51, 51, 51, 52, 52, 53, 53,
+ 54, 54, 54, 55, 55, 56, 56, 57, 57, 57, 58,
+ 58, 59, 59, 60, 60, 61, 61, 62, 62, 62, 63,
+ 63, 64, 64, 65, 65, 66, 66, 67, 67, 68, 68,
+ 69, 69, 70, 70, 71, 71, 72, 72, 73, 73, 74,
+ 74, 75, 75, 76, 77, 77, 78, 78, 79, 79, 80,
+ 80, 81, 81, 82, 83, 83, 84, 84, 85, 85, 86,
+ 87, 87, 88, 88, 89, 90, 90, 91, 91, 92, 93,
+ 93, 94, 95, 95, 96, 96, 97, 98, 98, 99, 100,
+ 100, 101, 102, 102, 103, 104, 104, 105, 106, 106, 107,
+ 108, 108, 109, 110, 110, 111, 112, 112, 113, 114, 114,
+ 115, 116, 117, 117, 118, 119, 120, 120, 121, 122, 122,
+ 123, 124, 125, 125, 126, 127, 128, 129, 129, 130, 131,
+ 132, 132, 133, 134, 135, 136, 136, 137, 138, 139, 140,
+ 140, 141, 142, 143, 144, 145, 145, 146, 147, 148, 149,
+ 150, 150, 151, 152, 153, 154, 155, 156, 157, 157, 158,
+ 159, 160, 161, 162, 163, 164, 165, 166, 166, 167, 168,
+ 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179,
+ 180, 181, 182, 183, 184, 185, 186, 186, 187, 188, 189,
+ 190, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201,
+ 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ 214, 215, 216, 217, 218, 219, 220, 221, 222, 224, 225,
+ 226, 227, 228, 229, 230, 231, 233, 234, 235, 236, 237,
+ 238, 240, 241, 242, 243, 244, 246, 247, 248, 249, 250,
+ 252, 253, 254, 255, 257, 258, 259, 260, 262, 263, 264,
+ 265, 267, 268, 269, 270, 272, 273, 274, 276, 277, 278,
+ 280, 281, 282, 283, 285, 286, 287, 289, 290, 291, 293,
+ 294, 296, 297, 298, 300, 301, 302, 304, 305, 307, 308,
+ 309, 311, 312, 314, 315, 316, 318, 319, 321, 322, 324,
+ 325, 327, 328, 330, 331, 332, 334, 335, 337, 338, 340,
+ 341, 343, 344, 346, 347, 349, 351, 352, 354, 355, 357,
+ 358, 360, 361, 363, 364, 366, 368, 369, 371, 372, 374,
+ 376, 377, 379, 380, 382, 384, 385, 387, 389, 390, 392,
+ 394, 395, 397, 399, 400, 402, 404, 405, 407, 409, 410,
+ 412, 414, 416, 417, 419, 421, 423, 424, 426, 428, 430,
+ 431, 433, 435, 437, 438, 440, 442, 444, 446, 447, 449,
+ 451, 453, 455, 457, 458, 460, 462, 464, 466, 468, 469,
+ 471, 473, 475, 477, 479, 481, 483, 485, 487, 488, 490,
+ 492, 494, 496, 498, 500, 502, 504, 506, 508, 510, 512,
+ 514, 516, 518, 520, 522, 524, 526, 528, 530, 532, 534,
+ 536, 538, 540, 542, 544, 547, 549, 551, 553, 555, 557,
+ 559, 561, 563, 565, 568, 570, 572, 574, 576, 578, 581,
+ 583, 585, 587, 589, 591, 594, 596, 598, 600, 602, 605,
+ 607, 609, 611, 614, 616, 618, 620, 623, 625, 627, 630,
+ 632, 634, 636, 639, 641, 643, 646, 648, 650, 653, 655,
+ 657, 660, 662, 665, 667, 669, 672, 674, 677, 679, 681,
+ 684, 686, 689, 691, 694, 696, 698, 701, 703, 706, 708,
+ 711, 713, 716, 718, 721, 723, 726, 728, 731, 734, 736,
+ 739, 741, 744, 746, 749, 751, 754, 757, 759, 762, 765,
+ 767, 770, 772, 775, 778, 780, 783, 786, 788, 791, 794,
+ 796, 799, 802, 804, 807, 810, 813, 815, 818, 821, 824,
+ 826, 829, 832, 835, 838, 840, 843, 846, 849, 852, 854,
+ 857, 860, 863, 866, 869, 871, 874, 877, 880, 883, 886,
+ 889, 892, 895, 897, 900, 903, 906, 909, 912, 915, 918,
+ 921, 924, 927, 930, 933, 936, 939, 942, 945, 948, 951,
+ 954, 957, 960, 963, 967, 970, 973, 976, 979, 982, 985,
+ 988, 991, 994, 998, 1001, 1004, 1007, 1010, 1013, 1017, 1020,
+ 1023,
+};
+
+/*
+ * 10-bit gamma look-up table, gamma = 3.3 (from the "_33" name suffix).
+ * 1024 entries mapping a 10-bit input code (0..1023) to a 10-bit output;
+ * monotonic non-decreasing, with 0 -> 0 and 1023 -> 1023.
+ * NOTE(review): values appear to follow round(1023 * (in/1023)^3.3) --
+ * confirm against the table generator before relying on the formula.
+ */
+static const u16 xgamma10_33[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9,
+ 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10,
+ 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 12,
+ 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13,
+ 13, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15,
+ 15, 15, 16, 16, 16, 16, 16, 17, 17, 17, 17,
+ 17, 17, 18, 18, 18, 18, 18, 19, 19, 19, 19,
+ 19, 20, 20, 20, 20, 21, 21, 21, 21, 21, 22,
+ 22, 22, 22, 23, 23, 23, 23, 23, 24, 24, 24,
+ 24, 25, 25, 25, 25, 26, 26, 26, 26, 27, 27,
+ 27, 28, 28, 28, 28, 29, 29, 29, 29, 30, 30,
+ 30, 31, 31, 31, 31, 32, 32, 32, 33, 33, 33,
+ 33, 34, 34, 34, 35, 35, 35, 36, 36, 36, 37,
+ 37, 37, 38, 38, 38, 39, 39, 39, 40, 40, 40,
+ 41, 41, 41, 42, 42, 42, 43, 43, 44, 44, 44,
+ 45, 45, 45, 46, 46, 47, 47, 47, 48, 48, 48,
+ 49, 49, 50, 50, 50, 51, 51, 52, 52, 53, 53,
+ 53, 54, 54, 55, 55, 55, 56, 56, 57, 57, 58,
+ 58, 59, 59, 59, 60, 60, 61, 61, 62, 62, 63,
+ 63, 64, 64, 65, 65, 66, 66, 67, 67, 68, 68,
+ 69, 69, 70, 70, 71, 71, 72, 72, 73, 73, 74,
+ 74, 75, 75, 76, 76, 77, 77, 78, 79, 79, 80,
+ 80, 81, 81, 82, 82, 83, 84, 84, 85, 85, 86,
+ 87, 87, 88, 88, 89, 90, 90, 91, 91, 92, 93,
+ 93, 94, 94, 95, 96, 96, 97, 98, 98, 99, 100,
+ 100, 101, 102, 102, 103, 104, 104, 105, 106, 106, 107,
+ 108, 108, 109, 110, 110, 111, 112, 112, 113, 114, 115,
+ 115, 116, 117, 118, 118, 119, 120, 120, 121, 122, 123,
+ 123, 124, 125, 126, 127, 127, 128, 129, 130, 130, 131,
+ 132, 133, 134, 134, 135, 136, 137, 138, 138, 139, 140,
+ 141, 142, 143, 143, 144, 145, 146, 147, 148, 148, 149,
+ 150, 151, 152, 153, 154, 155, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 165, 166, 167, 168, 169,
+ 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180,
+ 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
+ 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202,
+ 203, 204, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+ 215, 216, 218, 219, 220, 221, 222, 223, 224, 226, 227,
+ 228, 229, 230, 231, 233, 234, 235, 236, 237, 238, 240,
+ 241, 242, 243, 245, 246, 247, 248, 249, 251, 252, 253,
+ 254, 256, 257, 258, 259, 261, 262, 263, 265, 266, 267,
+ 268, 270, 271, 272, 274, 275, 276, 278, 279, 280, 282,
+ 283, 284, 286, 287, 288, 290, 291, 292, 294, 295, 297,
+ 298, 299, 301, 302, 304, 305, 307, 308, 309, 311, 312,
+ 314, 315, 317, 318, 320, 321, 322, 324, 325, 327, 328,
+ 330, 331, 333, 334, 336, 337, 339, 341, 342, 344, 345,
+ 347, 348, 350, 351, 353, 355, 356, 358, 359, 361, 362,
+ 364, 366, 367, 369, 371, 372, 374, 375, 377, 379, 380,
+ 382, 384, 385, 387, 389, 390, 392, 394, 395, 397, 399,
+ 401, 402, 404, 406, 408, 409, 411, 413, 414, 416, 418,
+ 420, 422, 423, 425, 427, 429, 431, 432, 434, 436, 438,
+ 440, 441, 443, 445, 447, 449, 451, 453, 454, 456, 458,
+ 460, 462, 464, 466, 468, 470, 472, 473, 475, 477, 479,
+ 481, 483, 485, 487, 489, 491, 493, 495, 497, 499, 501,
+ 503, 505, 507, 509, 511, 513, 515, 517, 519, 521, 523,
+ 525, 528, 530, 532, 534, 536, 538, 540, 542, 544, 547,
+ 549, 551, 553, 555, 557, 559, 562, 564, 566, 568, 570,
+ 573, 575, 577, 579, 581, 584, 586, 588, 590, 593, 595,
+ 597, 599, 602, 604, 606, 609, 611, 613, 615, 618, 620,
+ 622, 625, 627, 629, 632, 634, 637, 639, 641, 644, 646,
+ 648, 651, 653, 656, 658, 661, 663, 665, 668, 670, 673,
+ 675, 678, 680, 683, 685, 688, 690, 693, 695, 698, 700,
+ 703, 705, 708, 710, 713, 716, 718, 721, 723, 726, 729,
+ 731, 734, 736, 739, 742, 744, 747, 750, 752, 755, 758,
+ 760, 763, 766, 768, 771, 774, 776, 779, 782, 785, 787,
+ 790, 793, 796, 798, 801, 804, 807, 810, 812, 815, 818,
+ 821, 824, 827, 829, 832, 835, 838, 841, 844, 847, 850,
+ 852, 855, 858, 861, 864, 867, 870, 873, 876, 879, 882,
+ 885, 888, 891, 894, 897, 900, 903, 906, 909, 912, 915,
+ 918, 921, 924, 927, 930, 933, 937, 940, 943, 946, 949,
+ 952, 955, 958, 962, 965, 968, 971, 974, 978, 981, 984,
+ 987, 990, 994, 997, 1000, 1003, 1007, 1010, 1013, 1016, 1020,
+ 1023,
+};
+
+/*
+ * 10-bit gamma look-up table, gamma = 3.4 (from the "_34" name suffix).
+ * 1024 entries mapping a 10-bit input code (0..1023) to a 10-bit output;
+ * monotonic non-decreasing, with 0 -> 0 and 1023 -> 1023.
+ * NOTE(review): values appear to follow round(1023 * (in/1023)^3.4) --
+ * confirm against the table generator before relying on the formula.
+ */
+static const u16 xgamma10_34[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9,
+ 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10,
+ 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 12,
+ 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13,
+ 13, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15,
+ 15, 15, 16, 16, 16, 16, 16, 17, 17, 17, 17,
+ 17, 17, 18, 18, 18, 18, 18, 19, 19, 19, 19,
+ 19, 20, 20, 20, 20, 21, 21, 21, 21, 21, 22,
+ 22, 22, 22, 23, 23, 23, 23, 23, 24, 24, 24,
+ 24, 25, 25, 25, 25, 26, 26, 26, 26, 27, 27,
+ 27, 27, 28, 28, 28, 29, 29, 29, 29, 30, 30,
+ 30, 30, 31, 31, 31, 32, 32, 32, 33, 33, 33,
+ 33, 34, 34, 34, 35, 35, 35, 36, 36, 36, 37,
+ 37, 37, 38, 38, 38, 39, 39, 39, 40, 40, 40,
+ 41, 41, 41, 42, 42, 42, 43, 43, 43, 44, 44,
+ 45, 45, 45, 46, 46, 46, 47, 47, 48, 48, 48,
+ 49, 49, 50, 50, 50, 51, 51, 52, 52, 52, 53,
+ 53, 54, 54, 55, 55, 55, 56, 56, 57, 57, 58,
+ 58, 59, 59, 59, 60, 60, 61, 61, 62, 62, 63,
+ 63, 64, 64, 65, 65, 66, 66, 67, 67, 68, 68,
+ 69, 69, 70, 70, 71, 71, 72, 72, 73, 73, 74,
+ 74, 75, 75, 76, 76, 77, 78, 78, 79, 79, 80,
+ 80, 81, 81, 82, 83, 83, 84, 84, 85, 86, 86,
+ 87, 87, 88, 88, 89, 90, 90, 91, 92, 92, 93,
+ 93, 94, 95, 95, 96, 97, 97, 98, 99, 99, 100,
+ 101, 101, 102, 102, 103, 104, 105, 105, 106, 107, 107,
+ 108, 109, 109, 110, 111, 111, 112, 113, 114, 114, 115,
+ 116, 117, 117, 118, 119, 119, 120, 121, 122, 123, 123,
+ 124, 125, 126, 126, 127, 128, 129, 129, 130, 131, 132,
+ 133, 133, 134, 135, 136, 137, 138, 138, 139, 140, 141,
+ 142, 143, 143, 144, 145, 146, 147, 148, 149, 149, 150,
+ 151, 152, 153, 154, 155, 156, 157, 157, 158, 159, 160,
+ 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171,
+ 172, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181,
+ 182, 183, 184, 185, 186, 187, 188, 189, 191, 192, 193,
+ 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+ 205, 207, 208, 209, 210, 211, 212, 213, 214, 215, 217,
+ 218, 219, 220, 221, 222, 223, 225, 226, 227, 228, 229,
+ 231, 232, 233, 234, 235, 237, 238, 239, 240, 241, 243,
+ 244, 245, 246, 248, 249, 250, 251, 253, 254, 255, 256,
+ 258, 259, 260, 262, 263, 264, 266, 267, 268, 269, 271,
+ 272, 273, 275, 276, 278, 279, 280, 282, 283, 284, 286,
+ 287, 288, 290, 291, 293, 294, 296, 297, 298, 300, 301,
+ 303, 304, 306, 307, 308, 310, 311, 313, 314, 316, 317,
+ 319, 320, 322, 323, 325, 326, 328, 329, 331, 332, 334,
+ 335, 337, 339, 340, 342, 343, 345, 346, 348, 350, 351,
+ 353, 354, 356, 358, 359, 361, 363, 364, 366, 367, 369,
+ 371, 372, 374, 376, 377, 379, 381, 383, 384, 386, 388,
+ 389, 391, 393, 395, 396, 398, 400, 402, 403, 405, 407,
+ 409, 410, 412, 414, 416, 418, 419, 421, 423, 425, 427,
+ 429, 430, 432, 434, 436, 438, 440, 442, 443, 445, 447,
+ 449, 451, 453, 455, 457, 459, 461, 463, 464, 466, 468,
+ 470, 472, 474, 476, 478, 480, 482, 484, 486, 488, 490,
+ 492, 494, 496, 498, 500, 503, 505, 507, 509, 511, 513,
+ 515, 517, 519, 521, 523, 526, 528, 530, 532, 534, 536,
+ 538, 541, 543, 545, 547, 549, 551, 554, 556, 558, 560,
+ 563, 565, 567, 569, 572, 574, 576, 578, 581, 583, 585,
+ 587, 590, 592, 594, 597, 599, 601, 604, 606, 608, 611,
+ 613, 615, 618, 620, 623, 625, 627, 630, 632, 635, 637,
+ 640, 642, 644, 647, 649, 652, 654, 657, 659, 662, 664,
+ 667, 669, 672, 674, 677, 679, 682, 685, 687, 690, 692,
+ 695, 697, 700, 703, 705, 708, 711, 713, 716, 718, 721,
+ 724, 726, 729, 732, 734, 737, 740, 743, 745, 748, 751,
+ 753, 756, 759, 762, 764, 767, 770, 773, 776, 778, 781,
+ 784, 787, 790, 793, 795, 798, 801, 804, 807, 810, 813,
+ 815, 818, 821, 824, 827, 830, 833, 836, 839, 842, 845,
+ 848, 851, 854, 857, 860, 863, 866, 869, 872, 875, 878,
+ 881, 884, 887, 890, 893, 896, 899, 903, 906, 909, 912,
+ 915, 918, 921, 925, 928, 931, 934, 937, 940, 944, 947,
+ 950, 953, 957, 960, 963, 966, 970, 973, 976, 979, 983,
+ 986, 989, 993, 996, 999, 1003, 1006, 1009, 1013, 1016, 1020,
+ 1023,
+};
+
+/*
+ * 10-bit gamma look-up table, gamma = 3.5 (from the "_35" name suffix).
+ * 1024 entries mapping a 10-bit input code (0..1023) to a 10-bit output;
+ * monotonic non-decreasing, with 0 -> 0 and 1023 -> 1023.
+ * NOTE(review): values appear to follow round(1023 * (in/1023)^3.5) --
+ * confirm against the table generator before relying on the formula.
+ */
+static const u16 xgamma10_35[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9,
+ 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10,
+ 10, 10, 11, 11, 11, 11, 11, 11, 11, 12, 12,
+ 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13,
+ 13, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15,
+ 15, 15, 16, 16, 16, 16, 16, 17, 17, 17, 17,
+ 17, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19,
+ 20, 20, 20, 20, 20, 21, 21, 21, 21, 21, 22,
+ 22, 22, 22, 23, 23, 23, 23, 23, 24, 24, 24,
+ 24, 25, 25, 25, 25, 26, 26, 26, 26, 27, 27,
+ 27, 27, 28, 28, 28, 29, 29, 29, 29, 30, 30,
+ 30, 31, 31, 31, 31, 32, 32, 32, 33, 33, 33,
+ 33, 34, 34, 34, 35, 35, 35, 36, 36, 36, 37,
+ 37, 37, 38, 38, 38, 39, 39, 39, 40, 40, 40,
+ 41, 41, 41, 42, 42, 42, 43, 43, 43, 44, 44,
+ 45, 45, 45, 46, 46, 47, 47, 47, 48, 48, 48,
+ 49, 49, 50, 50, 50, 51, 51, 52, 52, 53, 53,
+ 53, 54, 54, 55, 55, 56, 56, 56, 57, 57, 58,
+ 58, 59, 59, 60, 60, 60, 61, 61, 62, 62, 63,
+ 63, 64, 64, 65, 65, 66, 66, 67, 67, 68, 68,
+ 69, 69, 70, 70, 71, 71, 72, 72, 73, 73, 74,
+ 75, 75, 76, 76, 77, 77, 78, 78, 79, 79, 80,
+ 81, 81, 82, 82, 83, 84, 84, 85, 85, 86, 86,
+ 87, 88, 88, 89, 89, 90, 91, 91, 92, 93, 93,
+ 94, 95, 95, 96, 96, 97, 98, 98, 99, 100, 100,
+ 101, 102, 102, 103, 104, 104, 105, 106, 107, 107, 108,
+ 109, 109, 110, 111, 111, 112, 113, 114, 114, 115, 116,
+ 117, 117, 118, 119, 120, 120, 121, 122, 123, 123, 124,
+ 125, 126, 126, 127, 128, 129, 130, 130, 131, 132, 133,
+ 134, 135, 135, 136, 137, 138, 139, 140, 140, 141, 142,
+ 143, 144, 145, 146, 146, 147, 148, 149, 150, 151, 152,
+ 153, 154, 154, 155, 156, 157, 158, 159, 160, 161, 162,
+ 163, 164, 165, 166, 167, 167, 168, 169, 170, 171, 172,
+ 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183,
+ 184, 185, 186, 188, 189, 190, 191, 192, 193, 194, 195,
+ 196, 197, 198, 199, 200, 201, 202, 204, 205, 206, 207,
+ 208, 209, 210, 211, 213, 214, 215, 216, 217, 218, 219,
+ 221, 222, 223, 224, 225, 227, 228, 229, 230, 231, 233,
+ 234, 235, 236, 237, 239, 240, 241, 242, 244, 245, 246,
+ 247, 249, 250, 251, 253, 254, 255, 256, 258, 259, 260,
+ 262, 263, 264, 266, 267, 268, 270, 271, 272, 274, 275,
+ 277, 278, 279, 281, 282, 284, 285, 286, 288, 289, 291,
+ 292, 293, 295, 296, 298, 299, 301, 302, 304, 305, 307,
+ 308, 310, 311, 313, 314, 316, 317, 319, 320, 322, 323,
+ 325, 326, 328, 329, 331, 332, 334, 336, 337, 339, 340,
+ 342, 344, 345, 347, 348, 350, 352, 353, 355, 357, 358,
+ 360, 362, 363, 365, 367, 368, 370, 372, 373, 375, 377,
+ 378, 380, 382, 384, 385, 387, 389, 391, 392, 394, 396,
+ 398, 400, 401, 403, 405, 407, 409, 410, 412, 414, 416,
+ 418, 420, 421, 423, 425, 427, 429, 431, 433, 435, 436,
+ 438, 440, 442, 444, 446, 448, 450, 452, 454, 456, 458,
+ 460, 462, 464, 466, 468, 470, 472, 474, 476, 478, 480,
+ 482, 484, 486, 488, 490, 492, 494, 496, 498, 500, 503,
+ 505, 507, 509, 511, 513, 515, 517, 520, 522, 524, 526,
+ 528, 531, 533, 535, 537, 539, 542, 544, 546, 548, 550,
+ 553, 555, 557, 560, 562, 564, 566, 569, 571, 573, 576,
+ 578, 580, 583, 585, 587, 590, 592, 594, 597, 599, 602,
+ 604, 606, 609, 611, 614, 616, 618, 621, 623, 626, 628,
+ 631, 633, 636, 638, 641, 643, 646, 648, 651, 653, 656,
+ 658, 661, 664, 666, 669, 671, 674, 677, 679, 682, 684,
+ 687, 690, 692, 695, 698, 700, 703, 706, 708, 711, 714,
+ 716, 719, 722, 725, 727, 730, 733, 736, 738, 741, 744,
+ 747, 750, 752, 755, 758, 761, 764, 766, 769, 772, 775,
+ 778, 781, 784, 787, 789, 792, 795, 798, 801, 804, 807,
+ 810, 813, 816, 819, 822, 825, 828, 831, 834, 837, 840,
+ 843, 846, 849, 852, 855, 858, 862, 865, 868, 871, 874,
+ 877, 880, 883, 887, 890, 893, 896, 899, 902, 906, 909,
+ 912, 915, 919, 922, 925, 928, 932, 935, 938, 941, 945,
+ 948, 951, 955, 958, 961, 965, 968, 971, 975, 978, 982,
+ 985, 988, 992, 995, 999, 1002, 1006, 1009, 1013, 1016, 1020,
+ 1023,
+};
+
+/*
+ * 10-bit gamma look-up table, gamma = 3.6 (from the "_36" name suffix).
+ * 1024 entries mapping a 10-bit input code (0..1023) to a 10-bit output;
+ * monotonic non-decreasing, with 0 -> 0 and 1023 -> 1023.
+ * NOTE(review): values appear to follow round(1023 * (in/1023)^3.6) --
+ * confirm against the table generator before relying on the formula.
+ */
+static const u16 xgamma10_36[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9,
+ 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10,
+ 10, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12,
+ 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13,
+ 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15,
+ 15, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17,
+ 17, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19,
+ 20, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22,
+ 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24,
+ 25, 25, 25, 25, 26, 26, 26, 26, 27, 27, 27,
+ 27, 28, 28, 28, 28, 29, 29, 29, 29, 30, 30,
+ 30, 31, 31, 31, 31, 32, 32, 32, 33, 33, 33,
+ 34, 34, 34, 35, 35, 35, 35, 36, 36, 36, 37,
+ 37, 37, 38, 38, 38, 39, 39, 39, 40, 40, 40,
+ 41, 41, 41, 42, 42, 43, 43, 43, 44, 44, 44,
+ 45, 45, 46, 46, 46, 47, 47, 47, 48, 48, 49,
+ 49, 49, 50, 50, 51, 51, 52, 52, 52, 53, 53,
+ 54, 54, 54, 55, 55, 56, 56, 57, 57, 58, 58,
+ 58, 59, 59, 60, 60, 61, 61, 62, 62, 63, 63,
+ 64, 64, 65, 65, 66, 66, 67, 67, 68, 68, 69,
+ 69, 70, 70, 71, 71, 72, 72, 73, 73, 74, 74,
+ 75, 76, 76, 77, 77, 78, 78, 79, 79, 80, 81,
+ 81, 82, 82, 83, 83, 84, 85, 85, 86, 86, 87,
+ 88, 88, 89, 90, 90, 91, 91, 92, 93, 93, 94,
+ 95, 95, 96, 97, 97, 98, 99, 99, 100, 101, 101,
+ 102, 103, 103, 104, 105, 105, 106, 107, 107, 108, 109,
+ 110, 110, 111, 112, 112, 113, 114, 115, 115, 116, 117,
+ 118, 118, 119, 120, 121, 121, 122, 123, 124, 125, 125,
+ 126, 127, 128, 129, 129, 130, 131, 132, 133, 133, 134,
+ 135, 136, 137, 138, 138, 139, 140, 141, 142, 143, 144,
+ 145, 145, 146, 147, 148, 149, 150, 151, 152, 153, 153,
+ 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
+ 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186,
+ 187, 188, 189, 190, 191, 192, 193, 194, 196, 197, 198,
+ 199, 200, 201, 202, 203, 204, 205, 207, 208, 209, 210,
+ 211, 212, 214, 215, 216, 217, 218, 219, 221, 222, 223,
+ 224, 225, 227, 228, 229, 230, 231, 233, 234, 235, 236,
+ 238, 239, 240, 241, 243, 244, 245, 247, 248, 249, 250,
+ 252, 253, 254, 256, 257, 258, 260, 261, 262, 264, 265,
+ 266, 268, 269, 271, 272, 273, 275, 276, 277, 279, 280,
+ 282, 283, 285, 286, 287, 289, 290, 292, 293, 295, 296,
+ 298, 299, 301, 302, 304, 305, 307, 308, 310, 311, 313,
+ 314, 316, 317, 319, 320, 322, 324, 325, 327, 328, 330,
+ 331, 333, 335, 336, 338, 339, 341, 343, 344, 346, 348,
+ 349, 351, 353, 354, 356, 358, 359, 361, 363, 364, 366,
+ 368, 370, 371, 373, 375, 377, 378, 380, 382, 384, 385,
+ 387, 389, 391, 393, 394, 396, 398, 400, 402, 403, 405,
+ 407, 409, 411, 413, 415, 416, 418, 420, 422, 424, 426,
+ 428, 430, 432, 434, 436, 438, 439, 441, 443, 445, 447,
+ 449, 451, 453, 455, 457, 459, 461, 463, 465, 467, 470,
+ 472, 474, 476, 478, 480, 482, 484, 486, 488, 490, 492,
+ 495, 497, 499, 501, 503, 505, 508, 510, 512, 514, 516,
+ 518, 521, 523, 525, 527, 530, 532, 534, 536, 539, 541,
+ 543, 545, 548, 550, 552, 555, 557, 559, 562, 564, 566,
+ 569, 571, 573, 576, 578, 580, 583, 585, 588, 590, 592,
+ 595, 597, 600, 602, 605, 607, 610, 612, 615, 617, 620,
+ 622, 625, 627, 630, 632, 635, 637, 640, 642, 645, 648,
+ 650, 653, 655, 658, 661, 663, 666, 669, 671, 674, 677,
+ 679, 682, 685, 687, 690, 693, 695, 698, 701, 704, 706,
+ 709, 712, 715, 717, 720, 723, 726, 729, 732, 734, 737,
+ 740, 743, 746, 749, 751, 754, 757, 760, 763, 766, 769,
+ 772, 775, 778, 781, 784, 787, 790, 793, 796, 799, 802,
+ 805, 808, 811, 814, 817, 820, 823, 826, 829, 832, 835,
+ 838, 842, 845, 848, 851, 854, 857, 860, 864, 867, 870,
+ 873, 876, 880, 883, 886, 889, 893, 896, 899, 903, 906,
+ 909, 912, 916, 919, 922, 926, 929, 932, 936, 939, 943,
+ 946, 949, 953, 956, 960, 963, 967, 970, 973, 977, 980,
+ 984, 987, 991, 994, 998, 1002, 1005, 1009, 1012, 1016, 1019,
+ 1023,
+};
+
+/*
+ * 10-bit gamma look-up table, gamma = 3.7 (from the "_37" name suffix).
+ * 1024 entries mapping a 10-bit input code (0..1023) to a 10-bit output;
+ * monotonic non-decreasing, with 0 -> 0 and 1023 -> 1023.
+ * NOTE(review): values appear to follow round(1023 * (in/1023)^3.7) --
+ * confirm against the table generator before relying on the formula.
+ */
+static const u16 xgamma10_37[GAMMA10_TABLE_LENGTH] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9,
+ 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10,
+ 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12,
+ 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 14,
+ 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15,
+ 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17,
+ 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 20,
+ 20, 20, 20, 20, 21, 21, 21, 21, 21, 22, 22,
+ 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 24,
+ 25, 25, 25, 25, 26, 26, 26, 26, 27, 27, 27,
+ 28, 28, 28, 28, 29, 29, 29, 29, 30, 30, 30,
+ 31, 31, 31, 31, 32, 32, 32, 33, 33, 33, 33,
+ 34, 34, 34, 35, 35, 35, 36, 36, 36, 37, 37,
+ 37, 38, 38, 38, 39, 39, 39, 40, 40, 40, 41,
+ 41, 41, 42, 42, 42, 43, 43, 44, 44, 44, 45,
+ 45, 45, 46, 46, 47, 47, 47, 48, 48, 49, 49,
+ 49, 50, 50, 51, 51, 51, 52, 52, 53, 53, 54,
+ 54, 54, 55, 55, 56, 56, 57, 57, 58, 58, 58,
+ 59, 59, 60, 60, 61, 61, 62, 62, 63, 63, 64,
+ 64, 65, 65, 66, 66, 67, 67, 68, 68, 69, 69,
+ 70, 70, 71, 71, 72, 72, 73, 73, 74, 75, 75,
+ 76, 76, 77, 77, 78, 78, 79, 80, 80, 81, 81,
+ 82, 82, 83, 84, 84, 85, 85, 86, 87, 87, 88,
+ 89, 89, 90, 90, 91, 92, 92, 93, 94, 94, 95,
+ 96, 96, 97, 98, 98, 99, 100, 100, 101, 102, 102,
+ 103, 104, 104, 105, 106, 106, 107, 108, 109, 109, 110,
+ 111, 112, 112, 113, 114, 114, 115, 116, 117, 118, 118,
+ 119, 120, 121, 121, 122, 123, 124, 125, 125, 126, 127,
+ 128, 129, 129, 130, 131, 132, 133, 134, 134, 135, 136,
+ 137, 138, 139, 139, 140, 141, 142, 143, 144, 145, 146,
+ 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156,
+ 157, 158, 158, 159, 160, 161, 162, 163, 164, 165, 166,
+ 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+ 178, 179, 180, 181, 182, 184, 185, 186, 187, 188, 189,
+ 190, 191, 192, 193, 194, 195, 197, 198, 199, 200, 201,
+ 202, 203, 204, 206, 207, 208, 209, 210, 211, 213, 214,
+ 215, 216, 217, 218, 220, 221, 222, 223, 225, 226, 227,
+ 228, 229, 231, 232, 233, 234, 236, 237, 238, 240, 241,
+ 242, 243, 245, 246, 247, 249, 250, 251, 253, 254, 255,
+ 257, 258, 259, 261, 262, 263, 265, 266, 268, 269, 270,
+ 272, 273, 275, 276, 277, 279, 280, 282, 283, 285, 286,
+ 288, 289, 291, 292, 294, 295, 297, 298, 300, 301, 303,
+ 304, 306, 307, 309, 310, 312, 313, 315, 316, 318, 320,
+ 321, 323, 324, 326, 328, 329, 331, 332, 334, 336, 337,
+ 339, 341, 342, 344, 346, 347, 349, 351, 352, 354, 356,
+ 358, 359, 361, 363, 364, 366, 368, 370, 372, 373, 375,
+ 377, 379, 380, 382, 384, 386, 388, 389, 391, 393, 395,
+ 397, 399, 401, 402, 404, 406, 408, 410, 412, 414, 416,
+ 418, 420, 421, 423, 425, 427, 429, 431, 433, 435, 437,
+ 439, 441, 443, 445, 447, 449, 451, 453, 455, 457, 459,
+ 462, 464, 466, 468, 470, 472, 474, 476, 478, 480, 483,
+ 485, 487, 489, 491, 493, 496, 498, 500, 502, 504, 507,
+ 509, 511, 513, 515, 518, 520, 522, 524, 527, 529, 531,
+ 534, 536, 538, 541, 543, 545, 548, 550, 552, 555, 557,
+ 559, 562, 564, 567, 569, 571, 574, 576, 579, 581, 584,
+ 586, 589, 591, 593, 596, 598, 601, 603, 606, 609, 611,
+ 614, 616, 619, 621, 624, 626, 629, 632, 634, 637, 639,
+ 642, 645, 647, 650, 653, 655, 658, 661, 663, 666, 669,
+ 672, 674, 677, 680, 682, 685, 688, 691, 694, 696, 699,
+ 702, 705, 708, 710, 713, 716, 719, 722, 725, 728, 730,
+ 733, 736, 739, 742, 745, 748, 751, 754, 757, 760, 763,
+ 766, 769, 772, 775, 778, 781, 784, 787, 790, 793, 796,
+ 799, 802, 805, 809, 812, 815, 818, 821, 824, 827, 831,
+ 834, 837, 840, 843, 847, 850, 853, 856, 860, 863, 866,
+ 869, 873, 876, 879, 883, 886, 889, 893, 896, 899, 903,
+ 906, 910, 913, 916, 920, 923, 927, 930, 934, 937, 940,
+ 944, 947, 951, 954, 958, 961, 965, 969, 972, 976, 979,
+ 983, 986, 990, 994, 997, 1001, 1005, 1008, 1012, 1016, 1019,
+ 1023,
+};
+
+/* Gamma 3.8 curve: 10-bit LUT mapping input code 0-1023 to output 0-1023 */
+static const u16 xgamma10_38[GAMMA10_TABLE_LENGTH] = {
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
+	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+	2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4,
+	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+	4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5,
+	5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6,
+	6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7,
+	7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8,
+	8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9,
+	9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11,
+	11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12,
+	12, 12, 13, 13, 13, 13, 13, 13, 13, 14, 14,
+	14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 16,
+	16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 18,
+	18, 18, 18, 18, 19, 19, 19, 19, 19, 20, 20,
+	20, 20, 20, 21, 21, 21, 21, 21, 22, 22, 22,
+	22, 23, 23, 23, 23, 24, 24, 24, 24, 24, 25,
+	25, 25, 25, 26, 26, 26, 26, 27, 27, 27, 28,
+	28, 28, 28, 29, 29, 29, 29, 30, 30, 30, 31,
+	31, 31, 31, 32, 32, 32, 33, 33, 33, 33, 34,
+	34, 34, 35, 35, 35, 36, 36, 36, 37, 37, 37,
+	38, 38, 38, 39, 39, 39, 40, 40, 40, 41, 41,
+	41, 42, 42, 43, 43, 43, 44, 44, 44, 45, 45,
+	46, 46, 46, 47, 47, 47, 48, 48, 49, 49, 49,
+	50, 50, 51, 51, 52, 52, 52, 53, 53, 54, 54,
+	55, 55, 55, 56, 56, 57, 57, 58, 58, 59, 59,
+	60, 60, 60, 61, 61, 62, 62, 63, 63, 64, 64,
+	65, 65, 66, 66, 67, 67, 68, 68, 69, 69, 70,
+	70, 71, 72, 72, 73, 73, 74, 74, 75, 75, 76,
+	76, 77, 78, 78, 79, 79, 80, 81, 81, 82, 82,
+	83, 83, 84, 85, 85, 86, 86, 87, 88, 88, 89,
+	90, 90, 91, 92, 92, 93, 93, 94, 95, 95, 96,
+	97, 97, 98, 99, 99, 100, 101, 102, 102, 103, 104,
+	104, 105, 106, 106, 107, 108, 109, 109, 110, 111, 112,
+	112, 113, 114, 115, 115, 116, 117, 118, 118, 119, 120,
+	121, 122, 122, 123, 124, 125, 126, 126, 127, 128, 129,
+	130, 130, 131, 132, 133, 134, 135, 136, 136, 137, 138,
+	139, 140, 141, 142, 143, 143, 144, 145, 146, 147, 148,
+	149, 150, 151, 152, 153, 153, 154, 155, 156, 157, 158,
+	159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
+	170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180,
+	182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+	193, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205,
+	206, 207, 208, 210, 211, 212, 213, 214, 216, 217, 218,
+	219, 220, 222, 223, 224, 225, 227, 228, 229, 230, 232,
+	233, 234, 235, 237, 238, 239, 241, 242, 243, 245, 246,
+	247, 249, 250, 251, 253, 254, 255, 257, 258, 259, 261,
+	262, 264, 265, 266, 268, 269, 271, 272, 274, 275, 276,
+	278, 279, 281, 282, 284, 285, 287, 288, 290, 291, 293,
+	294, 296, 297, 299, 300, 302, 303, 305, 307, 308, 310,
+	311, 313, 314, 316, 318, 319, 321, 323, 324, 326, 327,
+	329, 331, 332, 334, 336, 337, 339, 341, 342, 344, 346,
+	348, 349, 351, 353, 354, 356, 358, 360, 361, 363, 365,
+	367, 369, 370, 372, 374, 376, 378, 379, 381, 383, 385,
+	387, 389, 391, 392, 394, 396, 398, 400, 402, 404, 406,
+	408, 410, 412, 413, 415, 417, 419, 421, 423, 425, 427,
+	429, 431, 433, 435, 437, 439, 441, 443, 446, 448, 450,
+	452, 454, 456, 458, 460, 462, 464, 466, 469, 471, 473,
+	475, 477, 479, 482, 484, 486, 488, 490, 493, 495, 497,
+	499, 501, 504, 506, 508, 511, 513, 515, 517, 520, 522,
+	524, 527, 529, 531, 534, 536, 538, 541, 543, 546, 548,
+	550, 553, 555, 558, 560, 562, 565, 567, 570, 572, 575,
+	577, 580, 582, 585, 587, 590, 592, 595, 597, 600, 603,
+	605, 608, 610, 613, 616, 618, 621, 623, 626, 629, 631,
+	634, 637, 639, 642, 645, 648, 650, 653, 656, 658, 661,
+	664, 667, 669, 672, 675, 678, 681, 684, 686, 689, 692,
+	695, 698, 701, 703, 706, 709, 712, 715, 718, 721, 724,
+	727, 730, 733, 736, 739, 742, 745, 748, 751, 754, 757,
+	760, 763, 766, 769, 772, 775, 778, 781, 785, 788, 791,
+	794, 797, 800, 803, 807, 810, 813, 816, 820, 823, 826,
+	829, 832, 836, 839, 842, 846, 849, 852, 856, 859, 862,
+	866, 869, 872, 876, 879, 883, 886, 889, 893, 896, 900,
+	903, 907, 910, 914, 917, 921, 924, 928, 931, 935, 938,
+	942, 945, 949, 953, 956, 960, 964, 967, 971, 974, 978,
+	982, 986, 989, 993, 997, 1000, 1004, 1008, 1012, 1015, 1019,
+	1023,
+};
+
+/* Gamma 3.9 curve: 10-bit LUT mapping input code 0-1023 to output 0-1023 */
+static const u16 xgamma10_39[GAMMA10_TABLE_LENGTH] = {
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
+	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3,
+	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4,
+	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+	4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+	5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6,
+	6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7,
+	7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8,
+	8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
+	9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11,
+	11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12,
+	12, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14,
+	14, 14, 14, 15, 15, 15, 15, 15, 15, 16, 16,
+	16, 16, 16, 16, 17, 17, 17, 17, 17, 18, 18,
+	18, 18, 18, 19, 19, 19, 19, 19, 20, 20, 20,
+	20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 22,
+	23, 23, 23, 23, 24, 24, 24, 24, 25, 25, 25,
+	25, 26, 26, 26, 26, 27, 27, 27, 27, 28, 28,
+	28, 28, 29, 29, 29, 29, 30, 30, 30, 31, 31,
+	31, 31, 32, 32, 32, 33, 33, 33, 34, 34, 34,
+	35, 35, 35, 35, 36, 36, 36, 37, 37, 37, 38,
+	38, 38, 39, 39, 39, 40, 40, 41, 41, 41, 42,
+	42, 42, 43, 43, 43, 44, 44, 45, 45, 45, 46,
+	46, 46, 47, 47, 48, 48, 48, 49, 49, 50, 50,
+	51, 51, 51, 52, 52, 53, 53, 53, 54, 54, 55,
+	55, 56, 56, 57, 57, 57, 58, 58, 59, 59, 60,
+	60, 61, 61, 62, 62, 63, 63, 64, 64, 65, 65,
+	66, 66, 67, 67, 68, 68, 69, 69, 70, 70, 71,
+	71, 72, 73, 73, 74, 74, 75, 75, 76, 76, 77,
+	78, 78, 79, 79, 80, 80, 81, 82, 82, 83, 83,
+	84, 85, 85, 86, 87, 87, 88, 88, 89, 90, 90,
+	91, 92, 92, 93, 94, 94, 95, 96, 96, 97, 98,
+	98, 99, 100, 100, 101, 102, 102, 103, 104, 105, 105,
+	106, 107, 107, 108, 109, 110, 110, 111, 112, 113, 113,
+	114, 115, 116, 116, 117, 118, 119, 120, 120, 121, 122,
+	123, 124, 124, 125, 126, 127, 128, 129, 129, 130, 131,
+	132, 133, 134, 134, 135, 136, 137, 138, 139, 140, 141,
+	141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151,
+	152, 153, 153, 154, 155, 156, 157, 158, 159, 160, 161,
+	162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+	173, 174, 176, 177, 178, 179, 180, 181, 182, 183, 184,
+	185, 186, 187, 188, 190, 191, 192, 193, 194, 195, 196,
+	198, 199, 200, 201, 202, 203, 204, 206, 207, 208, 209,
+	210, 212, 213, 214, 215, 217, 218, 219, 220, 221, 223,
+	224, 225, 227, 228, 229, 230, 232, 233, 234, 236, 237,
+	238, 239, 241, 242, 243, 245, 246, 248, 249, 250, 252,
+	253, 254, 256, 257, 259, 260, 261, 263, 264, 266, 267,
+	269, 270, 271, 273, 274, 276, 277, 279, 280, 282, 283,
+	285, 286, 288, 289, 291, 292, 294, 295, 297, 299, 300,
+	302, 303, 305, 306, 308, 310, 311, 313, 314, 316, 318,
+	319, 321, 323, 324, 326, 328, 329, 331, 333, 334, 336,
+	338, 340, 341, 343, 345, 346, 348, 350, 352, 353, 355,
+	357, 359, 361, 362, 364, 366, 368, 370, 372, 373, 375,
+	377, 379, 381, 383, 385, 386, 388, 390, 392, 394, 396,
+	398, 400, 402, 404, 406, 408, 410, 412, 414, 416, 418,
+	420, 422, 424, 426, 428, 430, 432, 434, 436, 438, 440,
+	442, 444, 446, 448, 451, 453, 455, 457, 459, 461, 463,
+	466, 468, 470, 472, 474, 477, 479, 481, 483, 485, 488,
+	490, 492, 494, 497, 499, 501, 504, 506, 508, 511, 513,
+	515, 518, 520, 522, 525, 527, 529, 532, 534, 537, 539,
+	541, 544, 546, 549, 551, 554, 556, 559, 561, 564, 566,
+	569, 571, 574, 576, 579, 581, 584, 586, 589, 592, 594,
+	597, 599, 602, 605, 607, 610, 613, 615, 618, 621, 623,
+	626, 629, 632, 634, 637, 640, 643, 645, 648, 651, 654,
+	656, 659, 662, 665, 668, 671, 673, 676, 679, 682, 685,
+	688, 691, 694, 697, 700, 702, 705, 708, 711, 714, 717,
+	720, 723, 726, 729, 732, 735, 739, 742, 745, 748, 751,
+	754, 757, 760, 763, 766, 770, 773, 776, 779, 782, 786,
+	789, 792, 795, 798, 802, 805, 808, 811, 815, 818, 821,
+	825, 828, 831, 835, 838, 841, 845, 848, 852, 855, 858,
+	862, 865, 869, 872, 876, 879, 883, 886, 890, 893, 897,
+	900, 904, 907, 911, 914, 918, 922, 925, 929, 933, 936,
+	940, 944, 947, 951, 955, 958, 962, 966, 969, 973, 977,
+	981, 985, 988, 992, 996, 1000, 1004, 1007, 1011, 1015, 1019,
+	1023,
+};
+
+/* Gamma 4.0 curve: 10-bit LUT mapping input code 0-1023 to output 0-1023 */
+static const u16 xgamma10_40[GAMMA10_TABLE_LENGTH] = {
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
+	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
+	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+	2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4,
+	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+	5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6,
+	6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7,
+	7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8,
+	8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10,
+	10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11,
+	11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12,
+	13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14,
+	14, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16,
+	16, 16, 17, 17, 17, 17, 17, 18, 18, 18, 18,
+	18, 18, 19, 19, 19, 19, 19, 20, 20, 20, 20,
+	21, 21, 21, 21, 21, 22, 22, 22, 22, 23, 23,
+	23, 23, 23, 24, 24, 24, 24, 25, 25, 25, 25,
+	26, 26, 26, 26, 27, 27, 27, 27, 28, 28, 28,
+	29, 29, 29, 29, 30, 30, 30, 30, 31, 31, 31,
+	32, 32, 32, 33, 33, 33, 33, 34, 34, 34, 35,
+	35, 35, 36, 36, 36, 37, 37, 37, 38, 38, 38,
+	39, 39, 39, 40, 40, 40, 41, 41, 41, 42, 42,
+	43, 43, 43, 44, 44, 44, 45, 45, 46, 46, 46,
+	47, 47, 48, 48, 48, 49, 49, 50, 50, 50, 51,
+	51, 52, 52, 53, 53, 53, 54, 54, 55, 55, 56,
+	56, 57, 57, 57, 58, 58, 59, 59, 60, 60, 61,
+	61, 62, 62, 63, 63, 64, 64, 65, 65, 66, 66,
+	67, 67, 68, 68, 69, 69, 70, 70, 71, 72, 72,
+	73, 73, 74, 74, 75, 75, 76, 77, 77, 78, 78,
+	79, 79, 80, 81, 81, 82, 82, 83, 84, 84, 85,
+	85, 86, 87, 87, 88, 89, 89, 90, 91, 91, 92,
+	93, 93, 94, 95, 95, 96, 97, 97, 98, 99, 99,
+	100, 101, 101, 102, 103, 104, 104, 105, 106, 106, 107,
+	108, 109, 109, 110, 111, 112, 112, 113, 114, 115, 116,
+	116, 117, 118, 119, 119, 120, 121, 122, 123, 123, 124,
+	125, 126, 127, 128, 128, 129, 130, 131, 132, 133, 134,
+	134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 143,
+	144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154,
+	155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165,
+	166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176,
+	177, 178, 179, 180, 182, 183, 184, 185, 186, 187, 188,
+	189, 190, 192, 193, 194, 195, 196, 197, 199, 200, 201,
+	202, 203, 204, 206, 207, 208, 209, 210, 212, 213, 214,
+	215, 217, 218, 219, 220, 222, 223, 224, 226, 227, 228,
+	229, 231, 232, 233, 235, 236, 237, 239, 240, 241, 243,
+	244, 245, 247, 248, 250, 251, 252, 254, 255, 257, 258,
+	259, 261, 262, 264, 265, 267, 268, 270, 271, 273, 274,
+	276, 277, 279, 280, 282, 283, 285, 286, 288, 289, 291,
+	292, 294, 296, 297, 299, 300, 302, 304, 305, 307, 308,
+	310, 312, 313, 315, 317, 318, 320, 322, 323, 325, 327,
+	328, 330, 332, 333, 335, 337, 339, 340, 342, 344, 346,
+	348, 349, 351, 353, 355, 357, 358, 360, 362, 364, 366,
+	368, 369, 371, 373, 375, 377, 379, 381, 383, 385, 386,
+	388, 390, 392, 394, 396, 398, 400, 402, 404, 406, 408,
+	410, 412, 414, 416, 418, 420, 422, 424, 426, 429, 431,
+	433, 435, 437, 439, 441, 443, 445, 448, 450, 452, 454,
+	456, 458, 461, 463, 465, 467, 469, 472, 474, 476, 478,
+	481, 483, 485, 488, 490, 492, 495, 497, 499, 501, 504,
+	506, 509, 511, 513, 516, 518, 521, 523, 525, 528, 530,
+	533, 535, 538, 540, 543, 545, 548, 550, 553, 555, 558,
+	560, 563, 565, 568, 570, 573, 576, 578, 581, 583, 586,
+	589, 591, 594, 597, 599, 602, 605, 607, 610, 613, 616,
+	618, 621, 624, 627, 629, 632, 635, 638, 641, 643, 646,
+	649, 652, 655, 658, 660, 663, 666, 669, 672, 675, 678,
+	681, 684, 687, 690, 693, 696, 699, 702, 705, 708, 711,
+	714, 717, 720, 723, 726, 729, 732, 735, 739, 742, 745,
+	748, 751, 754, 758, 761, 764, 767, 770, 774, 777, 780,
+	783, 787, 790, 793, 797, 800, 803, 807, 810, 813, 817,
+	820, 824, 827, 830, 834, 837, 841, 844, 848, 851, 855,
+	858, 862, 865, 869, 872, 876, 879, 883, 886, 890, 894,
+	897, 901, 905, 908, 912, 916, 919, 923, 927, 930, 934,
+	938, 942, 945, 949, 953, 957, 960, 964, 968, 972, 976,
+	980, 984, 987, 991, 995, 999, 1003, 1007, 1011, 1015, 1019,
+	1023,
+};
+
+/*
+ * Gamma curve selection table for 10-bit color depth.  Entry i holds the
+ * LUT for gamma value (i + 1) / 10, so entry 0 is gamma 0.1 and entry 39
+ * is gamma 4.0.  Array names decay to a pointer to their first element.
+ */
+static const u16 *xgamma10_curves[GAMMA_CURVE_LENGTH] = {
+	xgamma10_01,
+	xgamma10_02,
+	xgamma10_03,
+	xgamma10_04,
+	xgamma10_05,
+	xgamma10_06,
+	xgamma10_07,
+	xgamma10_08,
+	xgamma10_09,
+	xgamma10_10,
+	xgamma10_11,
+	xgamma10_12,
+	xgamma10_13,
+	xgamma10_14,
+	xgamma10_15,
+	xgamma10_16,
+	xgamma10_17,
+	xgamma10_18,
+	xgamma10_19,
+	xgamma10_20,
+	xgamma10_21,
+	xgamma10_22,
+	xgamma10_23,
+	xgamma10_24,
+	xgamma10_25,
+	xgamma10_26,
+	xgamma10_27,
+	xgamma10_28,
+	xgamma10_29,
+	xgamma10_30,
+	xgamma10_31,
+	xgamma10_32,
+	xgamma10_33,
+	xgamma10_34,
+	xgamma10_35,
+	xgamma10_36,
+	xgamma10_37,
+	xgamma10_38,
+	xgamma10_39,
+	xgamma10_40,
+};
+#endif /* __XILINX_GAMMA_COEFF_H__ */
diff --git a/drivers/media/platform/xilinx/xilinx-gamma.c b/drivers/media/platform/xilinx/xilinx-gamma.c
new file mode 100644
index 000000000000..d7996d0f34fa
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-gamma.c
@@ -0,0 +1,543 @@
+/*
+ * Xilinx Gamma Correction IP
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/xilinx-v4l2-controls.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-gamma-coeff.h"
+#include "xilinx-vip.h"
+
+/* Frame size limits and defaults, in pixels */
+#define XGAMMA_MIN_HEIGHT (64)
+#define XGAMMA_MAX_HEIGHT (4320)
+#define XGAMMA_DEF_HEIGHT (720)
+#define XGAMMA_MIN_WIDTH (64)
+#define XGAMMA_MAX_WIDTH (8192)
+#define XGAMMA_DEF_WIDTH (1280)
+
+/* Register offsets (control, interrupt, geometry, format) */
+#define XGAMMA_AP_CTRL (0x0000)
+#define XGAMMA_GIE (0x0004)
+#define XGAMMA_IER (0x0008)
+#define XGAMMA_ISR (0x000c)
+#define XGAMMA_WIDTH (0x0010)
+#define XGAMMA_HEIGHT (0x0018)
+#define XGAMMA_VIDEO_FORMAT (0x0020)
+/* Per-channel LUT banks; programmed R/G/B in xg_s_stream() */
+#define XGAMMA_GAMMA_LUT_0_BASE (0x0800)
+#define XGAMMA_GAMMA_LUT_1_BASE (0x1000)
+#define XGAMMA_GAMMA_LUT_2_BASE (0x1800)
+
+/* Reset GPIO levels and XGAMMA_AP_CTRL bits */
+#define XGAMMA_RESET_DEASSERT (0)
+#define XGAMMA_RESET_ASSERT (1)
+#define XGAMMA_START BIT(0)
+#define XGAMMA_AUTO_RESTART BIT(7)
+#define XGAMMA_STREAM_ON (XGAMMA_START | XGAMMA_AUTO_RESTART)
+
+/* Memory format code written to XGAMMA_VIDEO_FORMAT; only RGB is used */
+enum xgamma_video_format {
+	XGAMMA_RGB = 0,
+};
+
+/**
+ * struct xgamma_dev - Xilinx Video Gamma LUT device structure
+ * @xvip: Xilinx Video IP device
+ * @pads: Gamma LUT sub-device media pads (index 0 = sink, 1 = source)
+ * @formats: V4L2 media bus formats at the sink and source pads
+ * @default_formats: default V4L2 media bus formats
+ * @ctrl_handler: V4L2 Control Handler for R,G,B Gamma Controls
+ * @red_lut: Pointer to the gamma coefficient as per the Red Gamma control
+ * @green_lut: Pointer to the gamma coefficient as per the Green Gamma control
+ * @blue_lut: Pointer to the gamma coefficient as per the Blue Gamma control
+ * @color_depth: Color depth of the Video Gamma IP (bits per component)
+ * @gamma_table: Pointer to the table containing various gamma values
+ * @rst_gpio: GPIO reset line to bring the Gamma LUT IP out of reset
+ * @max_width: Maximum width supported by this instance.
+ * @max_height: Maximum height supported by this instance.
+ */
+struct xgamma_dev {
+	struct xvip_device xvip;
+	struct media_pad pads[2];
+	struct v4l2_mbus_framefmt formats[2];
+	struct v4l2_mbus_framefmt default_formats[2];
+	struct v4l2_ctrl_handler ctrl_handler;
+
+	const u16 *red_lut;
+	const u16 *green_lut;
+	const u16 *blue_lut;
+	u32 color_depth;
+	const u16 **gamma_table;
+	struct gpio_desc *rst_gpio;
+	u32 max_width;
+	u32 max_height;
+};
+
+/* Read a Gamma LUT register and trace the access at debug level */
+static inline u32 xg_read(struct xgamma_dev *xg, u32 reg)
+{
+	u32 val = xvip_read(&xg->xvip, reg);
+
+	dev_dbg(xg->xvip.dev,
+		"Reading 0x%x from reg offset 0x%x", val, reg);
+	return val;
+}
+
+/*
+ * Write @data to register @reg.  When built with DEBUG, the value is read
+ * back immediately and a mismatch is reported.
+ */
+static inline void xg_write(struct xgamma_dev *xg, u32 reg, u32 data)
+{
+	dev_dbg(xg->xvip.dev,
+		"Writing 0x%x to reg offset 0x%x", data, reg);
+	xvip_write(&xg->xvip, reg, data);
+#ifdef DEBUG
+	if (xg_read(xg, reg) != data)
+		dev_err(xg->xvip.dev,
+			"Write 0x%x does not match read back", data);
+#endif
+}
+
+/* Map a generic V4L2 subdev pointer back to the enclosing xgamma_dev */
+static inline struct xgamma_dev *to_xg(struct v4l2_subdev *subdev)
+{
+	return container_of(subdev, struct xgamma_dev, xvip.subdev);
+}
+
+/*
+ * Return the try format (from @cfg) or the active format (from the device)
+ * for the given pad, or NULL for an unknown @which value.
+ */
+static struct v4l2_mbus_framefmt *
+__xg_get_pad_format(struct xgamma_dev *xg,
+		    struct v4l2_subdev_pad_config *cfg,
+		    unsigned int pad, u32 which)
+{
+	if (which == V4L2_SUBDEV_FORMAT_TRY)
+		return v4l2_subdev_get_try_format(&xg->xvip.subdev, cfg, pad);
+	if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
+		return &xg->formats[pad];
+	return NULL;
+}
+
+/*
+ * Program one LUT bank starting at @lut_base.  Two consecutive 16-bit
+ * coefficients are packed little-end-first into each 32-bit register, so
+ * BIT(color_depth - 1) words cover all BIT(color_depth) entries.
+ */
+static void xg_set_lut_entries(struct xgamma_dev *xg,
+			       const u16 *lut, const u32 lut_base)
+{
+	u32 offset = lut_base;
+	int i;
+
+	for (i = 0; i < BIT(xg->color_depth - 1); i++, offset += 4) {
+		u32 word = lut[2 * i] | (lut[2 * i + 1] << 16);
+
+		xg_write(xg, offset, word);
+	}
+}
+
+/*
+ * xg_s_stream - Start or stop the Gamma LUT core
+ * @subdev: V4L2 subdevice
+ * @enable: non-zero to start streaming, zero to stop
+ *
+ * On stop, the IP is pulsed through its reset GPIO (assert, then deassert)
+ * instead of being stopped via AP_CTRL.  On start, the frame geometry,
+ * video format and all three channel LUTs are programmed before AP_CTRL is
+ * written with start + auto-restart.
+ */
+static int xg_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+	struct xgamma_dev *xg = to_xg(subdev);
+
+	if (!enable) {
+		dev_dbg(xg->xvip.dev, "%s : Off", __func__);
+		gpiod_set_value_cansleep(xg->rst_gpio, XGAMMA_RESET_ASSERT);
+		gpiod_set_value_cansleep(xg->rst_gpio, XGAMMA_RESET_DEASSERT);
+		return 0;
+	}
+	dev_dbg(xg->xvip.dev, "%s : Started", __func__);
+
+	dev_dbg(xg->xvip.dev, "%s : Setting width %d and height %d",
+		__func__, xg->formats[XVIP_PAD_SINK].width,
+		xg->formats[XVIP_PAD_SINK].height);
+	xg_write(xg, XGAMMA_WIDTH, xg->formats[XVIP_PAD_SINK].width);
+	xg_write(xg, XGAMMA_HEIGHT, xg->formats[XVIP_PAD_SINK].height);
+	xg_write(xg, XGAMMA_VIDEO_FORMAT, XGAMMA_RGB);
+	/* R, G and B LUT banks in register order 0, 1, 2 */
+	xg_set_lut_entries(xg, xg->red_lut, XGAMMA_GAMMA_LUT_0_BASE);
+	xg_set_lut_entries(xg, xg->green_lut, XGAMMA_GAMMA_LUT_1_BASE);
+	xg_set_lut_entries(xg, xg->blue_lut, XGAMMA_GAMMA_LUT_2_BASE);
+
+	/* Start GAMMA Correction LUT Video IP */
+	xg_write(xg, XGAMMA_AP_CTRL, XGAMMA_STREAM_ON);
+	return 0;
+}
+
+/* Video operations: only stream on/off is supported */
+static const struct v4l2_subdev_video_ops xg_video_ops = {
+	.s_stream = xg_s_stream,
+};
+
+/* Return the active or try format of the requested pad */
+static int xg_get_format(struct v4l2_subdev *subdev,
+			 struct v4l2_subdev_pad_config *cfg,
+			 struct v4l2_subdev_format *fmt)
+{
+	struct xgamma_dev *xg = to_xg(subdev);
+	struct v4l2_mbus_framefmt *format;
+
+	format = __xg_get_pad_format(xg, cfg, fmt->pad, fmt->which);
+	fmt->format = *format;
+	return 0;
+}
+
+/*
+ * xg_set_format - Set the pad format
+ *
+ * The sink pad only accepts MEDIA_BUS_FMT_RBG888_1X24; any other code is
+ * coerced to it.  Width and height are clamped to the limits read from the
+ * device tree, and the resulting format is always propagated to the source
+ * pad (the IP cannot convert between formats).
+ */
+static int xg_set_format(struct v4l2_subdev *subdev,
+			 struct v4l2_subdev_pad_config *cfg,
+			 struct v4l2_subdev_format *fmt)
+{
+	struct xgamma_dev *xg = to_xg(subdev);
+	struct v4l2_mbus_framefmt *__format;
+
+	__format = __xg_get_pad_format(xg, cfg, fmt->pad, fmt->which);
+	*__format = fmt->format;
+
+	if (fmt->pad == XVIP_PAD_SINK) {
+		if (__format->code != MEDIA_BUS_FMT_RBG888_1X24) {
+			dev_dbg(xg->xvip.dev,
+				"Unsupported sink media bus code format");
+			__format->code = MEDIA_BUS_FMT_RBG888_1X24;
+		}
+	}
+	__format->width = clamp_t(unsigned int, fmt->format.width,
+				  XGAMMA_MIN_WIDTH, xg->max_width);
+	__format->height = clamp_t(unsigned int, fmt->format.height,
+				   XGAMMA_MIN_HEIGHT, xg->max_height);
+
+	fmt->format = *__format;
+	/* Propagate to Source Pad */
+	__format = __xg_get_pad_format(xg, cfg, XVIP_PAD_SOURCE, fmt->which);
+	*__format = fmt->format;
+	return 0;
+}
+
+/* Seed both pads' per-file-handle try formats with the device defaults */
+static int xg_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+	struct xgamma_dev *xg = to_xg(subdev);
+	struct v4l2_mbus_framefmt *try_fmt;
+
+	try_fmt = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SINK);
+	*try_fmt = xg->default_formats[XVIP_PAD_SINK];
+
+	try_fmt = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SOURCE);
+	*try_fmt = xg->default_formats[XVIP_PAD_SOURCE];
+	return 0;
+}
+
+/* No per-file-handle state to release */
+static int xg_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+	return 0;
+}
+
+/* Internal ops: initialize/tear down per-file-handle try formats */
+static const struct v4l2_subdev_internal_ops xg_internal_ops = {
+	.open = xg_open,
+	.close = xg_close,
+};
+
+/* Pad ops: enumeration is delegated to the common xvip helpers */
+static const struct v4l2_subdev_pad_ops xg_pad_ops = {
+	.enum_mbus_code = xvip_enum_mbus_code,
+	.enum_frame_size = xvip_enum_frame_size,
+	.get_fmt = xg_get_format,
+	.set_fmt = xg_set_format,
+};
+
+/* Top-level subdev operations table */
+static const struct v4l2_subdev_ops xg_ops = {
+	.video = &xg_video_ops,
+	.pad = &xg_pad_ops,
+};
+
+/**
+ * select_gamma - Look up the coefficient table for a gamma control value
+ * @value: 1-based control value; value N selects gamma N/10
+ * @coeff: output; set to the selected coefficient table on success
+ * @xgamma_curves: array of GAMMA_CURVE_LENGTH coefficient table pointers
+ *
+ * Return: 0 on success, -EINVAL on a NULL pointer or out-of-range @value.
+ */
+static int
+select_gamma(s32 value, const u16 **coeff, const u16 **xgamma_curves)
+{
+	/* Also guard the table pointer: it is NULL until xg_parse_of() ran */
+	if (!coeff || !xgamma_curves)
+		return -EINVAL;
+	if (value <= 0 || value > GAMMA_CURVE_LENGTH)
+		return -EINVAL;
+
+	*coeff = xgamma_curves[value - 1];
+	return 0;
+}
+
+/*
+ * xg_s_ctrl - Apply an R/G/B gamma control change
+ *
+ * Selects the coefficient table for the new control value and reprograms
+ * the corresponding LUT bank.  Bank assignment matches xg_s_stream():
+ * red -> LUT 0, green -> LUT 1, blue -> LUT 2.  (The green and blue banks
+ * were previously swapped here relative to the streaming path.)
+ */
+static int xg_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	int rval;
+	struct xgamma_dev *xg =
+		container_of(ctrl->handler,
+			     struct xgamma_dev, ctrl_handler);
+	dev_dbg(xg->xvip.dev, "%s called", __func__);
+	switch (ctrl->id) {
+	case V4L2_CID_XILINX_GAMMA_CORR_RED_GAMMA:
+		rval = select_gamma(ctrl->val, &xg->red_lut, xg->gamma_table);
+		if (rval < 0) {
+			dev_err(xg->xvip.dev, "Invalid Red Gamma");
+			return rval;
+		}
+		dev_dbg(xg->xvip.dev, "%s: Setting Red Gamma to %d.%d",
+			__func__, ctrl->val / 10, ctrl->val % 10);
+		xg_set_lut_entries(xg, xg->red_lut, XGAMMA_GAMMA_LUT_0_BASE);
+		break;
+	case V4L2_CID_XILINX_GAMMA_CORR_GREEN_GAMMA:
+		rval = select_gamma(ctrl->val, &xg->green_lut, xg->gamma_table);
+		if (rval < 0) {
+			dev_err(xg->xvip.dev, "Invalid Green Gamma");
+			/* was "return -EINVAL"; propagate rval like R/B */
+			return rval;
+		}
+		dev_dbg(xg->xvip.dev, "%s: Setting Green Gamma to %d.%d",
+			__func__, ctrl->val / 10, ctrl->val % 10);
+		xg_set_lut_entries(xg, xg->green_lut, XGAMMA_GAMMA_LUT_1_BASE);
+		break;
+	case V4L2_CID_XILINX_GAMMA_CORR_BLUE_GAMMA:
+		rval = select_gamma(ctrl->val, &xg->blue_lut, xg->gamma_table);
+		if (rval < 0) {
+			dev_err(xg->xvip.dev, "Invalid Blue Gamma");
+			return rval;
+		}
+		dev_dbg(xg->xvip.dev, "%s: Setting Blue Gamma to %d.%d",
+			__func__, ctrl->val / 10, ctrl->val % 10);
+		xg_set_lut_entries(xg, xg->blue_lut, XGAMMA_GAMMA_LUT_2_BASE);
+		break;
+	}
+	return 0;
+}
+
+/* Control ops: all three gamma controls share one set handler */
+static const struct v4l2_ctrl_ops xg_ctrl_ops = {
+	.s_ctrl = xg_s_ctrl,
+};
+
+/*
+ * Custom R/G/B gamma controls.  The value N (1-40) selects gamma N/10,
+ * as the name strings advertise.  Fix: the Green control name had a stray
+ * trailing ')' that made it inconsistent with the Red and Blue controls.
+ */
+static struct v4l2_ctrl_config xg_ctrls[] = {
+	/* Red Gamma */
+	{
+		.ops = &xg_ctrl_ops,
+		.id = V4L2_CID_XILINX_GAMMA_CORR_RED_GAMMA,
+		.name = "Red Gamma Correction|1->0.1|10->1.0",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.min = 1,
+		.max = 40,
+		.step = 1,
+		.def = 10,
+		.flags = V4L2_CTRL_FLAG_SLIDER,
+	},
+	/* Blue Gamma */
+	{
+		.ops = &xg_ctrl_ops,
+		.id = V4L2_CID_XILINX_GAMMA_CORR_BLUE_GAMMA,
+		.name = "Blue Gamma Correction|1->0.1|10->1.0",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.min = 1,
+		.max = 40,
+		.step = 1,
+		.def = 10,
+		.flags = V4L2_CTRL_FLAG_SLIDER,
+	},
+	/* Green Gamma */
+	{
+		.ops = &xg_ctrl_ops,
+		.id = V4L2_CID_XILINX_GAMMA_CORR_GREEN_GAMMA,
+		.name = "Green Gamma Correction|1->0.1|10->1.0",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.min = 1,
+		.max = 40,
+		.step = 1,
+		.def = 10,
+		.flags = V4L2_CTRL_FLAG_SLIDER,
+	},
+};
+
+/* Media entity ops: use the generic subdev link validation */
+static const struct media_entity_operations xg_media_ops = {
+	.link_validate = v4l2_subdev_link_validate,
+};
+
+/**
+ * xg_parse_of - Parse the Gamma LUT device tree node
+ * @xg: Gamma LUT device
+ *
+ * Reads xlnx,max-height and xlnx,max-width, the per-port xlnx,video-width
+ * (color depth) to pick the 8- or 10-bit gamma table, and the reset GPIO.
+ *
+ * Return: 0 on success, negative errno on a missing or invalid property.
+ */
+static int xg_parse_of(struct xgamma_dev *xg)
+{
+	struct device *dev = xg->xvip.dev;
+	struct device_node *node = dev->of_node;
+	struct device_node *ports;
+	struct device_node *port;
+	u32 port_id = 0;
+	int rval;
+
+	rval = of_property_read_u32(node, "xlnx,max-height", &xg->max_height);
+	if (rval < 0) {
+		dev_err(dev, "xlnx,max-height is missing!");
+		return -EINVAL;
+	} else if (xg->max_height > XGAMMA_MAX_HEIGHT ||
+		   xg->max_height < XGAMMA_MIN_HEIGHT) {
+		dev_err(dev, "Invalid height in dt");
+		return -EINVAL;
+	}
+
+	rval = of_property_read_u32(node, "xlnx,max-width", &xg->max_width);
+	if (rval < 0) {
+		dev_err(dev, "xlnx,max-width is missing!");
+		return -EINVAL;
+	} else if (xg->max_width > XGAMMA_MAX_WIDTH ||
+		   xg->max_width < XGAMMA_MIN_WIDTH) {
+		dev_err(dev, "Invalid width in dt");
+		return -EINVAL;
+	}
+
+	/* Bindings without a "ports" container put port nodes directly */
+	ports = of_get_child_by_name(node, "ports");
+	if (!ports)
+		ports = node;
+
+	/* Get the format description for each pad */
+	for_each_child_of_node(ports, port) {
+		if (port->name && (of_node_cmp(port->name, "port") == 0)) {
+			rval = of_property_read_u32(port, "reg", &port_id);
+			if (rval < 0) {
+				dev_err(dev, "No reg in DT");
+				goto put_port;
+			}
+			if (port_id != 0 && port_id != 1) {
+				dev_err(dev, "Invalid reg in DT");
+				rval = -EINVAL;
+				goto put_port;
+			}
+
+			rval = of_property_read_u32(port, "xlnx,video-width",
+						    &xg->color_depth);
+			if (rval < 0) {
+				/* was "xlnx-video-width": wrong property name */
+				dev_err(dev, "Missing xlnx,video-width in DT");
+				goto put_port;
+			}
+			switch (xg->color_depth) {
+			case GAMMA_BPC_8:
+				xg->gamma_table = xgamma8_curves;
+				break;
+			case GAMMA_BPC_10:
+				xg->gamma_table = xgamma10_curves;
+				break;
+			default:
+				dev_err(dev, "Unsupported color depth %d",
+					xg->color_depth);
+				rval = -EINVAL;
+				goto put_port;
+			}
+		}
+	}
+
+	xg->rst_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+	if (IS_ERR(xg->rst_gpio)) {
+		if (PTR_ERR(xg->rst_gpio) != -EPROBE_DEFER)
+			dev_err(dev, "Reset GPIO not setup in DT");
+		return PTR_ERR(xg->rst_gpio);
+	}
+	return 0;
+
+put_port:
+	/* for_each_child_of_node() holds a reference on @port; drop it */
+	of_node_put(port);
+	return rval;
+}
+
+/**
+ * xg_probe - Probe the Xilinx Video Gamma Correction LUT device
+ * @pdev: platform device
+ *
+ * Parses the DT node, maps the IP, releases the reset line, registers the
+ * media entity, V4L2 controls and async subdev.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int xg_probe(struct platform_device *pdev)
+{
+	struct xgamma_dev *xg;
+	struct v4l2_subdev *subdev;
+	struct v4l2_mbus_framefmt *def_fmt;
+	int rval, itr;
+
+	dev_dbg(&pdev->dev, "Gamma LUT Probe Started");
+	xg = devm_kzalloc(&pdev->dev, sizeof(*xg), GFP_KERNEL);
+	if (!xg)
+		return -ENOMEM;
+	xg->xvip.dev = &pdev->dev;
+	rval = xg_parse_of(xg);
+	if (rval < 0)
+		return rval;
+	/* Fix: this return value was previously ignored */
+	rval = xvip_init_resources(&xg->xvip);
+	if (rval < 0)
+		return rval;
+
+	dev_dbg(xg->xvip.dev, "Reset Xilinx Video Gamma Corrrection");
+	gpiod_set_value_cansleep(xg->rst_gpio, XGAMMA_RESET_DEASSERT);
+
+	/* Init V4L2 subdev */
+	subdev = &xg->xvip.subdev;
+	v4l2_subdev_init(subdev, &xg_ops);
+	subdev->dev = &pdev->dev;
+	subdev->internal_ops = &xg_internal_ops;
+	strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+	subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+	/* Default Formats Initialization */
+	def_fmt = &xg->default_formats[XVIP_PAD_SINK];
+	/* GAMMA LUT IP only to be supported for RGB */
+	def_fmt->code = MEDIA_BUS_FMT_RBG888_1X24;
+	def_fmt->field = V4L2_FIELD_NONE;
+	def_fmt->colorspace = V4L2_COLORSPACE_SRGB;
+	def_fmt->width = XGAMMA_DEF_WIDTH;
+	def_fmt->height = XGAMMA_DEF_HEIGHT;
+	xg->formats[XVIP_PAD_SINK] = *def_fmt;
+
+	def_fmt = &xg->default_formats[XVIP_PAD_SOURCE];
+	*def_fmt = xg->default_formats[XVIP_PAD_SINK];
+	xg->formats[XVIP_PAD_SOURCE] = *def_fmt;
+
+	xg->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+	xg->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+	/* Init Media Entity */
+	subdev->entity.ops = &xg_media_ops;
+	rval = media_entity_pads_init(&subdev->entity, 2, xg->pads);
+	if (rval < 0)
+		goto media_error;
+
+	/* V4L2 Controls */
+	v4l2_ctrl_handler_init(&xg->ctrl_handler, ARRAY_SIZE(xg_ctrls));
+	for (itr = 0; itr < ARRAY_SIZE(xg_ctrls); itr++) {
+		v4l2_ctrl_new_custom(&xg->ctrl_handler,
+				     &xg_ctrls[itr], NULL);
+	}
+	if (xg->ctrl_handler.error) {
+		dev_err(&pdev->dev, "Failed to add V4L2 controls");
+		rval = xg->ctrl_handler.error;
+		goto ctrl_error;
+	}
+	subdev->ctrl_handler = &xg->ctrl_handler;
+	rval = v4l2_ctrl_handler_setup(&xg->ctrl_handler);
+	if (rval < 0) {
+		dev_err(&pdev->dev, "Failed to setup control handler");
+		goto ctrl_error;
+	}
+
+	platform_set_drvdata(pdev, xg);
+	rval = v4l2_async_register_subdev(subdev);
+	if (rval < 0) {
+		dev_err(&pdev->dev, "failed to register subdev");
+		/* Fix: previously skipped freeing the control handler */
+		goto ctrl_error;
+	}
+	dev_info(&pdev->dev,
+		 "Xilinx %d-bit Video Gamma Correction LUT registered",
+		 xg->color_depth);
+	return 0;
+
+ctrl_error:
+	v4l2_ctrl_handler_free(&xg->ctrl_handler);
+	media_entity_cleanup(&subdev->entity);
+media_error:
+	xvip_cleanup_resources(&xg->xvip);
+	return rval;
+}
+
+/* Unregister the subdev and release everything acquired in xg_probe() */
+static int xg_remove(struct platform_device *pdev)
+{
+	struct xgamma_dev *xg = platform_get_drvdata(pdev);
+	struct v4l2_subdev *subdev = &xg->xvip.subdev;
+
+	v4l2_async_unregister_subdev(subdev);
+	/* Fix: the control handler was never freed (noted TODO in probe) */
+	v4l2_ctrl_handler_free(&xg->ctrl_handler);
+	media_entity_cleanup(&subdev->entity);
+	xvip_cleanup_resources(&xg->xvip);
+	return 0;
+}
+
+/* Device tree compatible strings handled by this driver */
+static const struct of_device_id xg_of_id_table[] = {
+	{.compatible = "xlnx,v-gamma-lut"},
+	{ }
+};
+MODULE_DEVICE_TABLE(of, xg_of_id_table);
+
+/* Platform driver glue and module registration */
+static struct platform_driver xg_driver = {
+	.driver = {
+		.name = "xilinx-gamma-lut",
+		.of_match_table = xg_of_id_table,
+	},
+	.probe = xg_probe,
+	.remove = xg_remove,
+};
+
+module_platform_driver(xg_driver);
+MODULE_DESCRIPTION("Xilinx Video Gamma Correction LUT Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-hls-common.h b/drivers/media/platform/xilinx/xilinx-hls-common.h
new file mode 100644
index 000000000000..8ecc3cfb8a83
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-hls-common.h
@@ -0,0 +1,36 @@
+/*
+ * Xilinx HLS common header
+ *
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Radhey Shyam Pandey <radheys@xilinx.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __XILINX_HLS_COMMON_H__
+#define __XILINX_HLS_COMMON_H__
+
+#include <linux/bitops.h>
+
+/* Default frame geometry */
+#define XHLS_DEF_WIDTH 1920
+#define XHLS_DEF_HEIGHT 1080
+
+/*
+ * Control/status bits of the HLS ap_ctrl register.  No offset macro is
+ * defined here; presumably the control register sits at offset 0x00 of the
+ * core, before XHLS_REG_GIE -- confirm against the generated IP spec.
+ */
+#define XHLS_REG_CTRL_DONE BIT(1)
+#define XHLS_REG_CTRL_IDLE BIT(2)
+#define XHLS_REG_CTRL_READY BIT(3)
+#define XHLS_REG_CTRL_AUTO_RESTART BIT(7)
+/* Global interrupt enable */
+#define XHLS_REG_GIE 0x04
+#define XHLS_REG_GIE_GIE BIT(0)
+/* Interrupt enable and status */
+#define XHLS_REG_IER 0x08
+#define XHLS_REG_IER_DONE BIT(0)
+#define XHLS_REG_IER_READY BIT(1)
+#define XHLS_REG_ISR 0x0c
+#define XHLS_REG_ISR_DONE BIT(0)
+#define XHLS_REG_ISR_READY BIT(1)
+/* Frame geometry registers */
+#define XHLS_REG_ROWS 0x10
+#define XHLS_REG_COLS 0x18
+
+#endif /* __XILINX_HLS_COMMON_H__ */
diff --git a/drivers/media/platform/xilinx/xilinx-hls.c b/drivers/media/platform/xilinx/xilinx-hls.c
new file mode 100644
index 000000000000..fc42977440a9
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-hls.c
@@ -0,0 +1,481 @@
+/*
+ * Xilinx HLS Core
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/xilinx-hls.h>
+#include <linux/xilinx-v4l2-controls.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-hls-common.h"
+#include "xilinx-vip.h"
+
+/**
+ * struct xhls_device - Xilinx HLS Core device structure
+ * @xvip: Xilinx Video IP device
+ * @pads: media pads (sink and source)
+ * @compatible: first DT compatible string for the device
+ * @formats: active V4L2 media bus formats at the sink and source pads
+ * @default_formats: default V4L2 media bus formats
+ * @vip_formats: format information corresponding to the pads active formats
+ * @ctrl_handler: control handler
+ * @model: read-only V4L2 control exposing the @compatible string to userspace
+ * @user_mem: user portion of the register space
+ * @user_mem_size: size of the user portion of the register space
+ */
+struct xhls_device {
+ struct xvip_device xvip;
+ struct media_pad pads[2];
+
+ const char *compatible;
+
+ struct v4l2_mbus_framefmt formats[2];
+ struct v4l2_mbus_framefmt default_formats[2];
+ const struct xvip_video_format *vip_formats[2];
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *model;
+
+ void __iomem *user_mem;
+ size_t user_mem_size;
+};
+
+/* Convert a generic V4L2 subdev back to the enclosing HLS device */
+static inline struct xhls_device *to_hls(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xhls_device, xvip.subdev);
+}
+
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+/* Template for the read-only string control carrying the HLS model name */
+static const struct v4l2_ctrl_config xhls_model_ctrl = {
+ .id = V4L2_CID_XILINX_HLS_MODEL,
+ .name = "HLS Model",
+ .type = V4L2_CTRL_TYPE_STRING,
+ .step = 1,
+ .flags = V4L2_CTRL_FLAG_READ_ONLY,
+};
+
+/**
+ * xhls_create_controls - Register the HLS model control
+ * @xhls: HLS core device
+ *
+ * Create a fixed-length (min == max == strlen) read-only string control
+ * initialized to the device's DT compatible string, and attach the control
+ * handler to the subdev.
+ *
+ * Return: 0 on success or the control handler error code on failure.
+ */
+static int xhls_create_controls(struct xhls_device *xhls)
+{
+ struct v4l2_ctrl_config model = xhls_model_ctrl;
+ struct v4l2_ctrl *ctrl;
+
+ /* String controls need explicit bounds; pin both to the string length */
+ model.max = strlen(xhls->compatible);
+ model.min = model.max;
+
+ v4l2_ctrl_handler_init(&xhls->ctrl_handler, 1);
+
+ ctrl = v4l2_ctrl_new_custom(&xhls->ctrl_handler, &model, NULL);
+
+ /* If creation failed, ctrl is NULL and the handler records the error */
+ if (xhls->ctrl_handler.error) {
+ dev_err(xhls->xvip.dev, "failed to add controls\n");
+ return xhls->ctrl_handler.error;
+ }
+
+ v4l2_ctrl_s_ctrl_string(ctrl, xhls->compatible);
+
+ xhls->xvip.subdev.ctrl_handler = &xhls->ctrl_handler;
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Core Operations
+ */
+
+/**
+ * xhls_user_read - Read user-space-selected HLS registers
+ * @xhls: HLS core device
+ * @regs: register list descriptor; @regs->regs is a user-space array
+ *
+ * For each entry, copy the requested offset from user space, bounds-check it
+ * against the user register window, read the register and copy the value
+ * back to user space.
+ *
+ * Return: 0 on success, -EINVAL on an out-of-range count or offset, -EFAULT
+ * on a failed user-space copy.
+ */
+static int xhls_user_read(struct xhls_device *xhls,
+ struct xilinx_axi_hls_registers *regs)
+{
+ unsigned int i;
+ u32 offset;
+ u32 value;
+
+ /*
+ * The window holds user_mem_size / 4 registers and reading all of
+ * them is legitimate, so only counts strictly above that are invalid
+ * (the original ">=" rejected the full-window case by one).
+ */
+ if (regs->num_regs > xhls->user_mem_size / 4)
+ return -EINVAL;
+
+ for (i = 0; i < regs->num_regs; ++i) {
+ if (copy_from_user(&offset, &regs->regs[i].offset,
+ sizeof(offset)))
+ return -EFAULT;
+
+ /* Offsets must lie in the window and be word-aligned */
+ if (offset >= xhls->user_mem_size || offset & 3)
+ return -EINVAL;
+
+ value = ioread32(xhls->user_mem + offset);
+
+ if (copy_to_user(&regs->regs[i].value, &value, sizeof(value)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * xhls_user_write - Write user-space-selected HLS registers
+ * @xhls: HLS core device
+ * @regs: register list descriptor; @regs->regs is a user-space array
+ *
+ * For each entry, copy the offset/value pair from user space, bounds-check
+ * the offset against the user register window and write the value.
+ *
+ * Return: 0 on success, -EINVAL on an out-of-range count or offset, -EFAULT
+ * on a failed user-space copy.
+ */
+static int xhls_user_write(struct xhls_device *xhls,
+ struct xilinx_axi_hls_registers *regs)
+{
+ struct xilinx_axi_hls_register reg;
+ unsigned int i;
+
+ /*
+ * Writing every register in the window (user_mem_size / 4 of them)
+ * is legitimate; only reject counts strictly above that (the
+ * original ">=" was off by one).
+ */
+ if (regs->num_regs > xhls->user_mem_size / 4)
+ return -EINVAL;
+
+ for (i = 0; i < regs->num_regs; ++i) {
+ if (copy_from_user(&reg, &regs->regs[i], sizeof(reg)))
+ return -EFAULT;
+
+ /* Offsets must lie in the window and be word-aligned */
+ if (reg.offset >= xhls->user_mem_size || reg.offset & 3)
+ return -EINVAL;
+
+ iowrite32(reg.value, xhls->user_mem + reg.offset);
+ }
+
+ return 0;
+}
+
+/*
+ * Private subdev ioctls giving userspace raw access to the HLS user
+ * register window. @arg has already been copied into kernel space by the
+ * V4L2 core (presumably -- TODO confirm against the subdev ioctl path);
+ * the regs->regs array it points at remains user memory and is accessed
+ * with copy_{from,to}_user by the helpers.
+ */
+static long xhls_ioctl(struct v4l2_subdev *subdev, unsigned int cmd, void *arg)
+{
+ struct xhls_device *xhls = to_hls(subdev);
+
+ switch (cmd) {
+ case XILINX_AXI_HLS_READ:
+ return xhls_user_read(xhls, arg);
+ case XILINX_AXI_HLS_WRITE:
+ return xhls_user_write(xhls, arg);
+ }
+
+ return -ENOTTY;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Video Operations
+ */
+
+/*
+ * Start or stop the HLS core. On start, the active sink format's width and
+ * height are programmed into the COLS/ROWS registers and the core is
+ * enabled with auto-restart so it keeps processing frames; on stop, the
+ * control register is simply cleared.
+ */
+static int xhls_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xhls_device *xhls = to_hls(subdev);
+ struct v4l2_mbus_framefmt *format = &xhls->formats[XVIP_PAD_SINK];
+
+ if (!enable) {
+ xvip_write(&xhls->xvip, XVIP_CTRL_CONTROL, 0);
+ return 0;
+ }
+
+ xvip_write(&xhls->xvip, XHLS_REG_COLS, format->width);
+ xvip_write(&xhls->xvip, XHLS_REG_ROWS, format->height);
+
+ xvip_write(&xhls->xvip, XVIP_CTRL_CONTROL,
+ XHLS_REG_CTRL_AUTO_RESTART | XVIP_CTRL_CONTROL_SW_ENABLE);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+/*
+ * Return the TRY (per-fh) or ACTIVE format for @pad depending on @which.
+ * Returns NULL for any other @which value -- callers must check before
+ * dereferencing.
+ */
+static struct v4l2_mbus_framefmt *
+__xhls_get_pad_format(struct xhls_device *xhls,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xhls->xvip.subdev, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xhls->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+/*
+ * Get the TRY or ACTIVE format on the requested pad.
+ *
+ * __xhls_get_pad_format() returns NULL for an unsupported fmt->which;
+ * the original dereferenced that pointer unconditionally. Return -EINVAL
+ * instead of oopsing.
+ */
+static int xhls_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xhls_device *xhls = to_hls(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __xhls_get_pad_format(xhls, cfg, fmt->pad, fmt->which);
+ if (!format)
+ return -EINVAL;
+
+ fmt->format = *format;
+
+ return 0;
+}
+
+/*
+ * Set the format on the sink pad and propagate it to the source pad.
+ * The source pad format is read-only: setting it just returns the
+ * current value.
+ *
+ * __xhls_get_pad_format() returns NULL for an unsupported fmt->which;
+ * check both lookups instead of dereferencing unconditionally as the
+ * original did.
+ */
+static int xhls_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xhls_device *xhls = to_hls(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __xhls_get_pad_format(xhls, cfg, fmt->pad, fmt->which);
+ if (!format)
+ return -EINVAL;
+
+ if (fmt->pad == XVIP_PAD_SOURCE) {
+ fmt->format = *format;
+ return 0;
+ }
+
+ xvip_set_format_size(format, fmt);
+
+ fmt->format = *format;
+
+ /* Propagate the format to the source pad. */
+ format = __xhls_get_pad_format(xhls, cfg, XVIP_PAD_SOURCE,
+ fmt->which);
+ if (!format)
+ return -EINVAL;
+
+ xvip_set_format_size(format, fmt);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+/* Seed the per-file-handle TRY formats from the device defaults */
+static int xhls_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ struct xhls_device *xhls = to_hls(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* Initialize with default formats */
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SINK);
+ *format = xhls->default_formats[XVIP_PAD_SINK];
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SOURCE);
+ *format = xhls->default_formats[XVIP_PAD_SOURCE];
+
+ return 0;
+}
+
+/* Nothing to release per file handle */
+static int xhls_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+/*
+ * Subdev operation tables. These are never modified at runtime and
+ * v4l2_subdev_init() takes a const pointer, so declare them const
+ * (kernel convention; the original left them writable).
+ */
+static const struct v4l2_subdev_core_ops xhls_core_ops = {
+ .ioctl = xhls_ioctl,
+};
+
+static const struct v4l2_subdev_video_ops xhls_video_ops = {
+ .s_stream = xhls_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops xhls_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xvip_enum_frame_size,
+ .get_fmt = xhls_get_format,
+ .set_fmt = xhls_set_format,
+};
+
+static const struct v4l2_subdev_ops xhls_ops = {
+ .core = &xhls_core_ops,
+ .video = &xhls_video_ops,
+ .pad = &xhls_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xhls_internal_ops = {
+ .open = xhls_open,
+ .close = xhls_close,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media Operations
+ */
+
+static const struct media_entity_operations xhls_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+/*
+ * Initialize the default and active formats on both pads. The frame size
+ * is read back from the core's COLS/ROWS registers (the core must already
+ * be clocked; probe calls xvip_init_resources() first). The source pad
+ * inherits the sink format with only the media bus code swapped.
+ */
+static void xhls_init_formats(struct xhls_device *xhls)
+{
+ struct v4l2_mbus_framefmt *format;
+
+ /* Initialize default and active formats */
+ format = &xhls->default_formats[XVIP_PAD_SINK];
+ format->code = xhls->vip_formats[XVIP_PAD_SINK]->code;
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ format->width = xvip_read(&xhls->xvip, XHLS_REG_COLS);
+ format->height = xvip_read(&xhls->xvip, XHLS_REG_ROWS);
+
+ xhls->formats[XVIP_PAD_SINK] = *format;
+
+ format = &xhls->default_formats[XVIP_PAD_SOURCE];
+ *format = xhls->default_formats[XVIP_PAD_SINK];
+ format->code = xhls->vip_formats[XVIP_PAD_SOURCE]->code;
+
+ xhls->formats[XVIP_PAD_SOURCE] = *format;
+}
+
+/**
+ * xhls_parse_of - Parse the device tree node of the HLS core
+ * @xhls: HLS core device
+ *
+ * Read the compatible string and, for each port child node, the media bus
+ * format and pad index (reg). Node references are now dropped on every
+ * path: the original leaked the "ports" reference and, on the early-error
+ * returns inside for_each_child_of_node(), the child reference too.
+ *
+ * Return: 0 on success or a negative error code on a malformed node.
+ */
+static int xhls_parse_of(struct xhls_device *xhls)
+{
+ struct device *dev = xhls->xvip.dev;
+ struct device_node *node = xhls->xvip.dev->of_node;
+ struct device_node *ports;
+ struct device_node *port;
+ u32 port_id;
+ int ret;
+
+ ret = of_property_read_string(node, "compatible", &xhls->compatible);
+ if (ret < 0)
+ return -EINVAL;
+
+ ports = of_get_child_by_name(node, "ports");
+ if (ports == NULL)
+ ports = of_node_get(node); /* balance the unconditional put below */
+
+ /* Get the format description for each pad */
+ for_each_child_of_node(ports, port) {
+ if (port->name && (of_node_cmp(port->name, "port") == 0)) {
+ const struct xvip_video_format *vip_format;
+
+ vip_format = xvip_of_get_format(port);
+ if (IS_ERR(vip_format)) {
+ dev_err(dev, "invalid format in DT");
+ ret = PTR_ERR(vip_format);
+ goto err_put;
+ }
+
+ ret = of_property_read_u32(port, "reg", &port_id);
+ if (ret < 0) {
+ dev_err(dev, "no reg in DT");
+ goto err_put;
+ }
+
+ if (port_id != 0 && port_id != 1) {
+ dev_err(dev, "invalid reg in DT");
+ ret = -EINVAL;
+ goto err_put;
+ }
+
+ xhls->vip_formats[port_id] = vip_format;
+ }
+ }
+
+ of_node_put(ports);
+ return 0;
+
+err_put:
+ /* Leaving for_each_child_of_node() early keeps a ref on @port */
+ of_node_put(port);
+ of_node_put(ports);
+ return ret;
+}
+
+/*
+ * Probe: parse DT, map resources, reset the core, register the subdev with
+ * its media pads and controls. The second memory resource is the user
+ * register window exposed through the private ioctls.
+ */
+static int xhls_probe(struct platform_device *pdev)
+{
+ struct v4l2_subdev *subdev;
+ struct xhls_device *xhls;
+ struct resource *mem;
+ int ret;
+
+ xhls = devm_kzalloc(&pdev->dev, sizeof(*xhls), GFP_KERNEL);
+ if (!xhls)
+ return -ENOMEM;
+
+ xhls->xvip.dev = &pdev->dev;
+
+ ret = xhls_parse_of(xhls);
+ if (ret < 0)
+ return ret;
+
+ ret = xvip_init_resources(&xhls->xvip);
+ if (ret < 0)
+ return ret;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ xhls->user_mem = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(xhls->user_mem)) {
+ /*
+ * The original returned here without undoing
+ * xvip_init_resources(), leaking the clock reference.
+ */
+ ret = PTR_ERR(xhls->user_mem);
+ goto error_resources;
+ }
+ xhls->user_mem_size = resource_size(mem);
+
+ /* Reset and initialize the core */
+ xvip_reset(&xhls->xvip);
+
+ /* Initialize V4L2 subdevice and media entity */
+ subdev = &xhls->xvip.subdev;
+ v4l2_subdev_init(subdev, &xhls_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xhls_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ v4l2_set_subdevdata(subdev, xhls);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ xhls_init_formats(xhls);
+
+ xhls->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ xhls->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ subdev->entity.ops = &xhls_media_ops;
+ ret = media_entity_pads_init(&subdev->entity, 2, xhls->pads);
+ if (ret < 0)
+ goto error;
+
+ ret = xhls_create_controls(xhls);
+ if (ret < 0)
+ goto error;
+
+ platform_set_drvdata(pdev, xhls);
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto error;
+ }
+
+ dev_info(xhls->xvip.dev, "device %s found\n", xhls->compatible);
+
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(&xhls->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+error_resources:
+ xvip_cleanup_resources(&xhls->xvip);
+ return ret;
+}
+
+/* Tear down everything probe set up, in reverse order */
+static int xhls_remove(struct platform_device *pdev)
+{
+ struct xhls_device *xhls = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xhls->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ v4l2_ctrl_handler_free(&xhls->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+
+ xvip_cleanup_resources(&xhls->xvip);
+
+ return 0;
+}
+
+static const struct of_device_id xhls_of_id_table[] = {
+ { .compatible = "xlnx,v-hls" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xhls_of_id_table);
+
+static struct platform_driver xhls_driver = {
+ .driver = {
+ .name = "xilinx-hls",
+ .of_match_table = xhls_of_id_table,
+ },
+ .probe = xhls_probe,
+ .remove = xhls_remove,
+};
+
+module_platform_driver(xhls_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Xilinx HLS Core Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-m2m.c b/drivers/media/platform/xilinx/xilinx-m2m.c
new file mode 100644
index 000000000000..5b23ff3c6ab6
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-m2m.c
@@ -0,0 +1,2108 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx V4L2 mem2mem driver
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ *
+ * Author: Satish Kumar Nagireddy <satish.nagireddy.nagireddy@xilinx.com>
+ */
+
+#include <drm/drm_fourcc.h>
+#include <linux/delay.h>
+#include <linux/dma/xilinx_frmbuf.h>
+#include <linux/lcm.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "xilinx-vip.h"
+
+#define XVIP_M2M_NAME "xilinx-mem2mem"
+#define XVIP_M2M_DEFAULT_FMT V4L2_PIX_FMT_RGB24
+
+/* Minimum and maximum widths are expressed in bytes */
+#define XVIP_M2M_MIN_WIDTH 1U
+#define XVIP_M2M_MAX_WIDTH 65535U
+#define XVIP_M2M_MIN_HEIGHT 1U
+#define XVIP_M2M_MAX_HEIGHT 8191U
+
+#define XVIP_M2M_DEF_WIDTH 1920
+#define XVIP_M2M_DEF_HEIGHT 1080
+
+#define XVIP_M2M_PAD_SINK 1
+#define XVIP_M2M_PAD_SOURCE 0
+
+/**
+ * struct xvip_graph_entity - Entity in the video graph
+ * @list: list entry in a graph entities list
+ * @node: the entity's DT node
+ * @entity: media entity, from the corresponding V4L2 subdev
+ * @asd: subdev asynchronous registration information
+ * @subdev: V4L2 subdev
+ * @streaming: status of the V4L2 subdev if streaming or not
+ */
+struct xvip_graph_entity {
+ struct list_head list;
+ struct device_node *node;
+ struct media_entity *entity;
+
+ struct v4l2_async_subdev asd;
+ struct v4l2_subdev *subdev;
+ bool streaming;
+};
+
+/**
+ * struct xvip_pipeline - Xilinx Video IP pipeline structure
+ * @pipe: media pipeline
+ * @lock: protects the pipeline @stream_count and @use_count
+ * @use_count: number of DMA engines using the pipeline
+ * @stream_count: number of DMA engines currently streaming
+ * @num_dmas: number of DMA engines in the pipeline
+ * @xdev: Composite device the pipe belongs to
+ */
+struct xvip_pipeline {
+ struct media_pipeline pipe;
+
+ /* protects the pipeline @stream_count */
+ struct mutex lock;
+ unsigned int use_count;
+ unsigned int stream_count;
+
+ unsigned int num_dmas;
+ struct xvip_m2m_dev *xdev;
+};
+
+/**
+ * struct xventity_list - Scratch list node used while walking the graph
+ * @list: list entry in the temporary per-walk entity list
+ * @entity: media entity recorded at this position of the pipeline
+ */
+struct xventity_list {
+ struct list_head list;
+ struct media_entity *entity;
+};
+
+/**
+ * struct xvip_m2m_dev - Xilinx Video mem2mem device structure
+ * @v4l2_dev: V4L2 device
+ * @dev: (OF) device
+ * @media_dev: media device
+ * @notifier: V4L2 asynchronous subdevs notifier
+ * @entities: entities in the graph as a list of xvip_graph_entity
+ * @num_subdevs: number of subdevs in the pipeline
+ * @lock: This is to protect mem2mem context structure data
+ * @queued_lock: This is to protect video buffer information
+ * @dma: Video DMA channels
+ * @m2m_dev: V4L2 mem2mem device structure
+ * @v4l2_caps: V4L2 capabilities of the whole device
+ */
+struct xvip_m2m_dev {
+ struct v4l2_device v4l2_dev;
+ struct device *dev;
+
+ struct media_device media_dev;
+ struct v4l2_async_notifier notifier;
+ struct list_head entities;
+ unsigned int num_subdevs;
+
+ /* Protects to m2m context data */
+ struct mutex lock;
+
+ /* Protects vb2_v4l2_buffer data */
+ spinlock_t queued_lock;
+ struct xvip_m2m_dma *dma;
+ struct v4l2_m2m_dev *m2m_dev;
+ u32 v4l2_caps;
+};
+
+static inline struct xvip_pipeline *to_xvip_pipeline(struct media_entity *e)
+{
+ return container_of(e->pipe, struct xvip_pipeline, pipe);
+}
+
+/**
+ * struct xvip_m2m_dma - Video DMA channel
+ * @video: V4L2 video device associated with the DMA channel
+ * @xdev: composite mem2mem device the DMA channels belongs to
+ * @chan_tx: DMA engine channel for MEM2DEV transfer
+ * @chan_rx: DMA engine channel for DEV2MEM transfer
+ * @outfmt: active V4L2 OUTPUT port pixel format
+ * @capfmt: active V4L2 CAPTURE port pixel format
+ * @r: crop rectangle parameters
+ * @outinfo: format information corresponding to the active @outfmt
+ * @capinfo: format information corresponding to the active @capfmt
+ * @align: transfer alignment required by the DMA channel (in bytes)
+ * @crop: boolean flag to indicate if crop is requested
+ * @pads: media pads for the video M2M device entity
+ * @pipe: pipeline belonging to the DMA channel
+ */
+struct xvip_m2m_dma {
+ struct video_device video;
+ struct xvip_m2m_dev *xdev;
+ struct dma_chan *chan_tx;
+ struct dma_chan *chan_rx;
+ struct v4l2_format outfmt;
+ struct v4l2_format capfmt;
+ struct v4l2_rect r;
+ const struct xvip_video_format *outinfo;
+ const struct xvip_video_format *capinfo;
+ u32 align;
+ bool crop;
+
+ struct media_pad pads[2];
+ struct xvip_pipeline pipe;
+};
+
+/**
+ * struct xvip_m2m_ctx - VIPP mem2mem context
+ * @fh: V4L2 file handler
+ * @xdev: composite mem2mem device the DMA channels belongs to
+ * @xt: dma interleaved template for dma configuration
+ * @sgl: data chunk structure for dma_interleaved_template
+ */
+struct xvip_m2m_ctx {
+ struct v4l2_fh fh;
+ struct xvip_m2m_dev *xdev;
+ struct dma_interleaved_template xt;
+ struct data_chunk sgl[1];
+};
+
+static inline struct xvip_m2m_ctx *file2ctx(struct file *file)
+{
+ return container_of(file->private_data, struct xvip_m2m_ctx, fh);
+}
+
+static struct v4l2_subdev *
+xvip_dma_remote_subdev(struct media_pad *local, u32 *pad)
+{
+ struct media_pad *remote;
+
+ remote = media_entity_remote_pad(local);
+ if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
+ return NULL;
+
+ if (pad)
+ *pad = remote->index;
+
+ return media_entity_to_v4l2_subdev(remote->entity);
+}
+
+/*
+ * Verify that the active OUTPUT format matches what the subdev connected
+ * to the DMA source pad produces: media bus code and frame size must agree.
+ *
+ * NOTE(review): this indexes dma->pads with XVIP_PAD_SOURCE (from
+ * xilinx-vip.h) while the rest of this file defines its own
+ * XVIP_M2M_PAD_SOURCE = 0 / XVIP_M2M_PAD_SINK = 1 -- confirm the two
+ * numbering schemes agree.
+ */
+static int xvip_dma_verify_format(struct xvip_m2m_dma *dma)
+{
+ struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *subdev;
+ int ret;
+ int width, height;
+
+ subdev = xvip_dma_remote_subdev(&dma->pads[XVIP_PAD_SOURCE], &fmt.pad);
+ if (!subdev)
+ return -EPIPE;
+
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+ if (ret < 0)
+ return ret == -ENOIOCTLCMD ? -EINVAL : ret;
+
+ if (dma->outinfo->code != fmt.format.code)
+ return -EINVAL;
+
+ /* Frame size lives in different unions for MPLANE vs single-planar */
+ if (V4L2_TYPE_IS_MULTIPLANAR(dma->outfmt.type)) {
+ width = dma->outfmt.fmt.pix_mp.width;
+ height = dma->outfmt.fmt.pix_mp.height;
+ } else {
+ width = dma->outfmt.fmt.pix.width;
+ height = dma->outfmt.fmt.pix.height;
+ }
+
+ if (width != fmt.format.width || height != fmt.format.height)
+ return -EINVAL;
+
+ return 0;
+}
+
+#define to_xvip_dma(vdev) container_of(vdev, struct xvip_m2m_dma, video)
+/* -----------------------------------------------------------------------------
+ * Pipeline Stream Management
+ */
+
+/**
+ * xvip_subdev_set_streaming - Find and update streaming status of subdev
+ * @xdev: Composite video device
+ * @subdev: V4L2 sub-device
+ * @enable: enable/disable streaming status
+ *
+ * Walk the xvip graph entities list and find if subdev is present. Returns
+ * the previous streaming status of subdev and updates the status to
+ * @enable.
+ *
+ * Return: previous streaming status (true or false); WARNs and returns
+ * false if the subdev is not in the entities list.
+ */
+static bool xvip_subdev_set_streaming(struct xvip_m2m_dev *xdev,
+ struct v4l2_subdev *subdev, bool enable)
+{
+ struct xvip_graph_entity *entity;
+
+ list_for_each_entry(entity, &xdev->entities, list)
+ if (entity->node == subdev->dev->of_node) {
+ bool status = entity->streaming;
+
+ entity->streaming = enable;
+ return status;
+ }
+
+ WARN(1, "Should never get here\n");
+ return false;
+}
+
+/**
+ * xvip_entity_start_stop - Power and stream a single subdev on or off
+ * @xdev: Composite video device
+ * @entity: media entity backing the subdev
+ * @start: start (true) or stop (false) the subdev
+ *
+ * Uses the per-entity streaming flag so a subdev shared between
+ * sub-graphs is only toggled once.
+ *
+ * Return: 0 or the s_power/s_stream error (-ENOIOCTLCMD is tolerated).
+ */
+static int xvip_entity_start_stop(struct xvip_m2m_dev *xdev,
+ struct media_entity *entity, bool start)
+{
+ struct v4l2_subdev *subdev;
+ bool is_streaming;
+ int ret = 0;
+
+ dev_dbg(xdev->dev, "%s entity %s\n",
+ start ? "Starting" : "Stopping", entity->name);
+ subdev = media_entity_to_v4l2_subdev(entity);
+
+ /* This is to maintain list of stream on/off devices */
+ is_streaming = xvip_subdev_set_streaming(xdev, subdev, start);
+
+ /*
+ * start or stop the subdev only once in case if they are
+ * shared between sub-graphs
+ */
+ if (start && !is_streaming) {
+ /* power-on subdevice */
+ ret = v4l2_subdev_call(subdev, core, s_power, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_err(xdev->dev,
+ "s_power on failed on subdev\n");
+ xvip_subdev_set_streaming(xdev, subdev, 0);
+ return ret;
+ }
+
+ /* stream-on subdevice */
+ ret = v4l2_subdev_call(subdev, video, s_stream, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_err(xdev->dev,
+ "s_stream on failed on subdev\n");
+ v4l2_subdev_call(subdev, core, s_power, 0);
+ xvip_subdev_set_streaming(xdev, subdev, 0);
+ }
+ } else if (!start && is_streaming) {
+ /* stream-off subdevice */
+ ret = v4l2_subdev_call(subdev, video, s_stream, 0);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_err(xdev->dev,
+ "s_stream off failed on subdev\n");
+ /* roll the flag back so a retry will stop it again */
+ xvip_subdev_set_streaming(xdev, subdev, 1);
+ }
+
+ /* power-off subdevice */
+ ret = v4l2_subdev_call(subdev, core, s_power, 0);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ dev_err(xdev->dev,
+ "s_power off failed on subdev\n");
+ }
+
+ return ret;
+}
+
+/**
+ * xvip_pipeline_start_stop - Start or stop streaming on a pipeline
+ * @xdev: Composite video device
+ * @dma: xvip dma
+ * @start: Start (when true) or stop (when false) the pipeline
+ *
+ * Walk the entities chain starting @dma and start or stop all of them.
+ * The collected scratch list is always freed, including on the ENOMEM
+ * path (the original leaked already-allocated nodes there), and
+ * media_graph_walk_cleanup() is only called once the walk was
+ * successfully initialized.
+ *
+ * Return: 0 if successful, or the return value of the failed video::s_stream
+ * operation otherwise.
+ */
+static int xvip_pipeline_start_stop(struct xvip_m2m_dev *xdev,
+ struct xvip_m2m_dma *dma, bool start)
+{
+ struct media_graph graph;
+ struct media_entity *entity = &dma->video.entity;
+ struct media_device *mdev = entity->graph_obj.mdev;
+ struct xventity_list *temp, *_temp;
+ LIST_HEAD(ent_list);
+ int ret = 0;
+
+ mutex_lock(&mdev->graph_mutex);
+
+ /* Walk the graph to locate the subdev nodes */
+ ret = media_graph_walk_init(&graph, mdev);
+ if (ret) {
+ /* Nothing allocated yet; don't clean up an uninitialized walk */
+ mutex_unlock(&mdev->graph_mutex);
+ return ret;
+ }
+
+ media_graph_walk_start(&graph, entity);
+
+ /* get the list of entities */
+ while ((entity = media_graph_walk_next(&graph))) {
+ struct xventity_list *ele;
+
+ /* We want to stream on/off only subdevs */
+ if (!is_media_entity_v4l2_subdev(entity))
+ continue;
+
+ /* Maintain the pipeline sequence in a list */
+ ele = kzalloc(sizeof(*ele), GFP_KERNEL);
+ if (!ele) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ele->entity = entity;
+ list_add(&ele->list, &ent_list);
+ }
+
+ if (start) {
+ list_for_each_entry_safe(temp, _temp, &ent_list, list) {
+ /* Enable all subdevs from sink to source */
+ ret = xvip_entity_start_stop(xdev, temp->entity, start);
+ if (ret < 0) {
+ dev_err(xdev->dev, "ret = %d for entity %s\n",
+ ret, temp->entity->name);
+ break;
+ }
+ }
+ } else {
+ list_for_each_entry_safe_reverse(temp, _temp, &ent_list, list)
+ /* Disable all subdevs from source to sink */
+ xvip_entity_start_stop(xdev, temp->entity, start);
+ }
+
+out:
+ /* Free the scratch list on success and error paths alike */
+ list_for_each_entry_safe(temp, _temp, &ent_list, list) {
+ list_del(&temp->list);
+ kfree(temp);
+ }
+
+ mutex_unlock(&mdev->graph_mutex);
+ media_graph_walk_cleanup(&graph);
+ return ret;
+}
+
+/**
+ * xvip_pipeline_set_stream - Enable/disable streaming on a pipeline
+ * @pipe: The pipeline
+ * @on: Turn the stream on when true or off when false
+ *
+ * The pipeline is shared between all DMA engines connect at its input and
+ * output. While the stream state of DMA engines can be controlled
+ * independently, pipelines have a shared stream state that enable or disable
+ * all entities in the pipeline. For this reason the pipeline uses a streaming
+ * counter that tracks the number of DMA engines that have requested the stream
+ * to be enabled. This will walk the graph starting from each DMA and enable or
+ * disable the entities in the path.
+ *
+ * When called with the @on argument set to true, this function will increment
+ * the pipeline streaming count. If the streaming count reaches the number of
+ * DMA engines in the pipeline it will enable all entities that belong to the
+ * pipeline.
+ *
+ * Similarly, when called with the @on argument set to false, this function will
+ * decrement the pipeline streaming count and disable all entities in the
+ * pipeline when the streaming count reaches zero.
+ *
+ * Return: 0 if successful, or the return value of the failed video::s_stream
+ * operation otherwise. Stopping the pipeline never fails. The pipeline state is
+ * not updated when the operation fails.
+ */
+static int xvip_pipeline_set_stream(struct xvip_pipeline *pipe, bool on)
+{
+ struct xvip_m2m_dev *xdev;
+ struct xvip_m2m_dma *dma;
+ int ret = 0;
+
+ mutex_lock(&pipe->lock);
+ xdev = pipe->xdev;
+ dma = xdev->dma;
+
+ if (on) {
+ ret = xvip_pipeline_start_stop(xdev, dma, true);
+ if (ret < 0)
+ goto done;
+ /* counted only on success, per the contract above */
+ pipe->stream_count++;
+ } else {
+ if (--pipe->stream_count == 0)
+ xvip_pipeline_start_stop(xdev, dma, false);
+ }
+
+done:
+ mutex_unlock(&pipe->lock);
+ return ret;
+}
+
+/*
+ * Validate the pipeline reachable from @start and record the number of DMA
+ * (MEDIA_ENT_F_IO_V4L) entities in it. Every I/O entity is counted as both
+ * an input and an output, matching the original behaviour.
+ *
+ * The original also resolved each entity to its xvip_m2m_dma and never used
+ * the result (a set-but-unused local); that dead code is removed.
+ */
+static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
+ struct xvip_m2m_dma *start)
+{
+ struct media_graph graph;
+ struct media_entity *entity = &start->video.entity;
+ struct media_device *mdev = entity->graph_obj.mdev;
+ unsigned int num_inputs = 0;
+ unsigned int num_outputs = 0;
+ int ret;
+
+ mutex_lock(&mdev->graph_mutex);
+
+ /* Walk the graph to locate the video nodes. */
+ ret = media_graph_walk_init(&graph, mdev);
+ if (ret) {
+ mutex_unlock(&mdev->graph_mutex);
+ return ret;
+ }
+
+ media_graph_walk_start(&graph, entity);
+
+ while ((entity = media_graph_walk_next(&graph))) {
+ if (entity->function != MEDIA_ENT_F_IO_V4L)
+ continue;
+
+ num_outputs++;
+ num_inputs++;
+ }
+
+ mutex_unlock(&mdev->graph_mutex);
+
+ media_graph_walk_cleanup(&graph);
+
+ /* We need at least one DMA to proceed */
+ if (num_outputs == 0 && num_inputs == 0)
+ return -EPIPE;
+
+ pipe->num_dmas = num_inputs + num_outputs;
+ pipe->xdev = start->xdev;
+
+ return 0;
+}
+
+/* Reset pipeline state gathered by xvip_pipeline_validate(); lock held */
+static void __xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
+{
+ pipe->num_dmas = 0;
+}
+
+/**
+ * xvip_pipeline_cleanup - Cleanup the pipeline after streaming
+ * @pipe: the pipeline
+ *
+ * Decrease the pipeline use count and clean it up if we were the last user.
+ */
+static void xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
+{
+ mutex_lock(&pipe->lock);
+
+ /* If we're the last user clean up the pipeline. */
+ if (--pipe->use_count == 0)
+ __xvip_pipeline_cleanup(pipe);
+
+ mutex_unlock(&pipe->lock);
+}
+
+/**
+ * xvip_pipeline_prepare - Prepare the pipeline for streaming
+ * @pipe: the pipeline
+ * @dma: DMA engine at one end of the pipeline
+ *
+ * Validate the pipeline if no user exists yet, otherwise just increase the use
+ * count.
+ *
+ * Return: 0 if successful or -EPIPE if the pipeline is not valid.
+ */
+static int xvip_pipeline_prepare(struct xvip_pipeline *pipe,
+ struct xvip_m2m_dma *dma)
+{
+ int ret;
+
+ mutex_lock(&pipe->lock);
+
+ /* If we're the first user validate and initialize the pipeline. */
+ if (pipe->use_count == 0) {
+ ret = xvip_pipeline_validate(pipe, dma);
+ if (ret < 0) {
+ __xvip_pipeline_cleanup(pipe);
+ goto done;
+ }
+ }
+
+ pipe->use_count++;
+ ret = 0;
+
+done:
+ mutex_unlock(&pipe->lock);
+ return ret;
+}
+
+/* MEM2DEV completion: nothing to do, the DEV2MEM callback finishes the job */
+static void xvip_m2m_dma_callback_mem2dev(void *data)
+{
+}
+
+/*
+ * DEV2MEM completion: pop both buffers, copy timestamp metadata from source
+ * to destination, mark both done and finish the m2m job.
+ *
+ * NOTE(review): v4l2_m2m_src/dst_buf_remove() can return NULL on an empty
+ * queue; confirm a completion can never fire without both buffers queued
+ * before relying on the unchecked dereferences below.
+ */
+static void xvip_m2m_dma_callback(void *data)
+{
+ struct xvip_m2m_ctx *ctx = data;
+ struct xvip_m2m_dev *xdev = ctx->xdev;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
+
+ spin_lock(&xdev->queued_lock);
+ src_vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ /* Propagate timestamp and timecode from the source buffer */
+ dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
+ dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->flags |=
+ src_vb->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->timecode = src_vb->timecode;
+
+ v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
+ v4l2_m2m_job_finish(xdev->m2m_dev, ctx->fh.m2m_ctx);
+ spin_unlock(&xdev->queued_lock);
+}
+
+/*
+ * Queue operations
+ */
+
+static int xvip_m2m_queue_setup(struct vb2_queue *vq,
+ u32 *nbuffers, u32 *nplanes,
+ u32 sizes[], struct device *alloc_devs[])
+{
+ struct xvip_m2m_ctx *ctx = vb2_get_drv_priv(vq);
+ struct xvip_m2m_dma *dma = ctx->xdev->dma;
+ struct v4l2_format *f;
+ const struct xvip_video_format *info;
+ u32 i;
+
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ f = &dma->outfmt;
+ info = dma->outinfo;
+ } else {
+ f = &dma->capfmt;
+ info = dma->capinfo;
+ }
+
+ if (*nplanes) {
+ if (*nplanes != f->fmt.pix_mp.num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < *nplanes; i++) {
+ if (sizes[i] < f->fmt.pix_mp.plane_fmt[i].sizeimage)
+ return -EINVAL;
+ }
+ } else {
+ *nplanes = info->buffers;
+ for (i = 0; i < info->buffers; i++)
+ sizes[i] = f->fmt.pix_mp.plane_fmt[i].sizeimage;
+ }
+
+ return 0;
+}
+
+static int xvip_m2m_buf_prepare(struct vb2_buffer *vb)
+{
+ struct xvip_m2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct xvip_m2m_dma *dma = ctx->xdev->dma;
+ struct v4l2_format *f;
+ const struct xvip_video_format *info;
+ u32 i;
+
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ f = &dma->outfmt;
+ info = dma->outinfo;
+ } else {
+ f = &dma->capfmt;
+ info = dma->capinfo;
+ }
+
+ for (i = 0; i < info->buffers; i++) {
+ if (vb2_plane_size(vb, i) <
+ f->fmt.pix_mp.plane_fmt[i].sizeimage) {
+ dev_err(ctx->xdev->dev,
+ "insufficient plane size (%u < %u)\n",
+ (u32)vb2_plane_size(vb, i),
+ f->fmt.pix_mp.plane_fmt[i].sizeimage);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, i,
+ f->fmt.pix_mp.plane_fmt[i].sizeimage);
+ }
+
+ return 0;
+}
+
+/* Hand a prepared buffer to the m2m framework's ready queue */
+static void xvip_m2m_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct xvip_m2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+/*
+ * Stop streaming on one queue: terminate the matching DMA engine, stop and
+ * clean up the media pipeline (when subdevs are present), then return all
+ * still-queued buffers to vb2 in the ERROR state.
+ */
+static void xvip_m2m_stop_streaming(struct vb2_queue *q)
+{
+ struct xvip_m2m_ctx *ctx = vb2_get_drv_priv(q);
+ struct xvip_m2m_dma *dma = ctx->xdev->dma;
+ struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video.entity);
+ struct vb2_v4l2_buffer *vbuf;
+
+ dma->crop = false;
+ /* OUTPUT queue feeds the tx channel, CAPTURE queue drains rx */
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ dmaengine_terminate_sync(dma->chan_tx);
+ else
+ dmaengine_terminate_sync(dma->chan_rx);
+
+ if (ctx->xdev->num_subdevs) {
+ /* Stop the pipeline. */
+ xvip_pipeline_set_stream(pipe, false);
+
+ /* Cleanup the pipeline and mark it as being stopped. */
+ xvip_pipeline_cleanup(pipe);
+ media_pipeline_stop(&dma->video.entity);
+ }
+
+ for (;;) {
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ if (!vbuf)
+ return;
+
+ spin_lock(&ctx->xdev->queued_lock);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ spin_unlock(&ctx->xdev->queued_lock);
+ }
+}
+
+/*
+ * Start streaming on one queue: start the media pipeline, verify formats,
+ * prepare and stream-on the subdev chain.
+ *
+ * The error handling is reworked: the original fell from error_stop
+ * (which already called media_pipeline_stop()) into a call to
+ * xvip_m2m_stop_streaming(), stopping the pipeline a second time and
+ * decrementing a stream_count that was never incremented, and returned
+ * buffers in the ERROR state where the vb2 start_streaming contract
+ * requires VB2_BUF_STATE_QUEUED.
+ */
+static int xvip_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct xvip_m2m_ctx *ctx = vb2_get_drv_priv(q);
+ struct xvip_m2m_dma *dma = ctx->xdev->dma;
+ struct xvip_m2m_dev *xdev = ctx->xdev;
+ struct xvip_pipeline *pipe;
+ struct vb2_v4l2_buffer *vbuf;
+ int ret;
+
+ if (!xdev->num_subdevs)
+ return 0;
+
+ pipe = dma->video.entity.pipe
+ ? to_xvip_pipeline(&dma->video.entity) : &dma->pipe;
+
+ ret = media_pipeline_start(&dma->video.entity, &pipe->pipe);
+ if (ret < 0)
+ goto error;
+
+ /* Verify that the configured format matches the output of the
+ * connected subdev.
+ */
+ ret = xvip_dma_verify_format(dma);
+ if (ret < 0)
+ goto error_stop;
+
+ ret = xvip_pipeline_prepare(pipe, dma);
+ if (ret < 0)
+ goto error_stop;
+
+ /* Start the pipeline. */
+ ret = xvip_pipeline_set_stream(pipe, true);
+ if (ret < 0)
+ goto error_cleanup;
+
+ return 0;
+
+error_cleanup:
+ /* Undo xvip_pipeline_prepare()'s use count */
+ xvip_pipeline_cleanup(pipe);
+error_stop:
+ media_pipeline_stop(&dma->video.entity);
+error:
+ /* vb2 requires unstarted buffers back in the QUEUED state */
+ for (;;) {
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ if (!vbuf)
+ break;
+
+ spin_lock(&xdev->queued_lock);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_QUEUED);
+ spin_unlock(&xdev->queued_lock);
+ }
+
+ return ret;
+}
+
+/* vb2 queue operations shared by the OUTPUT and CAPTURE queues. */
+static const struct vb2_ops m2m_vb2_ops = {
+	.queue_setup = xvip_m2m_queue_setup,
+	.buf_prepare = xvip_m2m_buf_prepare,
+	.buf_queue = xvip_m2m_buf_queue,
+	.start_streaming = xvip_m2m_start_streaming,
+	.stop_streaming = xvip_m2m_stop_streaming,
+	.wait_prepare = vb2_ops_wait_prepare,
+	.wait_finish = vb2_ops_wait_finish,
+};
+
+/*
+ * m2m queue-init callback: configure and initialize both vb2 queues for
+ * a newly opened context.  The source (OUTPUT) and destination (CAPTURE)
+ * queues share the same ops, memops and driver mutex.
+ *
+ * Return: 0 on success or the error from vb2_queue_init().
+ */
+static int xvip_m2m_queue_init(void *priv, struct vb2_queue *src_vq,
+			       struct vb2_queue *dst_vq)
+{
+	struct xvip_m2m_ctx *ctx = priv;
+	int ret;
+
+	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+	src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+	src_vq->drv_priv = ctx;
+	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+	src_vq->ops = &m2m_vb2_ops;
+	src_vq->mem_ops = &vb2_dma_contig_memops;
+	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+	/*
+	 * vb2_ops_wait_prepare/wait_finish unlock/relock vq->lock; it must
+	 * be set or a blocking DQBUF dereferences a NULL mutex.
+	 */
+	src_vq->lock = &ctx->xdev->lock;
+	src_vq->dev = ctx->xdev->v4l2_dev.dev;
+
+	ret = vb2_queue_init(src_vq);
+	if (ret)
+		return ret;
+
+	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+	dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+	dst_vq->drv_priv = ctx;
+	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+	dst_vq->ops = &m2m_vb2_ops;
+	dst_vq->mem_ops = &vb2_dma_contig_memops;
+	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+	dst_vq->lock = &ctx->xdev->lock;
+	dst_vq->dev = ctx->xdev->v4l2_dev.dev;
+
+	return vb2_queue_init(dst_vq);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 ioctls
+ */
+
+/*
+ * VIDIOC_QUERYCAP: report driver name and mem2mem multiplanar caps.
+ *
+ * Fix: the bus_info copy was bounded by sizeof(cap->card) instead of
+ * sizeof(cap->bus_info).
+ */
+static int
+xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
+{
+	cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
+	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+	strlcpy(cap->driver, XVIP_M2M_NAME, sizeof(cap->driver));
+	strlcpy(cap->card, XVIP_M2M_NAME, sizeof(cap->card));
+	strlcpy(cap->bus_info, XVIP_M2M_NAME, sizeof(cap->bus_info));
+
+	return 0;
+}
+
+/*
+ * VIDIOC_ENUM_FMT: enumerate the pixel formats supported by the DMA
+ * channel backing the requested queue.
+ *
+ * Without connected subdevs every format the DMA engine supports is
+ * enumerated.  With a graph attached, only the single format matching
+ * the media-bus code of the connected subdev pad is reported (index 0).
+ *
+ * Fix: the matched format was looked up a second time without an
+ * IS_ERR() check; reuse the already-validated entry instead.
+ */
+static int
+xvip_m2m_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+	struct xvip_m2m_ctx *ctx = file2ctx(file);
+	struct xvip_m2m_dma *dma = ctx->xdev->dma;
+	const struct xvip_video_format *fmt;
+	struct v4l2_subdev *subdev;
+	struct v4l2_subdev_format v4l_fmt;
+	struct xvip_m2m_dev *xdev = ctx->xdev;
+	u32 i, fmt_cnt, *fmts;
+	int ret;
+
+	/* CAPTURE formats come from the rx channel, OUTPUT from tx. */
+	if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+		ret = xilinx_xdma_get_v4l2_vid_fmts(dma->chan_rx,
+						    &fmt_cnt, &fmts);
+	else
+		ret = xilinx_xdma_get_v4l2_vid_fmts(dma->chan_tx,
+						    &fmt_cnt, &fmts);
+	if (ret)
+		return ret;
+
+	if (f->index >= fmt_cnt)
+		return -EINVAL;
+
+	if (!xdev->num_subdevs) {
+		fmt = xvip_get_format_by_fourcc(fmts[f->index]);
+		if (IS_ERR(fmt))
+			return PTR_ERR(fmt);
+
+		f->pixelformat = fmt->fourcc;
+		return 0;
+	}
+
+	/* With a graph attached only one format matches the subdev. */
+	if (f->index > 0)
+		return -EINVAL;
+
+	/* Establish media pad format */
+	if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+		subdev = xvip_dma_remote_subdev(&dma->pads[XVIP_PAD_SOURCE],
+						&v4l_fmt.pad);
+	else
+		subdev = xvip_dma_remote_subdev(&dma->pads[XVIP_PAD_SINK],
+						&v4l_fmt.pad);
+	if (!subdev)
+		return -EPIPE;
+
+	v4l_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &v4l_fmt);
+	if (ret < 0)
+		return ret == -ENOIOCTLCMD ? -EINVAL : ret;
+
+	/* Find the DMA-side format matching the subdev media-bus code. */
+	for (i = 0; i < fmt_cnt; i++) {
+		fmt = xvip_get_format_by_fourcc(fmts[i]);
+		if (IS_ERR(fmt))
+			return PTR_ERR(fmt);
+
+		if (fmt->code == v4l_fmt.format.code)
+			break;
+	}
+
+	if (i >= fmt_cnt)
+		return -EINVAL;
+
+	f->pixelformat = fmt->fourcc;
+
+	return 0;
+}
+
+/* VIDIOC_G_FMT: report the currently configured format for the queue. */
+static int xvip_m2m_get_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+	struct xvip_m2m_ctx *ctx = file2ctx(file);
+	struct xvip_m2m_dma *dma = ctx->xdev->dma;
+	struct vb2_queue *vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+
+	if (!vq)
+		return -EINVAL;
+
+	/* OUTPUT reports the out format, CAPTURE the capture format. */
+	f->fmt.pix_mp = (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ?
+			dma->outfmt.fmt.pix_mp : dma->capfmt.fmt.pix_mp;
+
+	return 0;
+}
+
+/*
+ * Validate and adjust a requested format, per the V4L2 TRY_FMT contract.
+ *
+ * When subdevs are connected the requested code/width/height must match
+ * the active format on the remote subdev pad.  Width, height and
+ * bytesperline are then clamped/aligned to the DMA engine constraints
+ * and sizeimage recomputed for contiguous and non-contiguous layouts.
+ *
+ * Fix: xvip_get_format_by_fourcc() returns ERR_PTR on failure elsewhere
+ * in this file, so a plain NULL test let error pointers through; use
+ * IS_ERR_OR_NULL() before trusting the lookup.
+ *
+ * Return: 0 on success, -EINVAL on bad type or format mismatch, -EPIPE
+ * when no remote subdev is connected.
+ */
+static int __xvip_m2m_try_fmt(struct xvip_m2m_ctx *ctx, struct v4l2_format *f)
+{
+	struct xvip_m2m_dma *dma = ctx->xdev->dma;
+	const struct xvip_video_format *info;
+	struct v4l2_pix_format_mplane *pix_mp;
+	struct v4l2_plane_pix_format *plane_fmt;
+	u32 align, min_width, max_width;
+	u32 bpl, min_bpl, max_bpl;
+	u32 padding_factor_nume, padding_factor_deno;
+	u32 bpl_nume, bpl_deno;
+	u32 i, plane_width, plane_height;
+	struct v4l2_subdev_format fmt;
+	struct v4l2_subdev *subdev;
+	struct xvip_m2m_dev *xdev = ctx->xdev;
+	int ret;
+
+	if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+	    f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+		return -EINVAL;
+
+	/* Fetch the active format on the connected subdev pad, if any. */
+	if (xdev->num_subdevs) {
+		if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+			subdev = xvip_dma_remote_subdev
+				(&dma->pads[XVIP_PAD_SOURCE], &fmt.pad);
+		else
+			subdev = xvip_dma_remote_subdev
+				(&dma->pads[XVIP_PAD_SINK], &fmt.pad);
+
+		if (!subdev)
+			return -EPIPE;
+
+		fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+		ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+		if (ret < 0)
+			return -EINVAL;
+	}
+
+	pix_mp = &f->fmt.pix_mp;
+	plane_fmt = pix_mp->plane_fmt;
+	info = xvip_get_format_by_fourcc(f->fmt.pix_mp.pixelformat);
+	if (!IS_ERR_OR_NULL(info)) {
+		if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+			dma->outinfo = info;
+		else
+			dma->capinfo = info;
+	} else {
+		/* Unknown fourcc: fall back to the driver default. */
+		info = xvip_get_format_by_fourcc(XVIP_M2M_DEFAULT_FMT);
+	}
+
+	/* The request must match the subdev's active format exactly. */
+	if (xdev->num_subdevs) {
+		if (info->code != fmt.format.code ||
+		    fmt.format.width != pix_mp->width ||
+		    fmt.format.height != pix_mp->height) {
+			dev_err(xdev->dev, "Failed to set format\n");
+			dev_info(xdev->dev,
+				 "Reqed Code = %d, Width = %d, Height = %d\n",
+				 info->code, pix_mp->width, pix_mp->height);
+			dev_info(xdev->dev,
+				 "Subdev Code = %d, Width = %d, Height = %d",
+				 fmt.format.code, fmt.format.width,
+				 fmt.format.height);
+			return -EINVAL;
+		}
+	}
+
+	xvip_width_padding_factor(info->fourcc, &padding_factor_nume,
+				  &padding_factor_deno);
+	xvip_bpl_scaling_factor(info->fourcc, &bpl_nume, &bpl_deno);
+
+	/*
+	 * V4L2 specification suggests the driver corrects the format struct
+	 * if any of the dimensions is unsupported
+	 */
+	align = lcm(dma->align, info->bpp >> 3);
+	min_width = roundup(XVIP_M2M_MIN_WIDTH, align);
+	max_width = rounddown(XVIP_M2M_MAX_WIDTH, align);
+	pix_mp->width = clamp(pix_mp->width, min_width, max_width);
+	pix_mp->height = clamp(pix_mp->height, XVIP_M2M_MIN_HEIGHT,
+			       XVIP_M2M_MAX_HEIGHT);
+
+	/*
+	 * Clamp the requested bytes per line value. If the maximum
+	 * bytes per line value is zero, the module doesn't support
+	 * user configurable line sizes. Override the requested value
+	 * with the minimum in that case.
+	 */
+	max_bpl = rounddown(XVIP_M2M_MAX_WIDTH, align);
+
+	if (info->buffers == 1) {
+		/* Handling contiguous data with mplanes */
+		min_bpl = (pix_mp->width * info->bpl_factor *
+			   padding_factor_nume * bpl_nume) /
+			   (padding_factor_deno * bpl_deno);
+		min_bpl = roundup(min_bpl, align);
+		bpl = roundup(plane_fmt[0].bytesperline, align);
+		plane_fmt[0].bytesperline = clamp(bpl, min_bpl, max_bpl);
+
+		if (info->num_planes == 1) {
+			/* Single plane formats */
+			plane_fmt[0].sizeimage = plane_fmt[0].bytesperline *
+						 pix_mp->height;
+		} else {
+			/* Multi plane formats in contiguous buffer*/
+			plane_fmt[0].sizeimage =
+				DIV_ROUND_UP(plane_fmt[0].bytesperline *
+					     pix_mp->height *
+					     info->bpp, 8);
+		}
+	} else {
+		/* Handling non-contiguous data with mplanes */
+		for (i = 0; i < info->num_planes; i++) {
+			/* Chroma planes may be subsampled (hsub/vsub). */
+			plane_width = pix_mp->width / (i ? info->hsub : 1);
+			plane_height = pix_mp->height / (i ? info->vsub : 1);
+			min_bpl = (plane_width * info->bpl_factor *
+				   padding_factor_nume * bpl_nume) /
+				   (padding_factor_deno * bpl_deno);
+			min_bpl = roundup(min_bpl, align);
+			bpl = rounddown(plane_fmt[i].bytesperline, align);
+			plane_fmt[i].bytesperline = clamp(bpl, min_bpl,
+							  max_bpl);
+			plane_fmt[i].sizeimage = plane_fmt[i].bytesperline *
+						 plane_height;
+		}
+	}
+
+	return 0;
+}
+
+/* VIDIOC_TRY_FMT: validate/adjust a format without applying it. */
+static int xvip_m2m_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+	return __xvip_m2m_try_fmt(file2ctx(file), f);
+}
+
+/*
+ * VIDIOC_S_FMT: validate the requested format and store it as the
+ * active format of the corresponding queue.  Rejected with -EBUSY
+ * while buffers are allocated on the queue.
+ */
+static int xvip_m2m_set_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+	struct xvip_m2m_ctx *ctx = file2ctx(file);
+	struct xvip_m2m_dma *dma = ctx->xdev->dma;
+	struct vb2_queue *queue;
+	int ret;
+
+	queue = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+	if (!queue)
+		return -EINVAL;
+
+	if (vb2_is_busy(queue)) {
+		v4l2_err(&ctx->xdev->v4l2_dev, "%s queue busy\n", __func__);
+		return -EBUSY;
+	}
+
+	ret = __xvip_m2m_try_fmt(ctx, f);
+	if (ret < 0)
+		return ret;
+
+	if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+		dma->outfmt.fmt.pix_mp = f->fmt.pix_mp;
+	else
+		dma->capfmt.fmt.pix_mp = f->fmt.pix_mp;
+
+	return 0;
+}
+
+/*
+ * VIDIOC_G_SELECTION: report the active crop rectangle.  Compose
+ * targets are not supported (-ENOTTY), other targets are invalid.
+ */
+static int
+xvip_m2m_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+	struct xvip_m2m_dma *dma = file2ctx(file)->xdev->dma;
+
+	if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+	    s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	if (s->target == V4L2_SEL_TGT_COMPOSE)
+		return -ENOTTY;
+
+	if (s->target != V4L2_SEL_TGT_CROP)
+		return -EINVAL;
+
+	/* Crop always starts at the origin in this driver. */
+	s->r.left = 0;
+	s->r.top = 0;
+	s->r.width = dma->r.width;
+	s->r.height = dma->r.height;
+
+	return 0;
+}
+
+/*
+ * VIDIOC_S_SELECTION: set the crop rectangle.  Only origin-anchored
+ * rectangles no larger than the OUTPUT format are accepted; the width
+ * is clamped to the DMA alignment constraints.
+ */
+static int
+xvip_m2m_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+	struct xvip_m2m_dma *dma = file2ctx(file)->xdev->dma;
+	u32 lo, hi;
+
+	if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+	    s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	if (s->target == V4L2_SEL_TGT_COMPOSE)
+		return -ENOTTY;
+
+	if (s->target != V4L2_SEL_TGT_CROP)
+		return -EINVAL;
+
+	/* Only top-left anchored rectangles within the OUTPUT frame. */
+	if (s->r.width > dma->outfmt.fmt.pix_mp.width ||
+	    s->r.height > dma->outfmt.fmt.pix_mp.height ||
+	    s->r.top != 0 || s->r.left != 0)
+		return -EINVAL;
+
+	dma->crop = true;
+	lo = roundup(XVIP_M2M_MIN_WIDTH, dma->align);
+	hi = rounddown(XVIP_M2M_MAX_WIDTH, dma->align);
+	dma->r.width = clamp(s->r.width, lo, hi);
+	dma->r.height = s->r.height;
+
+	return 0;
+}
+
+/*
+ * V4L2 ioctl dispatch table.  Format and selection ioctls are handled
+ * locally; buffer and streaming ioctls are delegated to the mem2mem
+ * framework helpers.
+ */
+static const struct v4l2_ioctl_ops xvip_m2m_ioctl_ops = {
+	.vidioc_querycap		= xvip_dma_querycap,
+
+	.vidioc_enum_fmt_vid_cap	= xvip_m2m_enum_fmt,
+	.vidioc_g_fmt_vid_cap_mplane	= xvip_m2m_get_fmt,
+	.vidioc_try_fmt_vid_cap_mplane	= xvip_m2m_try_fmt,
+	.vidioc_s_fmt_vid_cap_mplane	= xvip_m2m_set_fmt,
+
+	.vidioc_enum_fmt_vid_out	= xvip_m2m_enum_fmt,
+	.vidioc_g_fmt_vid_out_mplane	= xvip_m2m_get_fmt,
+	.vidioc_try_fmt_vid_out_mplane	= xvip_m2m_try_fmt,
+	.vidioc_s_fmt_vid_out_mplane	= xvip_m2m_set_fmt,
+	.vidioc_s_selection		= xvip_m2m_s_selection,
+	.vidioc_g_selection		= xvip_m2m_g_selection,
+
+	.vidioc_reqbufs			= v4l2_m2m_ioctl_reqbufs,
+	.vidioc_querybuf		= v4l2_m2m_ioctl_querybuf,
+	.vidioc_qbuf			= v4l2_m2m_ioctl_qbuf,
+	.vidioc_dqbuf			= v4l2_m2m_ioctl_dqbuf,
+	.vidioc_prepare_buf		= v4l2_m2m_ioctl_prepare_buf,
+	.vidioc_create_bufs		= v4l2_m2m_ioctl_create_bufs,
+	.vidioc_expbuf			= v4l2_m2m_ioctl_expbuf,
+
+	.vidioc_streamon		= v4l2_m2m_ioctl_streamon,
+	.vidioc_streamoff		= v4l2_m2m_ioctl_streamoff,
+};
+
+/*
+ * File operations
+ */
+/*
+ * File .open: allocate a per-open context, initialize its v4l2 file
+ * handle and mem2mem context, and register the file handle.
+ *
+ * NOTE(review): the context is devm-allocated, so memory from every
+ * open() accumulates until the device is unbound - consider plain
+ * kzalloc() paired with kfree() in release().
+ */
+static int xvip_m2m_open(struct file *file)
+{
+	struct xvip_m2m_dev *xdev = video_drvdata(file);
+	struct xvip_m2m_ctx *ctx = NULL;
+	int ret;
+
+	ctx = devm_kzalloc(xdev->dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	v4l2_fh_init(&ctx->fh, video_devdata(file));
+	file->private_data = &ctx->fh;
+	ctx->xdev = xdev;
+
+	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(xdev->m2m_dev, ctx,
+					    &xvip_m2m_queue_init);
+	if (IS_ERR(ctx->fh.m2m_ctx)) {
+		ret = PTR_ERR(ctx->fh.m2m_ctx);
+		v4l2_fh_exit(&ctx->fh);
+		return ret;
+	}
+
+	v4l2_fh_add(&ctx->fh);
+	dev_info(xdev->dev, "Created instance %p, m2m_ctx: %p\n", ctx,
+		 ctx->fh.m2m_ctx);
+	return 0;
+}
+
+/*
+ * File .release: tear down the mem2mem context and the v4l2 file
+ * handle created in open().
+ *
+ * Fix: open() calls v4l2_fh_add(), so release must undo it with
+ * v4l2_fh_del()/v4l2_fh_exit(); otherwise the stale fh stays on the
+ * video device's fh list after close.
+ */
+static int xvip_m2m_release(struct file *file)
+{
+	struct xvip_m2m_ctx *ctx = file->private_data;
+
+	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+	v4l2_fh_del(&ctx->fh);
+	v4l2_fh_exit(&ctx->fh);
+	file->private_data = NULL;
+	return 0;
+}
+
+/*
+ * File .poll: delegate to the mem2mem poll helper under the device
+ * mutex.
+ *
+ * NOTE(review): returns u32 / stores into int - confirm against the
+ * target kernel's v4l2_file_operations, which uses __poll_t for .poll
+ * on recent kernels.
+ */
+static u32 xvip_m2m_poll(struct file *file,
+			 struct poll_table_struct *wait)
+{
+	struct xvip_m2m_ctx *ctx = file->private_data;
+	int ret;
+
+	mutex_lock(&ctx->xdev->lock);
+	ret = v4l2_m2m_poll(file, ctx->fh.m2m_ctx, wait);
+	mutex_unlock(&ctx->xdev->lock);
+
+	return ret;
+}
+
+/* File .mmap: map OUTPUT/CAPTURE buffers via the mem2mem helper. */
+static int xvip_m2m_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	return v4l2_m2m_mmap(file, file2ctx(file)->fh.m2m_ctx, vma);
+}
+
+/*
+ * mem2mem callbacks
+ */
+
+/*
+ * m2m .job_ready: a transaction can run once at least one source and
+ * one destination buffer are queued.
+ */
+static int xvip_m2m_job_ready(void *priv)
+{
+	struct xvip_m2m_ctx *ctx = priv;
+
+	return v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) > 0 &&
+	       v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) > 0;
+}
+
+/*
+ * m2m .job_abort: finish the current job immediately; the in-flight
+ * DMA transaction is cancelled later by the interrupt handler.
+ */
+static void xvip_m2m_job_abort(void *priv)
+{
+	struct xvip_m2m_ctx *ctx = priv;
+
+	/* Will cancel the transaction in the next interrupt handler */
+	v4l2_m2m_job_finish(ctx->xdev->m2m_dev, ctx->fh.m2m_ctx);
+}
+
+/*
+ * Prepare and submit the device-to-memory (rx/capture) interleaved DMA
+ * descriptor for one destination buffer.
+ *
+ * Builds the interleaved template from the CAPTURE format: sgl[0].size
+ * is the payload bytes per line (scaled by the format's padding and
+ * bpl factors), icg the per-line gap up to bytesperline, and numf the
+ * number of lines (crop rectangle when cropping is active).
+ */
+static void xvip_m2m_prep_submit_dev2mem_desc(struct xvip_m2m_ctx *ctx,
+					      struct vb2_v4l2_buffer *dst_buf)
+{
+	struct xvip_m2m_dma *dma = ctx->xdev->dma;
+	struct xvip_m2m_dev *xdev = ctx->xdev;
+	struct dma_async_tx_descriptor *desc;
+	dma_addr_t p_out;
+	const struct xvip_video_format *info;
+	struct v4l2_pix_format_mplane *pix_mp;
+	u32 padding_factor_nume, padding_factor_deno;
+	u32 bpl_nume, bpl_deno;
+	u32 luma_size;
+	u32 flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+	enum operation_mode mode = DEFAULT;
+	u32 bpl, dst_width, dst_height;
+
+	p_out = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+
+	if (!p_out) {
+		dev_err(xdev->dev,
+			"Acquiring kernel pointer to buffer failed\n");
+		return;
+	}
+
+	ctx->xt.dir = DMA_DEV_TO_MEM;
+	ctx->xt.src_sgl = false;
+	ctx->xt.dst_sgl = true;
+	ctx->xt.dst_start = p_out;
+
+	pix_mp = &dma->capfmt.fmt.pix_mp;
+	bpl = pix_mp->plane_fmt[0].bytesperline;
+	/* With an active crop, write only the crop rectangle. */
+	if (dma->crop) {
+		dst_width = dma->r.width;
+		dst_height = dma->r.height;
+	} else {
+		dst_width = pix_mp->width;
+		dst_height = pix_mp->height;
+	}
+
+	info = dma->capinfo;
+	xilinx_xdma_set_mode(dma->chan_rx, mode);
+	xilinx_xdma_v4l2_config(dma->chan_rx, pix_mp->pixelformat);
+	xvip_width_padding_factor(pix_mp->pixelformat, &padding_factor_nume,
+				  &padding_factor_deno);
+	xvip_bpl_scaling_factor(pix_mp->pixelformat, &bpl_nume, &bpl_deno);
+
+	ctx->xt.frame_size = info->num_planes;
+	/* Payload bytes per line, scaled by format-specific factors. */
+	ctx->sgl[0].size = (dst_width * info->bpl_factor *
+			    padding_factor_nume * bpl_nume) /
+			    (padding_factor_deno * bpl_deno);
+	ctx->sgl[0].icg = bpl - ctx->sgl[0].size;
+	ctx->xt.numf = dst_height;
+
+	/*
+	 * dst_icg is the number of bytes to jump after last luma addr
+	 * and before first chroma addr
+	 */
+	ctx->sgl[0].src_icg = 0;
+
+	if (info->buffers == 1) {
+		/* Handling contiguous data with mplanes */
+		ctx->sgl[0].dst_icg = 0;
+		/* Skip the uncropped remainder of the luma plane. */
+		if (dma->crop)
+			ctx->sgl[0].dst_icg = bpl *
+					      (pix_mp->height - dst_height);
+	} else {
+		/* Handling non-contiguous data with mplanes */
+		if (info->buffers == 2) {
+			dma_addr_t chroma_cap =
+			vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 1);
+			luma_size = pix_mp->plane_fmt[0].bytesperline *
+				    ctx->xt.numf;
+			if (chroma_cap > p_out)
+				ctx->sgl[0].dst_icg = chroma_cap - p_out -
+						      luma_size;
+		}
+	}
+
+	desc = dmaengine_prep_interleaved_dma(dma->chan_rx, &ctx->xt, flags);
+	if (!desc) {
+		dev_err(xdev->dev, "Failed to prepare DMA rx transfer\n");
+		return;
+	}
+
+	desc->callback = xvip_m2m_dma_callback;
+	desc->callback_param = ctx;
+	dmaengine_submit(desc);
+	dma_async_issue_pending(dma->chan_rx);
+}
+
+/*
+ * Prepare and submit the memory-to-device (tx/output) interleaved DMA
+ * descriptor for one source buffer.  Mirror image of the dev2mem path:
+ * the template is derived from the OUTPUT format and src_icg carries
+ * the luma-to-chroma gap / crop skip instead of dst_icg.
+ */
+static void xvip_m2m_prep_submit_mem2dev_desc(struct xvip_m2m_ctx *ctx,
+					      struct vb2_v4l2_buffer *src_buf)
+{
+	struct xvip_m2m_dma *dma = ctx->xdev->dma;
+	struct xvip_m2m_dev *xdev = ctx->xdev;
+	struct dma_async_tx_descriptor *desc;
+	dma_addr_t p_in;
+	const struct xvip_video_format *info;
+	struct v4l2_pix_format_mplane *pix_mp;
+	u32 padding_factor_nume, padding_factor_deno;
+	u32 bpl_nume, bpl_deno;
+	u32 luma_size;
+	u32 flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+	enum operation_mode mode = DEFAULT;
+	u32 bpl, src_width, src_height;
+
+	p_in = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+
+	if (!p_in) {
+		dev_err(xdev->dev,
+			"Acquiring kernel pointer to buffer failed\n");
+		return;
+	}
+
+	ctx->xt.dir = DMA_MEM_TO_DEV;
+	ctx->xt.src_sgl = true;
+	ctx->xt.dst_sgl = false;
+	ctx->xt.src_start = p_in;
+
+	pix_mp = &dma->outfmt.fmt.pix_mp;
+	bpl = pix_mp->plane_fmt[0].bytesperline;
+	/* With an active crop, read only the crop rectangle. */
+	if (dma->crop) {
+		src_width = dma->r.width;
+		src_height = dma->r.height;
+	} else {
+		src_width = pix_mp->width;
+		src_height = pix_mp->height;
+	}
+
+	info = dma->outinfo;
+	xilinx_xdma_set_mode(dma->chan_tx, mode);
+	xilinx_xdma_v4l2_config(dma->chan_tx, pix_mp->pixelformat);
+	xvip_width_padding_factor(pix_mp->pixelformat, &padding_factor_nume,
+				  &padding_factor_deno);
+	xvip_bpl_scaling_factor(pix_mp->pixelformat, &bpl_nume, &bpl_deno);
+
+	ctx->xt.frame_size = info->num_planes;
+	/* Payload bytes per line, scaled by format-specific factors. */
+	ctx->sgl[0].size = (src_width * info->bpl_factor *
+			    padding_factor_nume * bpl_nume) /
+			    (padding_factor_deno * bpl_deno);
+	ctx->sgl[0].icg = bpl - ctx->sgl[0].size;
+	ctx->xt.numf = src_height;
+
+	/*
+	 * src_icg is the number of bytes to jump after last luma addr
+	 * and before first chroma addr
+	 */
+	ctx->sgl[0].dst_icg = 0;
+
+	if (info->buffers == 1) {
+		/* Handling contiguous data with mplanes */
+		ctx->sgl[0].src_icg = 0;
+		/* Skip the uncropped remainder of the luma plane. */
+		if (dma->crop)
+			ctx->sgl[0].src_icg = bpl *
+					      (pix_mp->height - src_height);
+	} else {
+		/* Handling non-contiguous data with mplanes */
+		if (info->buffers == 2) {
+			dma_addr_t chroma_out =
+			vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 1);
+			luma_size = bpl * ctx->xt.numf;
+			if (chroma_out > p_in)
+				ctx->sgl[0].src_icg = chroma_out - p_in -
+						      luma_size;
+		}
+	}
+
+	desc = dmaengine_prep_interleaved_dma(dma->chan_tx, &ctx->xt, flags);
+	if (!desc) {
+		dev_err(xdev->dev, "Failed to prepare DMA tx transfer\n");
+		return;
+	}
+
+	desc->callback = xvip_m2m_dma_callback_mem2dev;
+	desc->callback_param = ctx;
+	dmaengine_submit(desc);
+	dma_async_issue_pending(dma->chan_tx);
+}
+
+/**
+ * xvip_m2m_device_run - prepares and starts the device
+ *
+ * @priv: Instance private data
+ *
+ * This simulates all the immediate preparations required before starting
+ * a device. This will be called by the framework when it decides to schedule
+ * a particular instance.  It submits the mem-to-dev (source) transaction
+ * first, then the dev-to-mem (destination) transaction; the DMA
+ * completion callbacks finish the job.
+ */
+static void xvip_m2m_device_run(void *priv)
+{
+	struct xvip_m2m_ctx *ctx = priv;
+	struct vb2_v4l2_buffer *src_buf, *dst_buf;
+
+	/* Peek (do not remove) the next queued buffer on each side. */
+	src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+	dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+	/* Prepare and submit mem2dev transaction */
+	xvip_m2m_prep_submit_mem2dev_desc(ctx, src_buf);
+
+	/* Prepare and submit dev2mem transaction */
+	xvip_m2m_prep_submit_dev2mem_desc(ctx, dst_buf);
+}
+
+/* V4L2 file operations for the mem2mem video node. */
+static const struct v4l2_file_operations xvip_m2m_fops = {
+	.owner		= THIS_MODULE,
+	.open		= xvip_m2m_open,
+	.release	= xvip_m2m_release,
+	.poll		= xvip_m2m_poll,
+	.unlocked_ioctl	= video_ioctl2,
+	.mmap		= xvip_m2m_mmap,
+};
+
+/* Template video_device; copied into each DMA instance before register. */
+static struct video_device xvip_m2m_videodev = {
+	.name		= XVIP_M2M_NAME,
+	.fops		= &xvip_m2m_fops,
+	.ioctl_ops	= &xvip_m2m_ioctl_ops,
+	.release	= video_device_release_empty,
+	.vfl_dir	= VFL_DIR_M2M,
+	.device_caps	= V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING,
+	.vfl_type	= VFL_TYPE_VIDEO,
+};
+
+/* mem2mem framework callbacks. */
+static const struct v4l2_m2m_ops xvip_m2m_ops = {
+	.device_run = xvip_m2m_device_run,
+	.job_ready  = xvip_m2m_job_ready,
+	.job_abort  = xvip_m2m_job_abort,
+};
+
+/*
+ * Initialize the mem2mem DMA instance: default capture/output formats,
+ * tx/rx DMA channels, media pads and the video device node.
+ *
+ * Fixes: (1) the output-port defaults were written into capfmt instead
+ * of outfmt (copy-paste), leaving outfmt zeroed; (2) the
+ * media_entity_pads_init() error path returned without releasing the
+ * two DMA channels already acquired.
+ *
+ * Return: 0 on success or a negative errno.
+ */
+static int xvip_m2m_dma_init(struct xvip_m2m_dma *dma)
+{
+	struct xvip_m2m_dev *xdev;
+	struct v4l2_pix_format_mplane *pix_mp;
+	int ret;
+
+	xdev = dma->xdev;
+	mutex_init(&xdev->lock);
+	mutex_init(&dma->pipe.lock);
+	spin_lock_init(&xdev->queued_lock);
+
+	/* Format info on capture port - NV12 is the default format */
+	dma->capinfo = xvip_get_format_by_fourcc(XVIP_M2M_DEFAULT_FMT);
+	pix_mp = &dma->capfmt.fmt.pix_mp;
+	pix_mp->pixelformat = dma->capinfo->fourcc;
+
+	pix_mp->field = V4L2_FIELD_NONE;
+	pix_mp->width = XVIP_M2M_DEF_WIDTH;
+	pix_mp->height = XVIP_M2M_DEF_HEIGHT;
+	pix_mp->plane_fmt[0].bytesperline = pix_mp->width *
+					    dma->capinfo->bpl_factor;
+	pix_mp->plane_fmt[0].sizeimage =
+			DIV_ROUND_UP(pix_mp->plane_fmt[0].bytesperline *
+				     pix_mp->height * dma->capinfo->bpp, 8);
+
+	/* Format info on output port - NV12 is the default format */
+	dma->outinfo = xvip_get_format_by_fourcc(XVIP_M2M_DEFAULT_FMT);
+	pix_mp = &dma->outfmt.fmt.pix_mp;
+	pix_mp->pixelformat = dma->outinfo->fourcc;
+	pix_mp->field = V4L2_FIELD_NONE;
+	pix_mp->width = XVIP_M2M_DEF_WIDTH;
+	pix_mp->height = XVIP_M2M_DEF_HEIGHT;
+	pix_mp->plane_fmt[0].bytesperline = pix_mp->width *
+					    dma->outinfo->bpl_factor;
+	pix_mp->plane_fmt[0].sizeimage =
+			DIV_ROUND_UP(pix_mp->plane_fmt[0].bytesperline *
+				     pix_mp->height * dma->outinfo->bpp, 8);
+
+	/* DMA channels for mem2mem */
+	dma->chan_tx = dma_request_chan(xdev->dev, "tx");
+	if (IS_ERR(dma->chan_tx)) {
+		ret = PTR_ERR(dma->chan_tx);
+		if (ret != -EPROBE_DEFER)
+			dev_err(xdev->dev, "mem2mem DMA tx channel not found");
+
+		return ret;
+	}
+
+	dma->chan_rx = dma_request_chan(xdev->dev, "rx");
+	if (IS_ERR(dma->chan_rx)) {
+		ret = PTR_ERR(dma->chan_rx);
+		if (ret != -EPROBE_DEFER)
+			dev_err(xdev->dev, "mem2mem DMA rx channel not found");
+
+		goto tx;
+	}
+
+	dma->align = BIT(dma->chan_tx->device->copy_align);
+
+	/* Video node */
+	dma->video = xvip_m2m_videodev;
+	dma->video.v4l2_dev = &xdev->v4l2_dev;
+	dma->video.lock = &xdev->lock;
+
+	dma->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+	dma->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+	ret = media_entity_pads_init(&dma->video.entity, 2, dma->pads);
+	if (ret < 0)
+		goto tx_rx;
+
+	ret = video_register_device(&dma->video, VFL_TYPE_VIDEO, -1);
+	if (ret < 0) {
+		dev_err(xdev->dev, "Failed to register mem2mem video device\n");
+		goto tx_rx;
+	}
+
+	video_set_drvdata(&dma->video, dma->xdev);
+	return 0;
+
+tx_rx:
+	dma_release_channel(dma->chan_rx);
+tx:
+	dma_release_channel(dma->chan_tx);
+
+	return ret;
+}
+
+/*
+ * Undo xvip_m2m_dma_init(): unregister the video node, destroy the
+ * mutexes and release both DMA channels.  Must only be called after a
+ * successful init (both channels valid).
+ */
+static void xvip_m2m_dma_deinit(struct xvip_m2m_dma *dma)
+{
+	if (video_is_registered(&dma->video))
+		video_unregister_device(&dma->video);
+
+	mutex_destroy(&dma->pipe.lock);
+	mutex_destroy(&dma->xdev->lock);
+	dma_release_channel(dma->chan_tx);
+	dma_release_channel(dma->chan_rx);
+}
+
+/*
+ * Allocate the single DMA instance for the device and run its full
+ * initialization, then record the device-level V4L2 capabilities.
+ *
+ * Return: 0 on success, -ENOMEM or the xvip_m2m_dma_init() error.
+ */
+static int xvip_m2m_dma_alloc_init(struct xvip_m2m_dev *xdev)
+{
+	struct xvip_m2m_dma *dma;
+	int ret;
+
+	dma = devm_kzalloc(xdev->dev, sizeof(*dma), GFP_KERNEL);
+	if (!dma)
+		return -ENOMEM;
+
+	/* Link the instance and the device both ways. */
+	dma->xdev = xdev;
+	xdev->dma = dma;
+
+	ret = xvip_m2m_dma_init(xdev->dma);
+	if (ret) {
+		dev_err(xdev->dev, "DMA initialization failed\n");
+		return ret;
+	}
+
+	xdev->v4l2_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
+	return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+/* Undo xvip_composite_v4l2_init(): unregister v4l2 and media devices. */
+static void xvip_composite_v4l2_cleanup(struct xvip_m2m_dev *xdev)
+{
+	v4l2_device_unregister(&xdev->v4l2_dev);
+	media_device_unregister(&xdev->media_dev);
+	media_device_cleanup(&xdev->media_dev);
+}
+
+/*
+ * Initialize the media device and register the V4L2 device on top of
+ * it.  The media device itself is registered later, once the graph is
+ * complete (see xvip_graph_notify_complete()).
+ *
+ * Fix: typo in the media device model string ("Videoi" -> "Video").
+ *
+ * Return: 0 on success or the v4l2_device_register() error.
+ */
+static int xvip_composite_v4l2_init(struct xvip_m2m_dev *xdev)
+{
+	int ret;
+
+	xdev->media_dev.dev = xdev->dev;
+	strlcpy(xdev->media_dev.model, "Xilinx Video M2M Composite Device",
+		sizeof(xdev->media_dev.model));
+	xdev->media_dev.hw_revision = 0;
+
+	media_device_init(&xdev->media_dev);
+
+	xdev->v4l2_dev.mdev = &xdev->media_dev;
+	ret = v4l2_device_register(xdev->dev, &xdev->v4l2_dev);
+	if (ret < 0) {
+		dev_err(xdev->dev, "V4L2 device registration failed (%d)\n",
+			ret);
+		media_device_cleanup(&xdev->media_dev);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Look up a parsed graph entity by its device-tree node.
+ *
+ * Return: the matching entity or NULL when none was parsed for @node.
+ */
+static struct xvip_graph_entity *
+xvip_graph_find_entity(struct xvip_m2m_dev *xdev,
+		       const struct device_node *node)
+{
+	struct xvip_graph_entity *e;
+
+	list_for_each_entry(e, &xdev->entities, list)
+		if (e->node == node)
+			return e;
+
+	return NULL;
+}
+
+/*
+ * Create media links for one graph entity by walking its OF-graph
+ * endpoints.  Only source pads are processed here (sink-side links are
+ * created from the other end) and links towards the composite device
+ * itself are skipped - those are DMA links handled by
+ * xvip_graph_build_dma().
+ *
+ * Return: 0 on success or a negative errno on the first hard failure.
+ */
+static int xvip_graph_build_one(struct xvip_m2m_dev *xdev,
+				struct xvip_graph_entity *entity)
+{
+	u32 link_flags = MEDIA_LNK_FL_ENABLED;
+	struct media_entity *local = entity->entity;
+	struct media_entity *remote;
+	struct media_pad *local_pad;
+	struct media_pad *remote_pad;
+	struct xvip_graph_entity *ent;
+	struct v4l2_fwnode_link link;
+	struct device_node *ep = NULL;
+	struct device_node *next;
+	int ret = 0;
+
+	dev_dbg(xdev->dev, "creating links for entity %s\n", local->name);
+
+	while (1) {
+		/* Get the next endpoint and parse its link. */
+		next = of_graph_get_next_endpoint(entity->node, ep);
+		if (!next)
+			break;
+
+		ep = next;
+
+		dev_dbg(xdev->dev, "processing endpoint %pOF\n", ep);
+
+		ret = v4l2_fwnode_parse_link(of_fwnode_handle(ep), &link);
+		if (ret < 0) {
+			/* Unparseable endpoint: log and keep walking. */
+			dev_err(xdev->dev, "failed to parse link for %pOF\n",
+				ep);
+			continue;
+		}
+
+		/* Skip sink ports, they will be processed from the other end of
+		 * the link.
+		 */
+		if (link.local_port >= local->num_pads) {
+			dev_err(xdev->dev, "invalid port number %u for %pOF\n",
+				link.local_port,
+				to_of_node(link.local_node));
+			v4l2_fwnode_put_link(&link);
+			ret = -EINVAL;
+			break;
+		}
+
+		local_pad = &local->pads[link.local_port];
+
+		if (local_pad->flags & MEDIA_PAD_FL_SINK) {
+			dev_dbg(xdev->dev, "skipping sink port %pOF:%u\n",
+				to_of_node(link.local_node),
+				link.local_port);
+			v4l2_fwnode_put_link(&link);
+			continue;
+		}
+
+		/* Skip DMA engines, they will be processed separately. */
+		if (link.remote_node == of_fwnode_handle(xdev->dev->of_node)) {
+			dev_dbg(xdev->dev, "skipping DMA port %pOF:%u\n",
+				to_of_node(link.local_node),
+				link.local_port);
+			v4l2_fwnode_put_link(&link);
+			continue;
+		}
+
+		/* Find the remote entity. */
+		ent = xvip_graph_find_entity(xdev,
+					     to_of_node(link.remote_node));
+		if (!ent) {
+			dev_err(xdev->dev, "no entity found for %pOF\n",
+				to_of_node(link.remote_node));
+			v4l2_fwnode_put_link(&link);
+			ret = -ENODEV;
+			break;
+		}
+
+		remote = ent->entity;
+
+		if (link.remote_port >= remote->num_pads) {
+			dev_err(xdev->dev, "invalid port number %u on %pOF\n",
+				link.remote_port, to_of_node(link.remote_node));
+			v4l2_fwnode_put_link(&link);
+			ret = -EINVAL;
+			break;
+		}
+
+		remote_pad = &remote->pads[link.remote_port];
+
+		v4l2_fwnode_put_link(&link);
+
+		/* Create the media link. */
+		dev_dbg(xdev->dev, "creating %s:%u -> %s:%u link\n",
+			local->name, local_pad->index,
+			remote->name, remote_pad->index);
+
+		ret = media_create_pad_link(local, local_pad->index,
+					    remote, remote_pad->index,
+					    link_flags);
+		if (ret < 0) {
+			dev_err(xdev->dev,
+				"failed to create %s:%u -> %s:%u link\n",
+				local->name, local_pad->index,
+				remote->name, remote_pad->index);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Parse one device-tree node: for every endpoint, record the remote
+ * port parent as a graph entity (once) and set up its async-subdev
+ * fwnode match.  Each recorded entity holds a reference on its OF node
+ * which is dropped in xvip_graph_cleanup().
+ *
+ * Return: 0 on success or a negative errno.
+ */
+static int xvip_graph_parse_one(struct xvip_m2m_dev *xdev,
+				struct device_node *node)
+{
+	struct xvip_graph_entity *entity;
+	struct device_node *remote;
+	struct device_node *ep = NULL;
+	int ret = 0;
+
+	dev_dbg(xdev->dev, "parsing node %pOF\n", node);
+
+	while (1) {
+		ep = of_graph_get_next_endpoint(node, ep);
+		if (!ep)
+			break;
+
+		dev_dbg(xdev->dev, "handling endpoint %pOF %s\n",
+			ep, ep->name);
+
+		remote = of_graph_get_remote_port_parent(ep);
+		if (!remote) {
+			ret = -EINVAL;
+			break;
+		}
+		dev_dbg(xdev->dev, "Remote endpoint %pOF %s\n",
+			remote, remote->name);
+
+		/* Skip entities that we have already processed. */
+		if (remote == xdev->dev->of_node ||
+		    xvip_graph_find_entity(xdev, remote)) {
+			of_node_put(remote);
+			continue;
+		}
+
+		entity = devm_kzalloc(xdev->dev, sizeof(*entity), GFP_KERNEL);
+		if (!entity) {
+			of_node_put(remote);
+			ret = -ENOMEM;
+			break;
+		}
+
+		/* The entity keeps the 'remote' reference until cleanup. */
+		entity->node = remote;
+		entity->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+		entity->asd.match.fwnode = of_fwnode_handle(remote);
+		list_add_tail(&entity->list, &xdev->entities);
+		xdev->num_subdevs++;
+	}
+
+	of_node_put(ep);
+	return ret;
+}
+
+/*
+ * Parse the whole graph starting from the composite node, then each
+ * discovered entity in turn.  Entities appended while iterating are
+ * picked up by the list walk.
+ *
+ * Return: 0 or a negative errno from parsing an entity node.
+ */
+static int xvip_graph_parse(struct xvip_m2m_dev *xdev)
+{
+	struct xvip_graph_entity *entity;
+	int ret;
+
+	/*
+	 * Walk the links to parse the full graph. Start by parsing the
+	 * composite node and then parse entities in turn. The list_for_each
+	 * loop will handle entities added at the end of the list while walking
+	 * the links.
+	 */
+	ret = xvip_graph_parse_one(xdev, xdev->dev->of_node);
+	if (ret < 0)
+		/*
+		 * NOTE(review): the error is deliberately swallowed here
+		 * (no graph / no subdevs is a valid configuration) -
+		 * confirm this matches the intended behavior.
+		 */
+		return 0;
+
+	list_for_each_entry(entity, &xdev->entities, list) {
+		ret = xvip_graph_parse_one(xdev, entity->node);
+		if (ret < 0)
+			break;
+	}
+
+	return ret;
+}
+
+/*
+ * Create media links between the DMA video entity and the remote
+ * subdev entities, one per endpoint of the composite node.  The local
+ * port number decides the direction: XVIP_PAD_SOURCE means the DMA
+ * entity feeds the remote sink, anything else means the remote source
+ * feeds the DMA sink.
+ *
+ * Return: 0 on success or a negative errno on the first hard failure.
+ */
+static int xvip_graph_build_dma(struct xvip_m2m_dev *xdev)
+{
+	u32 link_flags = MEDIA_LNK_FL_ENABLED;
+	struct device_node *node = xdev->dev->of_node;
+	struct media_entity *source;
+	struct media_entity *sink;
+	struct media_pad *source_pad;
+	struct media_pad *sink_pad;
+	struct xvip_graph_entity *ent;
+	struct v4l2_fwnode_link link;
+	struct device_node *ep = NULL;
+	struct device_node *next;
+	struct xvip_m2m_dma *dma = xdev->dma;
+	int ret = 0;
+
+	dev_dbg(xdev->dev, "creating links for DMA engines\n");
+
+	while (1) {
+		/* Get the next endpoint and parse its link. */
+		next = of_graph_get_next_endpoint(node, ep);
+		if (!next)
+			break;
+
+		ep = next;
+
+		dev_dbg(xdev->dev, "processing endpoint %pOF\n", ep);
+
+		ret = v4l2_fwnode_parse_link(of_fwnode_handle(ep), &link);
+		if (ret < 0) {
+			/* Unparseable endpoint: log and keep walking. */
+			dev_err(xdev->dev, "failed to parse link for %pOF\n",
+				ep);
+			continue;
+		}
+
+		dev_dbg(xdev->dev, "creating link for DMA engine %s\n",
+			dma->video.name);
+
+		/* Find the remote entity. */
+		ent = xvip_graph_find_entity(xdev,
+					     to_of_node(link.remote_node));
+		if (!ent) {
+			dev_err(xdev->dev, "no entity found for %pOF\n",
+				to_of_node(link.remote_node));
+			v4l2_fwnode_put_link(&link);
+			ret = -ENODEV;
+			break;
+		}
+		if (link.remote_port >= ent->entity->num_pads) {
+			dev_err(xdev->dev, "invalid port number %u on %pOF\n",
+				link.remote_port,
+				to_of_node(link.remote_node));
+			v4l2_fwnode_put_link(&link);
+			ret = -EINVAL;
+			break;
+		}
+
+		dev_dbg(xdev->dev, "Entity %s %s\n", ent->node->name,
+			ent->node->full_name);
+		dev_dbg(xdev->dev, "port number %u on %pOF\n",
+			link.remote_port, to_of_node(link.remote_node));
+		dev_dbg(xdev->dev, "local port number %u on %pOF\n",
+			link.local_port, to_of_node(link.local_node));
+
+		/* Direction of the link follows the local port. */
+		if (link.local_port == XVIP_PAD_SOURCE) {
+			source = &dma->video.entity;
+			source_pad = &dma->pads[XVIP_PAD_SOURCE];
+			sink = ent->entity;
+			sink_pad = &sink->pads[XVIP_PAD_SINK];
+
+		} else {
+			source = ent->entity;
+			source_pad = &source->pads[XVIP_PAD_SOURCE];
+			sink = &dma->video.entity;
+			sink_pad = &dma->pads[XVIP_PAD_SINK];
+		}
+
+		v4l2_fwnode_put_link(&link);
+
+		/* Create the media link. */
+		dev_dbg(xdev->dev, "creating %s:%u -> %s:%u link\n",
+			source->name, source_pad->index,
+			sink->name, sink_pad->index);
+
+		ret = media_create_pad_link(source, source_pad->index,
+					    sink, sink_pad->index,
+					    link_flags);
+		if (ret < 0) {
+			dev_err(xdev->dev,
+				"failed to create %s:%u -> %s:%u link\n",
+				source->name, source_pad->index,
+				sink->name, sink_pad->index);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Async-notifier .complete: all subdevs have bound.  Create the media
+ * links between entities, then the DMA links, expose the subdev device
+ * nodes and finally register the media device.
+ *
+ * Return: 0 on success or a negative errno.
+ */
+static int xvip_graph_notify_complete(struct v4l2_async_notifier *notifier)
+{
+	struct xvip_m2m_dev *xdev =
+		container_of(notifier, struct xvip_m2m_dev, notifier);
+	struct xvip_graph_entity *entity;
+	int ret;
+
+	dev_dbg(xdev->dev, "notify complete, all subdevs registered\n");
+
+	/* Create links for every entity. */
+	list_for_each_entry(entity, &xdev->entities, list) {
+		ret = xvip_graph_build_one(xdev, entity);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* Create links for DMA channels. */
+	ret = xvip_graph_build_dma(xdev);
+	if (ret < 0)
+		return ret;
+
+	ret = v4l2_device_register_subdev_nodes(&xdev->v4l2_dev);
+	if (ret < 0)
+		dev_err(xdev->dev, "failed to register subdev nodes\n");
+
+	return media_device_register(&xdev->media_dev);
+}
+
+/*
+ * xvip_graph_notify_bound - v4l2 async notifier .bound callback.
+ * @notifier: notifier embedded in struct xvip_m2m_dev
+ * @subdev: subdevice that has just been bound
+ * @asd: matching async subdev descriptor
+ *
+ * Finds the graph entity whose OF node matches the bound subdevice and
+ * stores the subdev/entity pointers on it, so the .complete callback
+ * can build the media links later.
+ *
+ * Return: 0 on success, -EINVAL if the subdev is a duplicate or has no
+ * matching entity in the parsed graph.
+ */
+static int xvip_graph_notify_bound(struct v4l2_async_notifier *notifier,
+				   struct v4l2_subdev *subdev,
+				   struct v4l2_async_subdev *asd)
+{
+	struct xvip_m2m_dev *xdev =
+		container_of(notifier, struct xvip_m2m_dev, notifier);
+	struct xvip_graph_entity *entity;
+
+	/* Locate the entity corresponding to the bound subdev and store the
+	 * subdev pointer.
+	 */
+	list_for_each_entry(entity, &xdev->entities, list) {
+		if (entity->node != subdev->dev->of_node)
+			continue;
+
+		/* A second bind for the same node indicates a setup error. */
+		if (entity->subdev) {
+			dev_err(xdev->dev, "duplicate subdev for node %pOF\n",
+				entity->node);
+			return -EINVAL;
+		}
+
+		dev_dbg(xdev->dev, "subdev %s bound\n", subdev->name);
+		entity->entity = &subdev->entity;
+		entity->subdev = subdev;
+		return 0;
+	}
+
+	dev_err(xdev->dev, "no entity for subdev %s\n", subdev->name);
+	return -EINVAL;
+}
+
+/* Async notifier callbacks: bind each subdev, then build the graph. */
+static const struct v4l2_async_notifier_operations xvip_graph_notify_ops = {
+	.bound = xvip_graph_notify_bound,
+	.complete = xvip_graph_notify_complete,
+};
+
+/*
+ * xvip_graph_cleanup - Tear down the parsed subdev graph state.
+ * @xdev: composite mem2mem device
+ *
+ * Unregisters the async notifier, releases its resources and drops the
+ * OF node references taken while parsing the graph.
+ */
+static void xvip_graph_cleanup(struct xvip_m2m_dev *xdev)
+{
+	struct xvip_graph_entity *entityp;
+	struct xvip_graph_entity *entity;
+
+	/*
+	 * The notifier must be unregistered before its resources are
+	 * released: v4l2_async_notifier_cleanup() frees bookkeeping that
+	 * the core may still reference while the notifier is registered.
+	 * The original code performed these two calls in the opposite
+	 * (incorrect) order.
+	 */
+	v4l2_async_notifier_unregister(&xdev->notifier);
+	v4l2_async_notifier_cleanup(&xdev->notifier);
+
+	list_for_each_entry_safe(entity, entityp, &xdev->entities, list) {
+		of_node_put(entity->node);
+		list_del(&entity->list);
+	}
+}
+
+/*
+ * xvip_graph_init - Parse the DT graph and register the async notifier.
+ * @xdev: composite mem2mem device
+ *
+ * Initializes the DMA channels, parses the OF graph into a list of
+ * subdevice entities and registers an async notifier for them.  On any
+ * failure the partially initialized graph state is cleaned up.
+ *
+ * Return: 0 on success or a negative error code.
+ */
+static int xvip_graph_init(struct xvip_m2m_dev *xdev)
+{
+	struct xvip_graph_entity *entity;
+	int ret;
+
+	/* Init the DMA channels. */
+	ret = xvip_m2m_dma_alloc_init(xdev);
+	if (ret < 0) {
+		dev_err(xdev->dev, "DMA initialization failed\n");
+		goto done;
+	}
+
+	/* Parse the graph to extract a list of subdevice DT nodes. */
+	ret = xvip_graph_parse(xdev);
+	if (ret < 0) {
+		dev_err(xdev->dev, "graph parsing failed\n");
+		goto done;
+	}
+	dev_dbg(xdev->dev, "Number of subdev = %d\n", xdev->num_subdevs);
+
+	if (!xdev->num_subdevs) {
+		dev_err(xdev->dev, "no subdev found in graph\n");
+		/*
+		 * The original code fell through to "done" with ret == 0
+		 * here, reporting success to the caller despite the
+		 * logged error.  Return a real error instead.
+		 */
+		ret = -ENOENT;
+		goto done;
+	}
+
+	/* Register the subdevices notifier. */
+	list_for_each_entry(entity, &xdev->entities, list) {
+		ret = v4l2_async_notifier_add_subdev(&xdev->notifier,
+						     &entity->asd);
+		if (ret)
+			goto done;
+	}
+
+	xdev->notifier.ops = &xvip_graph_notify_ops;
+
+	ret = v4l2_async_notifier_register(&xdev->v4l2_dev, &xdev->notifier);
+	if (ret < 0) {
+		dev_err(xdev->dev, "notifier registration failed\n");
+		goto done;
+	}
+
+	ret = 0;
+
+done:
+	if (ret < 0)
+		xvip_graph_cleanup(xdev);
+
+	return ret;
+}
+
+/*
+ * xvip_composite_remove - Tear down the composite device.
+ * @pdev: platform device
+ *
+ * Cleans up the graph state (notifier, entity list) and the V4L2 state
+ * created at probe time.
+ *
+ * NOTE(review): media_device_register() is called from the notifier
+ * complete handler; confirm xvip_composite_v4l2_cleanup() performs the
+ * matching media_device_unregister().
+ */
+static int xvip_composite_remove(struct platform_device *pdev)
+{
+	struct xvip_m2m_dev *xdev = platform_get_drvdata(pdev);
+
+	xvip_graph_cleanup(xdev);
+	xvip_composite_v4l2_cleanup(xdev);
+
+	return 0;
+}
+
+/*
+ * xvip_m2m_probe - Probe the composite mem2mem device.
+ * @pdev: platform device
+ *
+ * Allocates the device context, initializes the V4L2/media state,
+ * parses the graph, sets the DMA mask and creates the mem2mem device.
+ *
+ * Return: 0 on success or a negative error code.
+ */
+static int xvip_m2m_probe(struct platform_device *pdev)
+{
+	struct xvip_m2m_dev *xdev = NULL;
+	int ret;
+
+	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+	if (!xdev)
+		return -ENOMEM;
+
+	xdev->dev = &pdev->dev;
+	INIT_LIST_HEAD(&xdev->entities);
+
+	ret = xvip_composite_v4l2_init(xdev);
+	if (ret)
+		return -EINVAL;
+
+	ret = xvip_graph_init(xdev);
+	if (ret < 0)
+		goto error;
+
+	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+	if (ret) {
+		dev_err(&pdev->dev, "dma_set_coherent_mask: %d\n", ret);
+		goto dma_cleanup;
+	}
+
+	platform_set_drvdata(pdev, xdev);
+
+	xdev->m2m_dev = v4l2_m2m_init(&xvip_m2m_ops);
+	if (IS_ERR(xdev->m2m_dev)) {
+		dev_err(xdev->dev, "Failed to init mem2mem device\n");
+		ret = PTR_ERR(xdev->m2m_dev);
+		goto dma_cleanup;
+	}
+
+	dev_info(xdev->dev, "mem2mem device registered\n");
+	return 0;
+
+dma_cleanup:
+	/*
+	 * xvip_graph_init() succeeded before these error paths, so the
+	 * async notifier is registered and must be torn down here; the
+	 * original code leaked it (only the DMA channels were released).
+	 */
+	xvip_graph_cleanup(xdev);
+	xvip_m2m_dma_deinit(xdev->dma);
+
+error:
+	v4l2_device_unregister(&xdev->v4l2_dev);
+	return ret;
+}
+
+/*
+ * xvip_m2m_remove - Platform driver remove callback.
+ * @pdev: platform device
+ *
+ * All teardown is delegated to the composite remove helper.
+ *
+ * Return: always 0 (xvip_composite_remove() returns 0).
+ */
+static int xvip_m2m_remove(struct platform_device *pdev)
+{
+	return xvip_composite_remove(pdev);
+}
+
+/* Devicetree match table for the composite mem2mem node. */
+static const struct of_device_id xvip_m2m_of_id_table[] = {
+	{ .compatible = "xlnx,mem2mem" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, xvip_m2m_of_id_table);
+
+/* Platform driver glue; registered via module_platform_driver(). */
+static struct platform_driver xvip_m2m_driver = {
+	.driver = {
+		.name = XVIP_M2M_NAME,
+		.of_match_table = xvip_m2m_of_id_table,
+	},
+	.probe = xvip_m2m_probe,
+	.remove = xvip_m2m_remove,
+};
+
+module_platform_driver(xvip_m2m_driver);
+
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_DESCRIPTION("Xilinx V4L2 mem2mem driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-multi-scaler-coeff.h b/drivers/media/platform/xilinx/xilinx-multi-scaler-coeff.h
new file mode 100644
index 000000000000..65a3482aa249
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-multi-scaler-coeff.h
@@ -0,0 +1,574 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx Memory-to-Memory Video Multi-Scaler IP
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Author: Suresh Gupta <sureshg@xilinx.com>
+ *
+ * The file contains the coefficients used by the Xilinx
+ * Video Multi Scaler Controller driver (xm2msc)
+ *
+ */
+
+#define XSCALER_MAX_PHASES (64)
+#define XSCALER_MAX_TAPS (12)
+
+#define XSCALER_TAPS_6 (6)
+#define XSCALER_TAPS_8 (8)
+#define XSCALER_TAPS_10 (10)
+#define XSCALER_TAPS_12 (12)
+
+/* Filter bank ID for various filter tap configurations */
+enum xm2mvsc_filter_bank_id {
+	FILTER_BANK_TAPS_6 = 0,	/* bank holding the 6-tap coefficients */
+	FILTER_BANK_TAPS_8,	/* bank holding the 8-tap coefficients */
+	FILTER_BANK_TAPS_10,	/* bank holding the 10-tap coefficients */
+	FILTER_BANK_TAPS_12,	/* bank holding the 12-tap coefficients */
+};
+
+/* H-scaler coefficients for 6, 8, 10 and 12 tap filters */
+static const short
+xhsc_coeff_taps6[XSCALER_MAX_PHASES][XSCALER_TAPS_6] = {
+ { -132, 236, 3824, 236, -132, 64, },
+ { -116, 184, 3816, 292, -144, 64, },
+ { -100, 132, 3812, 348, -160, 64, },
+ { -88, 84, 3808, 404, -176, 64, },
+ { -72, 36, 3796, 464, -192, 64, },
+ { -60, -8, 3780, 524, -208, 68, },
+ { -48, -52, 3768, 588, -228, 68, },
+ { -32, -96, 3748, 652, -244, 68, },
+ { -20, -136, 3724, 716, -260, 72, },
+ { -8, -172, 3696, 784, -276, 72, },
+ { 0, -208, 3676, 848, -292, 72, },
+ { 12, -244, 3640, 920, -308, 76, },
+ { 20, -276, 3612, 988, -324, 76, },
+ { 32, -304, 3568, 1060, -340, 80, },
+ { 40, -332, 3532, 1132, -356, 80, },
+ { 48, -360, 3492, 1204, -372, 84, },
+ { 56, -384, 3448, 1276, -388, 88, },
+ { 64, -408, 3404, 1352, -404, 88, },
+ { 72, -428, 3348, 1428, -416, 92, },
+ { 76, -448, 3308, 1500, -432, 92, },
+ { 84, -464, 3248, 1576, -444, 96, },
+ { 88, -480, 3200, 1652, -460, 96, },
+ { 92, -492, 3140, 1728, -472, 100, },
+ { 96, -504, 3080, 1804, -484, 104, },
+ { 100, -516, 3020, 1880, -492, 104, },
+ { 104, -524, 2956, 1960, -504, 104, },
+ { 104, -532, 2892, 2036, -512, 108, },
+ { 108, -540, 2832, 2108, -520, 108, },
+ { 108, -544, 2764, 2184, -528, 112, },
+ { 112, -544, 2688, 2260, -532, 112, },
+ { 112, -548, 2624, 2336, -540, 112, },
+ { 112, -548, 2556, 2408, -544, 112, },
+ { 112, -544, 2480, 2480, -544, 112, },
+ { 112, -544, 2408, 2556, -548, 112, },
+ { 112, -540, 2336, 2624, -548, 112, },
+ { 112, -532, 2260, 2688, -544, 112, },
+ { 112, -528, 2184, 2764, -544, 108, },
+ { 108, -520, 2108, 2832, -540, 108, },
+ { 108, -512, 2036, 2892, -532, 104, },
+ { 104, -504, 1960, 2956, -524, 104, },
+ { 104, -492, 1880, 3020, -516, 100, },
+ { 104, -484, 1804, 3080, -504, 96, },
+ { 100, -472, 1728, 3140, -492, 92, },
+ { 96, -460, 1652, 3200, -480, 88, },
+ { 96, -444, 1576, 3248, -464, 84, },
+ { 92, -432, 1500, 3308, -448, 76, },
+ { 92, -416, 1428, 3348, -428, 72, },
+ { 88, -404, 1352, 3404, -408, 64, },
+ { 88, -388, 1276, 3448, -384, 56, },
+ { 84, -372, 1204, 3492, -360, 48, },
+ { 80, -356, 1132, 3532, -332, 40, },
+ { 80, -340, 1060, 3568, -304, 32, },
+ { 76, -324, 988, 3612, -276, 20, },
+ { 76, -308, 920, 3640, -244, 12, },
+ { 72, -292, 848, 3676, -208, 0, },
+ { 72, -276, 784, 3696, -172, -8, },
+ { 72, -260, 716, 3724, -136, -20, },
+ { 68, -244, 652, 3748, -96, -32, },
+ { 68, -228, 588, 3768, -52, -48, },
+ { 68, -208, 524, 3780, -8, -60, },
+ { 64, -192, 464, 3796, 36, -72, },
+ { 64, -176, 404, 3808, 84, -88, },
+ { 64, -160, 348, 3812, 132, -100, },
+ { 64, -144, 292, 3816, 184, -116, }
+};
+
+static const short
+xhsc_coeff_taps8[XSCALER_MAX_PHASES][XSCALER_TAPS_8] = {
+ {-5, 309, 1023, 1445, 1034, 317, -3, -24, },
+ {-6, 300, 1011, 1445, 1045, 326, -1, -24, },
+ {-7, 291, 1000, 1444, 1056, 336, 0, -24, },
+ {-9, 282, 988, 1444, 1067, 345, 2, -24, },
+ {-10, 274, 977, 1443, 1078, 354, 4, -24, },
+ {-11, 266, 965, 1441, 1089, 364, 6, -24, },
+ {-12, 258, 953, 1440, 1100, 373, 8, -24, },
+ {-13, 250, 942, 1438, 1110, 383, 10, -24, },
+ {-14, 242, 930, 1437, 1121, 393, 12, -24, },
+ {-15, 234, 918, 1434, 1131, 403, 14, -24, },
+ {-16, 226, 906, 1432, 1142, 413, 17, -24, },
+ {-17, 219, 894, 1430, 1152, 423, 19, -24, },
+ {-17, 211, 882, 1427, 1162, 433, 22, -24, },
+ {-18, 204, 870, 1424, 1172, 443, 24, -24, },
+ {-19, 197, 858, 1420, 1182, 454, 27, -24, },
+ {-19, 190, 846, 1417, 1191, 464, 30, -24, },
+ {-20, 183, 834, 1413, 1201, 475, 33, -24, },
+ {-20, 176, 822, 1409, 1210, 486, 36, -24, },
+ {-21, 170, 810, 1405, 1220, 497, 39, -24, },
+ {-21, 163, 798, 1401, 1229, 507, 42, -24, },
+ {-22, 157, 786, 1396, 1238, 518, 46, -24, },
+ {-22, 151, 774, 1392, 1247, 529, 49, -24, },
+ {-22, 144, 762, 1387, 1255, 540, 53, -24, },
+ {-23, 139, 750, 1382, 1264, 552, 57, -24, },
+ {-23, 133, 738, 1376, 1272, 563, 60, -24, },
+ {-23, 127, 726, 1371, 1280, 574, 64, -24, },
+ {-23, 121, 714, 1365, 1288, 586, 69, -24, },
+ {-23, 116, 703, 1359, 1296, 597, 73, -24, },
+ {-24, 111, 691, 1353, 1304, 609, 77, -24, },
+ {-24, 105, 679, 1346, 1312, 620, 81, -24, },
+ {-24, 100, 667, 1340, 1319, 632, 86, -24, },
+ {-24, 96, 655, 1333, 1326, 644, 91, -24, },
+ {-24, 91, 644, 1326, 1333, 655, 96, -24, },
+ {-24, 86, 632, 1319, 1340, 667, 100, -24, },
+ {-24, 81, 620, 1312, 1346, 679, 105, -24, },
+ {-24, 77, 609, 1304, 1353, 691, 111, -24, },
+ {-24, 73, 597, 1296, 1359, 703, 116, -23, },
+ {-24, 69, 586, 1288, 1365, 714, 121, -23, },
+ {-24, 64, 574, 1280, 1371, 726, 127, -23, },
+ {-24, 60, 563, 1272, 1376, 738, 133, -23, },
+ {-24, 57, 552, 1264, 1382, 750, 139, -23, },
+ {-24, 53, 540, 1255, 1387, 762, 144, -22, },
+ {-24, 49, 529, 1247, 1392, 774, 151, -22, },
+ {-24, 46, 518, 1238, 1396, 786, 157, -22, },
+ {-24, 42, 507, 1229, 1401, 798, 163, -21, },
+ {-24, 39, 497, 1220, 1405, 810, 170, -21, },
+ {-24, 36, 486, 1210, 1409, 822, 176, -20, },
+ {-24, 33, 475, 1201, 1413, 834, 183, -20, },
+ {-24, 30, 464, 1191, 1417, 846, 190, -19, },
+ {-24, 27, 454, 1182, 1420, 858, 197, -19, },
+ {-24, 24, 443, 1172, 1424, 870, 204, -18, },
+ {-24, 22, 433, 1162, 1427, 882, 211, -17, },
+ {-24, 19, 423, 1152, 1430, 894, 219, -17, },
+ {-24, 17, 413, 1142, 1432, 906, 226, -16, },
+ {-24, 14, 403, 1131, 1434, 918, 234, -15, },
+ {-24, 12, 393, 1121, 1437, 930, 242, -14, },
+ {-24, 10, 383, 1110, 1438, 942, 250, -13, },
+ {-24, 8, 373, 1100, 1440, 953, 258, -12, },
+ {-24, 6, 364, 1089, 1441, 965, 266, -11, },
+ {-24, 4, 354, 1078, 1443, 977, 274, -10, },
+ {-24, 2, 345, 1067, 1444, 988, 282, -9, },
+ {-24, 0, 336, 1056, 1444, 1000, 291, -7, },
+ {-24, -1, 326, 1045, 1445, 1011, 300, -6, },
+ {-24, -3, 317, 1034, 1445, 1023, 309, -5, },
+};
+
+static const short
+xhsc_coeff_taps10[XSCALER_MAX_PHASES][XSCALER_TAPS_10] = {
+ {59, 224, 507, 790, 911, 793, 512, 227, 61, 13, },
+ {58, 220, 502, 786, 911, 797, 516, 231, 62, 13, },
+ {56, 216, 497, 783, 911, 800, 521, 235, 64, 13, },
+ {55, 213, 492, 779, 910, 804, 526, 238, 65, 13, },
+ {54, 209, 487, 775, 910, 807, 531, 242, 67, 14, },
+ {52, 206, 482, 772, 910, 810, 536, 246, 69, 14, },
+ {51, 202, 477, 768, 909, 813, 541, 250, 70, 14, },
+ {50, 199, 473, 764, 909, 817, 545, 254, 72, 14, },
+ {48, 195, 468, 760, 908, 820, 550, 258, 74, 15, },
+ {47, 192, 463, 756, 908, 823, 555, 262, 76, 15, },
+ {46, 188, 458, 752, 907, 826, 560, 266, 78, 15, },
+ {45, 185, 453, 748, 906, 829, 565, 270, 79, 16, },
+ {44, 182, 448, 744, 906, 832, 569, 274, 81, 16, },
+ {42, 179, 444, 740, 905, 835, 574, 278, 83, 16, },
+ {41, 175, 439, 736, 904, 837, 579, 282, 85, 17, },
+ {40, 172, 434, 732, 903, 840, 584, 286, 87, 17, },
+ {39, 169, 429, 728, 902, 843, 589, 290, 89, 18, },
+ {38, 166, 425, 724, 901, 846, 593, 294, 91, 18, },
+ {37, 163, 420, 720, 900, 848, 598, 298, 93, 18, },
+ {36, 160, 415, 716, 899, 851, 603, 302, 95, 19, },
+ {35, 157, 410, 711, 897, 854, 608, 307, 98, 19, },
+ {34, 154, 406, 707, 896, 856, 612, 311, 100, 20, },
+ {33, 151, 401, 703, 895, 859, 617, 315, 102, 20, },
+ {33, 148, 396, 698, 893, 861, 622, 320, 104, 21, },
+ {32, 145, 392, 694, 892, 863, 626, 324, 107, 21, },
+ {31, 142, 387, 690, 890, 866, 631, 328, 109, 22, },
+ {30, 140, 382, 685, 889, 868, 636, 333, 111, 23, },
+ {29, 137, 378, 681, 887, 870, 640, 337, 114, 23, },
+ {28, 134, 373, 677, 886, 872, 645, 342, 116, 24, },
+ {28, 131, 369, 672, 884, 874, 649, 346, 119, 24, },
+ {27, 129, 364, 668, 882, 876, 654, 350, 121, 25, },
+ {26, 126, 359, 663, 880, 878, 659, 355, 124, 26, },
+ {26, 124, 355, 659, 878, 880, 663, 359, 126, 26, },
+ {25, 121, 350, 654, 876, 882, 668, 364, 129, 27, },
+ {24, 119, 346, 649, 874, 884, 672, 369, 131, 28, },
+ {24, 116, 342, 645, 872, 886, 677, 373, 134, 28, },
+ {23, 114, 337, 640, 870, 887, 681, 378, 137, 29, },
+ {23, 111, 333, 636, 868, 889, 685, 382, 140, 30, },
+ {22, 109, 328, 631, 866, 890, 690, 387, 142, 31, },
+ {21, 107, 324, 626, 863, 892, 694, 392, 145, 32, },
+ {21, 104, 320, 622, 861, 893, 698, 396, 148, 33, },
+ {20, 102, 315, 617, 859, 895, 703, 401, 151, 33, },
+ {20, 100, 311, 612, 856, 896, 707, 406, 154, 34, },
+ {19, 98, 307, 608, 854, 897, 711, 410, 157, 35, },
+ {19, 95, 302, 603, 851, 899, 716, 415, 160, 36, },
+ {18, 93, 298, 598, 848, 900, 720, 420, 163, 37, },
+ {18, 91, 294, 593, 846, 901, 724, 425, 166, 38, },
+ {18, 89, 290, 589, 843, 902, 728, 429, 169, 39, },
+ {17, 87, 286, 584, 840, 903, 732, 434, 172, 40, },
+ {17, 85, 282, 579, 837, 904, 736, 439, 175, 41, },
+ {16, 83, 278, 574, 835, 905, 740, 444, 179, 42, },
+ {16, 81, 274, 569, 832, 906, 744, 448, 182, 44, },
+ {16, 79, 270, 565, 829, 906, 748, 453, 185, 45, },
+ {15, 78, 266, 560, 826, 907, 752, 458, 188, 46, },
+ {15, 76, 262, 555, 823, 908, 756, 463, 192, 47, },
+ {15, 74, 258, 550, 820, 908, 760, 468, 195, 48, },
+ {14, 72, 254, 545, 817, 909, 764, 473, 199, 50, },
+ {14, 70, 250, 541, 813, 909, 768, 477, 202, 51, },
+ {14, 69, 246, 536, 810, 910, 772, 482, 206, 52, },
+ {14, 67, 242, 531, 807, 910, 775, 487, 209, 54, },
+ {13, 65, 238, 526, 804, 910, 779, 492, 213, 55, },
+ {13, 64, 235, 521, 800, 911, 783, 497, 216, 56, },
+ {13, 62, 231, 516, 797, 911, 786, 502, 220, 58, },
+ {13, 61, 227, 512, 793, 911, 790, 507, 224, 59, },
+};
+
+static const short
+xhsc_coeff_taps12[XSCALER_MAX_PHASES][XSCALER_TAPS_12] = {
+ {48, 143, 307, 504, 667, 730, 669, 507, 310, 145, 49, 18, },
+ {47, 141, 304, 501, 665, 730, 670, 510, 313, 147, 50, 18, },
+ {46, 138, 301, 498, 663, 730, 672, 513, 316, 149, 51, 18, },
+ {45, 136, 298, 495, 661, 730, 674, 516, 319, 151, 52, 18, },
+ {44, 134, 295, 492, 659, 730, 676, 519, 322, 153, 53, 18, },
+ {44, 132, 292, 489, 657, 730, 677, 522, 325, 155, 54, 18, },
+ {43, 130, 289, 486, 655, 729, 679, 525, 328, 157, 55, 19, },
+ {42, 129, 287, 483, 653, 729, 681, 528, 331, 160, 56, 19, },
+ {41, 127, 284, 480, 651, 729, 683, 531, 334, 162, 57, 19, },
+ {40, 125, 281, 477, 648, 729, 684, 534, 337, 164, 58, 19, },
+ {40, 123, 278, 474, 646, 728, 686, 537, 340, 166, 59, 20, },
+ {39, 121, 275, 471, 644, 728, 687, 539, 343, 169, 60, 20, },
+ {38, 119, 272, 468, 642, 727, 689, 542, 346, 171, 61, 20, },
+ {37, 117, 269, 465, 640, 727, 690, 545, 349, 173, 62, 20, },
+ {37, 115, 266, 461, 638, 727, 692, 548, 353, 175, 63, 21, },
+ {36, 114, 264, 458, 635, 726, 693, 551, 356, 178, 65, 21, },
+ {35, 112, 261, 455, 633, 726, 695, 554, 359, 180, 66, 21, },
+ {35, 110, 258, 452, 631, 725, 696, 556, 362, 183, 67, 21, },
+ {34, 108, 255, 449, 628, 724, 698, 559, 365, 185, 68, 22, },
+ {33, 107, 252, 446, 626, 724, 699, 562, 368, 187, 69, 22, },
+ {33, 105, 250, 443, 624, 723, 700, 565, 371, 190, 71, 22, },
+ {32, 103, 247, 440, 621, 723, 702, 567, 374, 192, 72, 23, },
+ {32, 101, 244, 437, 619, 722, 703, 570, 377, 195, 73, 23, },
+ {31, 100, 241, 433, 617, 721, 704, 573, 380, 197, 75, 23, },
+ {31, 98, 239, 430, 614, 720, 705, 576, 383, 200, 76, 24, },
+ {30, 97, 236, 427, 612, 720, 707, 578, 387, 202, 77, 24, },
+ {29, 95, 233, 424, 609, 719, 708, 581, 390, 205, 79, 24, },
+ {29, 93, 231, 421, 607, 718, 709, 584, 393, 207, 80, 25, },
+ {28, 92, 228, 418, 604, 717, 710, 586, 396, 210, 81, 25, },
+ {28, 90, 225, 415, 602, 716, 711, 589, 399, 212, 83, 26, },
+ {27, 89, 223, 412, 599, 715, 712, 591, 402, 215, 84, 26, },
+ {27, 87, 220, 408, 597, 714, 713, 594, 405, 217, 86, 27, },
+ {27, 86, 217, 405, 594, 713, 714, 597, 408, 220, 87, 27, },
+ {26, 84, 215, 402, 591, 712, 715, 599, 412, 223, 89, 27, },
+ {26, 83, 212, 399, 589, 711, 716, 602, 415, 225, 90, 28, },
+ {25, 81, 210, 396, 586, 710, 717, 604, 418, 228, 92, 28, },
+ {25, 80, 207, 393, 584, 709, 718, 607, 421, 231, 93, 29, },
+ {24, 79, 205, 390, 581, 708, 719, 609, 424, 233, 95, 29, },
+ {24, 77, 202, 387, 578, 707, 720, 612, 427, 236, 97, 30, },
+ {24, 76, 200, 383, 576, 705, 720, 614, 430, 239, 98, 31, },
+ {23, 75, 197, 380, 573, 704, 721, 617, 433, 241, 100, 31, },
+ {23, 73, 195, 377, 570, 703, 722, 619, 437, 244, 101, 32, },
+ {23, 72, 192, 374, 567, 702, 723, 621, 440, 247, 103, 32, },
+ {22, 71, 190, 371, 565, 700, 723, 624, 443, 250, 105, 33, },
+ {22, 69, 187, 368, 562, 699, 724, 626, 446, 252, 107, 33, },
+ {22, 68, 185, 365, 559, 698, 724, 628, 449, 255, 108, 34, },
+ {21, 67, 183, 362, 556, 696, 725, 631, 452, 258, 110, 35, },
+ {21, 66, 180, 359, 554, 695, 726, 633, 455, 261, 112, 35, },
+ {21, 65, 178, 356, 551, 693, 726, 635, 458, 264, 114, 36, },
+ {21, 63, 175, 353, 548, 692, 727, 638, 461, 266, 115, 37, },
+ {20, 62, 173, 349, 545, 690, 727, 640, 465, 269, 117, 37, },
+ {20, 61, 171, 346, 542, 689, 727, 642, 468, 272, 119, 38, },
+ {20, 60, 169, 343, 539, 687, 728, 644, 471, 275, 121, 39, },
+ {20, 59, 166, 340, 537, 686, 728, 646, 474, 278, 123, 40, },
+ {19, 58, 164, 337, 534, 684, 729, 648, 477, 281, 125, 40, },
+ {19, 57, 162, 334, 531, 683, 729, 651, 480, 284, 127, 41, },
+ {19, 56, 160, 331, 528, 681, 729, 653, 483, 287, 129, 42, },
+ {19, 55, 157, 328, 525, 679, 729, 655, 486, 289, 130, 43, },
+ {18, 54, 155, 325, 522, 677, 730, 657, 489, 292, 132, 44, },
+ {18, 53, 153, 322, 519, 676, 730, 659, 492, 295, 134, 44, },
+ {18, 52, 151, 319, 516, 674, 730, 661, 495, 298, 136, 45, },
+ {18, 51, 149, 316, 513, 672, 730, 663, 498, 301, 138, 46, },
+ {18, 50, 147, 313, 510, 670, 730, 665, 501, 304, 141, 47, },
+ {18, 49, 145, 310, 507, 669, 730, 667, 504, 307, 143, 48, },
+};
+
+/* V-scaler coefficients for 6, 8, 10 and 12 tap filters */
+static const short
+xvsc_coeff_taps6[XSCALER_MAX_PHASES][XSCALER_TAPS_6] = {
+ {-132, 236, 3824, 236, -132, 64, },
+ {-116, 184, 3816, 292, -144, 64, },
+ {-100, 132, 3812, 348, -160, 64, },
+ {-88, 84, 3808, 404, -176, 64, },
+ {-72, 36, 3796, 464, -192, 64, },
+ {-60, -8, 3780, 524, -208, 68, },
+ {-48, -52, 3768, 588, -228, 68, },
+ {-32, -96, 3748, 652, -244, 68, },
+ {-20, -136, 3724, 716, -260, 72, },
+ {-8, -172, 3696, 784, -276, 72, },
+ {0, -208, 3676, 848, -292, 72, },
+ {12, -244, 3640, 920, -308, 76, },
+ {20, -276, 3612, 988, -324, 76, },
+ {32, -304, 3568, 1060, -340, 80, },
+ {40, -332, 3532, 1132, -356, 80, },
+ {48, -360, 3492, 1204, -372, 84, },
+ {56, -384, 3448, 1276, -388, 88, },
+ {64, -408, 3404, 1352, -404, 88, },
+ {72, -428, 3348, 1428, -416, 92, },
+ {76, -448, 3308, 1500, -432, 92, },
+ {84, -464, 3248, 1576, -444, 96, },
+ {88, -480, 3200, 1652, -460, 96, },
+ {92, -492, 3140, 1728, -472, 100, },
+ {96, -504, 3080, 1804, -484, 104, },
+ {100, -516, 3020, 1880, -492, 104, },
+ {104, -524, 2956, 1960, -504, 104, },
+ {104, -532, 2892, 2036, -512, 108, },
+ {108, -540, 2832, 2108, -520, 108, },
+ {108, -544, 2764, 2184, -528, 112, },
+ {112, -544, 2688, 2260, -532, 112, },
+ {112, -548, 2624, 2336, -540, 112, },
+ {112, -548, 2556, 2408, -544, 112, },
+ {112, -544, 2480, 2480, -544, 112, },
+ {112, -544, 2408, 2556, -548, 112, },
+ {112, -540, 2336, 2624, -548, 112, },
+ {112, -532, 2260, 2688, -544, 112, },
+ {112, -528, 2184, 2764, -544, 108, },
+ {108, -520, 2108, 2832, -540, 108, },
+ {108, -512, 2036, 2892, -532, 104, },
+ {104, -504, 1960, 2956, -524, 104, },
+ {104, -492, 1880, 3020, -516, 100, },
+ {104, -484, 1804, 3080, -504, 96, },
+ {100, -472, 1728, 3140, -492, 92, },
+ { 96, -460, 1652, 3200, -480, 88, },
+ { 96, -444, 1576, 3248, -464, 84, },
+ { 92, -432, 1500, 3308, -448, 76, },
+ { 92, -416, 1428, 3348, -428, 72, },
+ { 88, -404, 1352, 3404, -408, 64, },
+ { 88, -388, 1276, 3448, -384, 56, },
+ { 84, -372, 1204, 3492, -360, 48, },
+ { 80, -356, 1132, 3532, -332, 40, },
+ { 80, -340, 1060, 3568, -304, 32, },
+ { 76, -324, 988, 3612, -276, 20, },
+ { 76, -308, 920, 3640, -244, 12, },
+ { 72, -292, 848, 3676, -208, 0, },
+ { 72, -276, 784, 3696, -172, -8, },
+ { 72, -260, 716, 3724, -136, -20, },
+ { 68, -244, 652, 3748, -96, -32, },
+ { 68, -228, 588, 3768, -52, -48, },
+ { 68, -208, 524, 3780, -8, -60, },
+ { 64, -192, 464, 3796, 36, -72, },
+ { 64, -176, 404, 3808, 84, -88, },
+ { 64, -160, 348, 3812, 132, -100, },
+ { 64, -144, 292, 3816, 184, -116, }
+};
+
+static const short
+xvsc_coeff_taps8[XSCALER_MAX_PHASES][XSCALER_TAPS_8] = {
+ {-5, 309, 1023, 1445, 1034, 317, -3, -24, },
+ {-6, 300, 1011, 1445, 1045, 326, -1, -24, },
+ {-7, 291, 1000, 1444, 1056, 336, 0, -24, },
+ {-9, 282, 988, 1444, 1067, 345, 2, -24, },
+ {-10, 274, 977, 1443, 1078, 354, 4, -24, },
+ {-11, 266, 965, 1441, 1089, 364, 6, -24, },
+ {-12, 258, 953, 1440, 1100, 373, 8, -24, },
+ {-13, 250, 942, 1438, 1110, 383, 10, -24, },
+ {-14, 242, 930, 1437, 1121, 393, 12, -24, },
+ {-15, 234, 918, 1434, 1131, 403, 14, -24, },
+ {-16, 226, 906, 1432, 1142, 413, 17, -24, },
+ {-17, 219, 894, 1430, 1152, 423, 19, -24, },
+ {-17, 211, 882, 1427, 1162, 433, 22, -24, },
+ {-18, 204, 870, 1424, 1172, 443, 24, -24, },
+ {-19, 197, 858, 1420, 1182, 454, 27, -24, },
+ {-19, 190, 846, 1417, 1191, 464, 30, -24, },
+ {-20, 183, 834, 1413, 1201, 475, 33, -24, },
+ {-20, 176, 822, 1409, 1210, 486, 36, -24, },
+ {-21, 170, 810, 1405, 1220, 497, 39, -24, },
+ {-21, 163, 798, 1401, 1229, 507, 42, -24, },
+ {-22, 157, 786, 1396, 1238, 518, 46, -24, },
+ {-22, 151, 774, 1392, 1247, 529, 49, -24, },
+ {-22, 144, 762, 1387, 1255, 540, 53, -24, },
+ {-23, 139, 750, 1382, 1264, 552, 57, -24, },
+ {-23, 133, 738, 1376, 1272, 563, 60, -24, },
+ {-23, 127, 726, 1371, 1280, 574, 64, -24, },
+ {-23, 121, 714, 1365, 1288, 586, 69, -24, },
+ {-23, 116, 703, 1359, 1296, 597, 73, -24, },
+ {-24, 111, 691, 1353, 1304, 609, 77, -24, },
+ {-24, 105, 679, 1346, 1312, 620, 81, -24, },
+ {-24, 100, 667, 1340, 1319, 632, 86, -24, },
+ {-24, 96, 655, 1333, 1326, 644, 91, -24, },
+ {-24, 91, 644, 1326, 1333, 655, 96, -24, },
+ {-24, 86, 632, 1319, 1340, 667, 100, -24, },
+ {-24, 81, 620, 1312, 1346, 679, 105, -24, },
+ {-24, 77, 609, 1304, 1353, 691, 111, -24, },
+ {-24, 73, 597, 1296, 1359, 703, 116, -23, },
+ {-24, 69, 586, 1288, 1365, 714, 121, -23, },
+ {-24, 64, 574, 1280, 1371, 726, 127, -23, },
+ {-24, 60, 563, 1272, 1376, 738, 133, -23, },
+ {-24, 57, 552, 1264, 1382, 750, 139, -23, },
+ {-24, 53, 540, 1255, 1387, 762, 144, -22, },
+ {-24, 49, 529, 1247, 1392, 774, 151, -22, },
+ {-24, 46, 518, 1238, 1396, 786, 157, -22, },
+ {-24, 42, 507, 1229, 1401, 798, 163, -21, },
+ {-24, 39, 497, 1220, 1405, 810, 170, -21, },
+ {-24, 36, 486, 1210, 1409, 822, 176, -20, },
+ {-24, 33, 475, 1201, 1413, 834, 183, -20, },
+ {-24, 30, 464, 1191, 1417, 846, 190, -19, },
+ {-24, 27, 454, 1182, 1420, 858, 197, -19, },
+ {-24, 24, 443, 1172, 1424, 870, 204, -18, },
+ {-24, 22, 433, 1162, 1427, 882, 211, -17, },
+ {-24, 19, 423, 1152, 1430, 894, 219, -17, },
+ {-24, 17, 413, 1142, 1432, 906, 226, -16, },
+ {-24, 14, 403, 1131, 1434, 918, 234, -15, },
+ {-24, 12, 393, 1121, 1437, 930, 242, -14, },
+ {-24, 10, 383, 1110, 1438, 942, 250, -13, },
+ {-24, 8, 373, 1100, 1440, 953, 258, -12, },
+ {-24, 6, 364, 1089, 1441, 965, 266, -11, },
+ {-24, 4, 354, 1078, 1443, 977, 274, -10, },
+ {-24, 2, 345, 1067, 1444, 988, 282, -9, },
+ {-24, 0, 336, 1056, 1444, 1000, 291, -7, },
+ {-24, -1, 326, 1045, 1445, 1011, 300, -6, },
+ {-24, -3, 317, 1034, 1445, 1023, 309, -5, },
+};
+
+static const short
+xvsc_coeff_taps10[XSCALER_MAX_PHASES][XSCALER_TAPS_10] = {
+ {59, 224, 507, 790, 911, 793, 512, 227, 61, 13, },
+ {58, 220, 502, 786, 911, 797, 516, 231, 62, 13, },
+ {56, 216, 497, 783, 911, 800, 521, 235, 64, 13, },
+ {55, 213, 492, 779, 910, 804, 526, 238, 65, 13, },
+ {54, 209, 487, 775, 910, 807, 531, 242, 67, 14, },
+ {52, 206, 482, 772, 910, 810, 536, 246, 69, 14, },
+ {51, 202, 477, 768, 909, 813, 541, 250, 70, 14, },
+ {50, 199, 473, 764, 909, 817, 545, 254, 72, 14, },
+ {48, 195, 468, 760, 908, 820, 550, 258, 74, 15, },
+ {47, 192, 463, 756, 908, 823, 555, 262, 76, 15, },
+ {46, 188, 458, 752, 907, 826, 560, 266, 78, 15, },
+ {45, 185, 453, 748, 906, 829, 565, 270, 79, 16, },
+ {44, 182, 448, 744, 906, 832, 569, 274, 81, 16, },
+ {42, 179, 444, 740, 905, 835, 574, 278, 83, 16, },
+ {41, 175, 439, 736, 904, 837, 579, 282, 85, 17, },
+ {40, 172, 434, 732, 903, 840, 584, 286, 87, 17, },
+ {39, 169, 429, 728, 902, 843, 589, 290, 89, 18, },
+ {38, 166, 425, 724, 901, 846, 593, 294, 91, 18, },
+ {37, 163, 420, 720, 900, 848, 598, 298, 93, 18, },
+ {36, 160, 415, 716, 899, 851, 603, 302, 95, 19, },
+ {35, 157, 410, 711, 897, 854, 608, 307, 98, 19, },
+ {34, 154, 406, 707, 896, 856, 612, 311, 100, 20, },
+ {33, 151, 401, 703, 895, 859, 617, 315, 102, 20, },
+ {33, 148, 396, 698, 893, 861, 622, 320, 104, 21, },
+ {32, 145, 392, 694, 892, 863, 626, 324, 107, 21, },
+ {31, 142, 387, 690, 890, 866, 631, 328, 109, 22, },
+ {30, 140, 382, 685, 889, 868, 636, 333, 111, 23, },
+ {29, 137, 378, 681, 887, 870, 640, 337, 114, 23, },
+ {28, 134, 373, 677, 886, 872, 645, 342, 116, 24, },
+ {28, 131, 369, 672, 884, 874, 649, 346, 119, 24, },
+ {27, 129, 364, 668, 882, 876, 654, 350, 121, 25, },
+ {26, 126, 359, 663, 880, 878, 659, 355, 124, 26, },
+ {26, 124, 355, 659, 878, 880, 663, 359, 126, 26, },
+ {25, 121, 350, 654, 876, 882, 668, 364, 129, 27, },
+ {24, 119, 346, 649, 874, 884, 672, 369, 131, 28, },
+ {24, 116, 342, 645, 872, 886, 677, 373, 134, 28, },
+ {23, 114, 337, 640, 870, 887, 681, 378, 137, 29, },
+ {23, 111, 333, 636, 868, 889, 685, 382, 140, 30, },
+ {22, 109, 328, 631, 866, 890, 690, 387, 142, 31, },
+ {21, 107, 324, 626, 863, 892, 694, 392, 145, 32, },
+ {21, 104, 320, 622, 861, 893, 698, 396, 148, 33, },
+ {20, 102, 315, 617, 859, 895, 703, 401, 151, 33, },
+ {20, 100, 311, 612, 856, 896, 707, 406, 154, 34, },
+ {19, 98, 307, 608, 854, 897, 711, 410, 157, 35, },
+ {19, 95, 302, 603, 851, 899, 716, 415, 160, 36, },
+ {18, 93, 298, 598, 848, 900, 720, 420, 163, 37, },
+ {18, 91, 294, 593, 846, 901, 724, 425, 166, 38, },
+ {18, 89, 290, 589, 843, 902, 728, 429, 169, 39, },
+ {17, 87, 286, 584, 840, 903, 732, 434, 172, 40, },
+ {17, 85, 282, 579, 837, 904, 736, 439, 175, 41, },
+ {16, 83, 278, 574, 835, 905, 740, 444, 179, 42, },
+ {16, 81, 274, 569, 832, 906, 744, 448, 182, 44, },
+ {16, 79, 270, 565, 829, 906, 748, 453, 185, 45, },
+ {15, 78, 266, 560, 826, 907, 752, 458, 188, 46, },
+ {15, 76, 262, 555, 823, 908, 756, 463, 192, 47, },
+ {15, 74, 258, 550, 820, 908, 760, 468, 195, 48, },
+ {14, 72, 254, 545, 817, 909, 764, 473, 199, 50, },
+ {14, 70, 250, 541, 813, 909, 768, 477, 202, 51, },
+ {14, 69, 246, 536, 810, 910, 772, 482, 206, 52, },
+ {14, 67, 242, 531, 807, 910, 775, 487, 209, 54, },
+ {13, 65, 238, 526, 804, 910, 779, 492, 213, 55, },
+ {13, 64, 235, 521, 800, 911, 783, 497, 216, 56, },
+ {13, 62, 231, 516, 797, 911, 786, 502, 220, 58, },
+ {13, 61, 227, 512, 793, 911, 790, 507, 224, 59, },
+};
+
+static const short
+xvsc_coeff_taps12[XSCALER_MAX_PHASES][XSCALER_TAPS_12] = {
+ {48, 143, 307, 504, 667, 730, 669, 507, 310, 145, 49, 18, },
+ {47, 141, 304, 501, 665, 730, 670, 510, 313, 147, 50, 18, },
+ {46, 138, 301, 498, 663, 730, 672, 513, 316, 149, 51, 18, },
+ {45, 136, 298, 495, 661, 730, 674, 516, 319, 151, 52, 18, },
+ {44, 134, 295, 492, 659, 730, 676, 519, 322, 153, 53, 18, },
+ {44, 132, 292, 489, 657, 730, 677, 522, 325, 155, 54, 18, },
+ {43, 130, 289, 486, 655, 729, 679, 525, 328, 157, 55, 19, },
+ {42, 129, 287, 483, 653, 729, 681, 528, 331, 160, 56, 19, },
+ {41, 127, 284, 480, 651, 729, 683, 531, 334, 162, 57, 19, },
+ {40, 125, 281, 477, 648, 729, 684, 534, 337, 164, 58, 19, },
+ {40, 123, 278, 474, 646, 728, 686, 537, 340, 166, 59, 20, },
+ {39, 121, 275, 471, 644, 728, 687, 539, 343, 169, 60, 20, },
+ {38, 119, 272, 468, 642, 727, 689, 542, 346, 171, 61, 20, },
+ {37, 117, 269, 465, 640, 727, 690, 545, 349, 173, 62, 20, },
+ {37, 115, 266, 461, 638, 727, 692, 548, 353, 175, 63, 21, },
+ {36, 114, 264, 458, 635, 726, 693, 551, 356, 178, 65, 21, },
+ {35, 112, 261, 455, 633, 726, 695, 554, 359, 180, 66, 21, },
+ {35, 110, 258, 452, 631, 725, 696, 556, 362, 183, 67, 21, },
+ {34, 108, 255, 449, 628, 724, 698, 559, 365, 185, 68, 22, },
+ {33, 107, 252, 446, 626, 724, 699, 562, 368, 187, 69, 22, },
+ {33, 105, 250, 443, 624, 723, 700, 565, 371, 190, 71, 22, },
+ {32, 103, 247, 440, 621, 723, 702, 567, 374, 192, 72, 23, },
+ {32, 101, 244, 437, 619, 722, 703, 570, 377, 195, 73, 23, },
+ {31, 100, 241, 433, 617, 721, 704, 573, 380, 197, 75, 23, },
+ {31, 98, 239, 430, 614, 720, 705, 576, 383, 200, 76, 24, },
+ {30, 97, 236, 427, 612, 720, 707, 578, 387, 202, 77, 24, },
+ {29, 95, 233, 424, 609, 719, 708, 581, 390, 205, 79, 24, },
+ {29, 93, 231, 421, 607, 718, 709, 584, 393, 207, 80, 25, },
+ {28, 92, 228, 418, 604, 717, 710, 586, 396, 210, 81, 25, },
+ {28, 90, 225, 415, 602, 716, 711, 589, 399, 212, 83, 26, },
+ {27, 89, 223, 412, 599, 715, 712, 591, 402, 215, 84, 26, },
+ {27, 87, 220, 408, 597, 714, 713, 594, 405, 217, 86, 27, },
+ {27, 86, 217, 405, 594, 713, 714, 597, 408, 220, 87, 27, },
+ {26, 84, 215, 402, 591, 712, 715, 599, 412, 223, 89, 27, },
+ {26, 83, 212, 399, 589, 711, 716, 602, 415, 225, 90, 28, },
+ {25, 81, 210, 396, 586, 710, 717, 604, 418, 228, 92, 28, },
+ {25, 80, 207, 393, 584, 709, 718, 607, 421, 231, 93, 29, },
+ {24, 79, 205, 390, 581, 708, 719, 609, 424, 233, 95, 29, },
+ {24, 77, 202, 387, 578, 707, 720, 612, 427, 236, 97, 30, },
+ {24, 76, 200, 383, 576, 705, 720, 614, 430, 239, 98, 31, },
+ {23, 75, 197, 380, 573, 704, 721, 617, 433, 241, 100, 31, },
+ {23, 73, 195, 377, 570, 703, 722, 619, 437, 244, 101, 32, },
+ {23, 72, 192, 374, 567, 702, 723, 621, 440, 247, 103, 32, },
+ {22, 71, 190, 371, 565, 700, 723, 624, 443, 250, 105, 33, },
+ {22, 69, 187, 368, 562, 699, 724, 626, 446, 252, 107, 33, },
+ {22, 68, 185, 365, 559, 698, 724, 628, 449, 255, 108, 34, },
+ {21, 67, 183, 362, 556, 696, 725, 631, 452, 258, 110, 35, },
+ {21, 66, 180, 359, 554, 695, 726, 633, 455, 261, 112, 35, },
+ {21, 65, 178, 356, 551, 693, 726, 635, 458, 264, 114, 36, },
+ {21, 63, 175, 353, 548, 692, 727, 638, 461, 266, 115, 37, },
+ {20, 62, 173, 349, 545, 690, 727, 640, 465, 269, 117, 37, },
+ {20, 61, 171, 346, 542, 689, 727, 642, 468, 272, 119, 38, },
+ {20, 60, 169, 343, 539, 687, 728, 644, 471, 275, 121, 39, },
+ {20, 59, 166, 340, 537, 686, 728, 646, 474, 278, 123, 40, },
+ {19, 58, 164, 337, 534, 684, 729, 648, 477, 281, 125, 40, },
+ {19, 57, 162, 334, 531, 683, 729, 651, 480, 284, 127, 41, },
+ {19, 56, 160, 331, 528, 681, 729, 653, 483, 287, 129, 42, },
+ {19, 55, 157, 328, 525, 679, 729, 655, 486, 289, 130, 43, },
+ {18, 54, 155, 325, 522, 677, 730, 657, 489, 292, 132, 44, },
+ {18, 53, 153, 322, 519, 676, 730, 659, 492, 295, 134, 44, },
+ {18, 52, 151, 319, 516, 674, 730, 661, 495, 298, 136, 45, },
+ {18, 51, 149, 316, 513, 672, 730, 663, 498, 301, 138, 46, },
+ {18, 50, 147, 313, 510, 670, 730, 665, 501, 304, 141, 47, },
+ {18, 49, 145, 310, 507, 669, 730, 667, 504, 307, 143, 48, },
+};
diff --git a/drivers/media/platform/xilinx/xilinx-multi-scaler.c b/drivers/media/platform/xilinx/xilinx-multi-scaler.c
new file mode 100644
index 000000000000..0202f6a5b9e8
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-multi-scaler.c
@@ -0,0 +1,2450 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Memory-to-Memory Video Multi-Scaler IP
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Author: Suresh Gupta <suresh.gupta@xilinx.com>
+ *
+ * Based on the virtual v4l2-mem2mem example device
+ *
+ * This driver adds support to control the Xilinx Video Multi
+ * Scaler Controller
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "xilinx-multi-scaler-coeff.h"
+
+/* 0x0000 : Control signals (standard HLS ap_ctrl block) */
+#define XM2MSC_AP_CTRL 0x0000
+#define XM2MSC_AP_CTRL_START BIT(0)
+#define XM2MSC_AP_CTRL_DONE BIT(1)
+/* NOTE(review): "IDEL" looks like a typo for IDLE; kept as-is since code
+ * outside this view may reference the name.
+ */
+#define XM2MSC_AP_CTRL_IDEL BIT(2)
+#define XM2MSC_AP_CTRL_READY BIT(3)
+#define XM2MSC_AP_CTRL_AUTO_RESTART BIT(7)
+
+/* 0x0004 : Global Interrupt Enable Register */
+#define XM2MSC_GIE 0x0004
+#define XM2MSC_GIE_EN BIT(0)
+
+/* 0x0008 : IP Interrupt Enable Register (Read/Write) */
+#define XM2MSC_IER 0x0008
+/* 0x000c : IP Interrupt Status Register */
+#define XM2MSC_ISR 0x000c
+#define XM2MSC_ISR_DONE BIT(0)
+#define XM2MSC_ISR_READY BIT(1)
+
+/* 0x0010 : number of active outputs programmed into the IP */
+#define XM2MSC_NUM_OUTS 0x0010
+
+/* Per-channel register offsets, relative to XM2MSC_CHAN_REGS_START(chan) */
+#define XM2MSC_WIDTHIN 0x000
+#define XM2MSC_WIDTHOUT 0x008
+#define XM2MSC_HEIGHTIN 0x010
+#define XM2MSC_HEIGHTOUT 0x018
+#define XM2MSC_LINERATE 0x020
+#define XM2MSC_PIXELRATE 0x028
+#define XM2MSC_INPIXELFMT 0x030
+#define XM2MSC_OUTPIXELFMT 0x038
+#define XM2MSC_INSTRIDE 0x050
+#define XM2MSC_OUTSTRIDE 0x058
+#define XM2MSC_SRCIMGBUF0 0x060
+#define XM2MSC_SRCIMGBUF1 0x070
+#define XM2MSC_DSTIMGBUF0 0x090
+/* NOTE(review): 0x0100 breaks the 0x10 spacing of the other buffer
+ * registers — verify against the IP register map.
+ */
+#define XM2MSC_DSTIMGBUF1 0x0100
+
+/* Coefficient bank bases; one 0x2000-sized bank pair per channel */
+#define XM2MVSC_VFLTCOEFF_L 0x2000
+#define XM2MVSC_VFLTCOEFF(x) (XM2MVSC_VFLTCOEFF_L + 0x2000 * (x))
+#define XM2MVSC_HFLTCOEFF_L 0x2800
+#define XM2MVSC_HFLTCOEFF(x) (XM2MVSC_HFLTCOEFF_L + 0x2000 * (x))
+
+/* Base of channel x's register window */
+#define XM2MSC_CHAN_REGS_START(x) (0x100 + 0x200 * (x))
+
+/*
+ * IP has reserved area between XM2MSC_DSTIMGBUF0 and
+ * XM2MSC_DSTIMGBUF1 registers of channel 4
+ */
+#define XM2MSC_RESERVED_AREA 0x600
+
+/* GPIO RESET MACROS */
+#define XM2MSC_RESET_ASSERT (0x1)
+#define XM2MSC_RESET_DEASSERT (0x0)
+
+/* Supported range of scaling channels in one IP instance */
+#define XM2MSC_MIN_CHAN 1
+#define XM2MSC_MAX_CHAN 8
+
+#define XM2MSC_MAX_WIDTH (8192)
+#define XM2MSC_MAX_HEIGHT (4320)
+#define XM2MSC_MIN_WIDTH (64)
+#define XM2MSC_MIN_HEIGHT (64)
+/* Fixed-point 16.16 step: rate registers hold (in * 65536) / out */
+#define XM2MSC_STEP_PRECISION (65536)
+/* Mask definitions for Low 16 bits in a 32 bit number */
+#define XM2MSC_MASK_LOW_16BITS GENMASK(15, 0)
+#define XM2MSC_BITSHIFT_16 (16)
+
+#define XM2MSC_DRIVER_NAME "xm2msc"
+
+/* Channel status flags */
+#define CHAN_ATTACHED BIT(0)
+#define CHAN_OPENED BIT(1)
+
+/* Indices into xm2msc_chan_ctx.q_data[] */
+#define XM2MSC_CHAN_OUT 0
+#define XM2MSC_CHAN_CAP 1
+
+/*
+ * Number of channels streaming on both OUT and CAP sides: the lowest
+ * clear bit across the two bitmaps (assumes channels stream in
+ * ascending order).
+ */
+#define NUM_STREAM(_x) \
+ ({ typeof(_x) (x) = (_x); \
+ min(ffz(x->out_streamed_chan), \
+ ffz(x->cap_streamed_chan)); })
+
+#define XM2MSC_ALIGN_MUL 8
+
+/*
+ * These are temporary variables. Once stride and height alignment
+ * support is added to the plugin, these variables will be removed.
+ */
+/* Per-channel alignment overrides, tunable at module load or runtime. */
+static unsigned int output_stride_align[XM2MSC_MAX_CHAN] = {
+ 1, 1, 1, 1, 1, 1, 1, 1 };
+module_param_array(output_stride_align, uint, NULL, 0644);
+MODULE_PARM_DESC(output_stride_align,
+ "Per channel stride alignment required at output.");
+
+static unsigned int capture_stride_align[XM2MSC_MAX_CHAN] = {
+ 1, 1, 1, 1, 1, 1, 1, 1 };
+module_param_array(capture_stride_align, uint, NULL, 0644);
+MODULE_PARM_DESC(capture_stride_align,
+ "Per channel stride alignment required at capture.");
+
+static unsigned int output_height_align[XM2MSC_MAX_CHAN] = {
+ 1, 1, 1, 1, 1, 1, 1, 1 };
+module_param_array(output_height_align, uint, NULL, 0644);
+MODULE_PARM_DESC(output_height_align,
+ "Per channel height alignment required at output.");
+
+static unsigned int capture_height_align[XM2MSC_MAX_CHAN] = {
+ 1, 1, 1, 1, 1, 1, 1, 1 };
+module_param_array(capture_height_align, uint, NULL, 0644);
+MODULE_PARM_DESC(capture_height_align,
+ "Per channel height alignment required at capture.");
+
+/*
+ * Xilinx Video Specific Color/Pixel Formats.
+ * The numeric values are the IP's own encoding and are written directly
+ * to the XM2MSC_INPIXELFMT/XM2MSC_OUTPIXELFMT registers, so they must
+ * not be renumbered.
+ */
+enum xm2msc_pix_fmt {
+	XILINX_M2MSC_FMT_RGBX8 = 10,
+	XILINX_M2MSC_FMT_YUVX8 = 11,
+	XILINX_M2MSC_FMT_YUYV8 = 12,
+	XILINX_M2MSC_FMT_RGBX10 = 15,
+	XILINX_M2MSC_FMT_YUVX10 = 16,
+	XILINX_M2MSC_FMT_Y_UV8 = 18,
+	XILINX_M2MSC_FMT_Y_UV8_420 = 19,
+	XILINX_M2MSC_FMT_RGB8 = 20,
+	XILINX_M2MSC_FMT_YUV8 = 21,
+	XILINX_M2MSC_FMT_Y_UV10 = 22,
+	XILINX_M2MSC_FMT_Y_UV10_420 = 23,
+	XILINX_M2MSC_FMT_Y8 = 24,
+	XILINX_M2MSC_FMT_Y10 = 25,
+	XILINX_M2MSC_FMT_BGRX8 = 27,
+	XILINX_M2MSC_FMT_UYVY8 = 28,
+	XILINX_M2MSC_FMT_BGR8 = 29,
+};
+
+/**
+ * struct xm2msc_fmt - driver info for each of the supported video formats
+ * @name: human-readable device tree name for this entry
+ * @fourcc: V4L2 fourcc pixel format code
+ * @xm2msc_fmt: Xilinx Video Specific Color/Pixel Formats
+ * @num_buffs: number of physically non-contiguous data planes/buffs
+ */
+struct xm2msc_fmt {
+	char *name;
+	u32 fourcc;
+	enum xm2msc_pix_fmt xm2msc_fmt;
+	u32 num_buffs;
+};
+
+/*
+ * Format table. Entries sharing a name (nv16/nv12/xv20/xv15) come in two
+ * flavours: the 'M' fourcc with two separate buffers and the non-'M'
+ * fourcc with both planes in one contiguous buffer. Availability is
+ * gated by the supported_fmt bitmap, indexed by position in this array
+ * (see xm2msc_set_fmt()/xm2msc_chk_fmt()).
+ */
+static const struct xm2msc_fmt formats[] = {
+	{
+		.name = "xbgr8888",
+		.fourcc = V4L2_PIX_FMT_BGRX32,
+		.xm2msc_fmt = XILINX_M2MSC_FMT_RGBX8,
+		.num_buffs = 1,
+	},
+	{
+		.name = "xvuy8888",
+		.fourcc = V4L2_PIX_FMT_XVUY32,
+		.xm2msc_fmt = XILINX_M2MSC_FMT_YUVX8,
+		.num_buffs = 1,
+	},
+	{
+		.name = "yuyv",
+		.fourcc = V4L2_PIX_FMT_YUYV,
+		.xm2msc_fmt = XILINX_M2MSC_FMT_YUYV8,
+		.num_buffs = 1,
+	},
+	{
+		.name = "xbgr2101010",
+		.fourcc = V4L2_PIX_FMT_XBGR30,
+		.xm2msc_fmt = XILINX_M2MSC_FMT_RGBX10,
+		.num_buffs = 1,
+	},
+	{
+		.name = "yuvx2101010",
+		.fourcc = V4L2_PIX_FMT_XVUY10,
+		.xm2msc_fmt = XILINX_M2MSC_FMT_YUVX10,
+		.num_buffs = 1,
+	},
+	{
+		.name = "nv16",
+		.fourcc = V4L2_PIX_FMT_NV16M,
+		.xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV8,
+		.num_buffs = 2,
+	},
+	{
+		.name = "nv16",
+		.fourcc = V4L2_PIX_FMT_NV16,
+		.xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV8,
+		.num_buffs = 1,
+	},
+	{
+		.name = "nv12",
+		.fourcc = V4L2_PIX_FMT_NV12M,
+		.xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV8_420,
+		.num_buffs = 2,
+	},
+	{
+		.name = "nv12",
+		.fourcc = V4L2_PIX_FMT_NV12,
+		.xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV8_420,
+		.num_buffs = 1,
+	},
+	{
+		.name = "bgr888",
+		.fourcc = V4L2_PIX_FMT_RGB24,
+		.xm2msc_fmt = XILINX_M2MSC_FMT_RGB8,
+		.num_buffs = 1,
+	},
+	{
+		.name = "vuy888",
+		.fourcc = V4L2_PIX_FMT_VUY24,
+		.xm2msc_fmt = XILINX_M2MSC_FMT_YUV8,
+		.num_buffs = 1,
+	},
+	{
+		.name = "xv20",
+		.fourcc = V4L2_PIX_FMT_XV20M,
+		.xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV10,
+		.num_buffs = 2,
+	},
+	{
+		.name = "xv20",
+		.fourcc = V4L2_PIX_FMT_XV20,
+		.xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV10,
+		.num_buffs = 1,
+	},
+	{
+		.name = "xv15",
+		.fourcc = V4L2_PIX_FMT_XV15M,
+		.xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV10_420,
+		.num_buffs = 2,
+	},
+	{
+		.name = "xv15",
+		.fourcc = V4L2_PIX_FMT_XV15,
+		.xm2msc_fmt = XILINX_M2MSC_FMT_Y_UV10_420,
+		.num_buffs = 1,
+	},
+	{
+		.name = "y8",
+		.fourcc = V4L2_PIX_FMT_GREY,
+		.xm2msc_fmt = XILINX_M2MSC_FMT_Y8,
+		.num_buffs = 1,
+	},
+	{
+		.name = "y10",
+		.fourcc = V4L2_PIX_FMT_Y10,
+		.xm2msc_fmt = XILINX_M2MSC_FMT_Y10,
+		.num_buffs = 1,
+	},
+	{
+		.name = "xrgb8888",
+		.fourcc = V4L2_PIX_FMT_XBGR32,
+		.xm2msc_fmt = XILINX_M2MSC_FMT_BGRX8,
+		.num_buffs = 1,
+	},
+	{
+		.name = "uyvy",
+		.fourcc = V4L2_PIX_FMT_UYVY,
+		.xm2msc_fmt = XILINX_M2MSC_FMT_UYVY8,
+		.num_buffs = 1,
+	},
+	{
+		.name = "rgb888",
+		.fourcc = V4L2_PIX_FMT_BGR24,
+		.xm2msc_fmt = XILINX_M2MSC_FMT_BGR8,
+		.num_buffs = 1,
+	},
+};
+
+/**
+ * struct xm2msc_q_data - Per-queue, driver-specific private data
+ * There is one source queue and one destination queue for each m2m context.
+ * @width: frame width
+ * @height: frame height
+ * @stride: bytes per line
+ * @nbuffs: current number of planes/buffers
+ * @bytesperline: bytes per line per plane
+ * @sizeimage: image size per plane
+ * @colorspace: supported colorspace
+ * @field: supported field value
+ * @fmt: format info
+ */
+struct xm2msc_q_data {
+	unsigned int width;
+	unsigned int height;
+	unsigned int stride;
+	unsigned int nbuffs;
+	unsigned int bytesperline[2];
+	unsigned int sizeimage[2];
+	enum v4l2_colorspace colorspace;
+	enum v4l2_field field;
+	const struct xm2msc_fmt *fmt;
+};
+
+/**
+ * struct xm2msc_chan_ctx - Scaler Channel Info, Per-Channel context
+ * @regs: IO mapped base address of the Channel
+ * @xm2msc_dev: Pointer to struct xm2m_msc_dev
+ * @num: HW Scaling Channel number
+ * @minor: Minor number of the video device
+ * @output_stride_align: required align stride value at output pad
+ * @capture_stride_align: required align stride value at capture pad
+ * @output_height_align: required align height value at output pad
+ * @capture_height_align: required align height value at capture pad
+ * @status: channel status, CHAN_ATTACHED or CHAN_OPENED
+ * @frames: number of frames processed
+ * @vfd: V4L2 device
+ * @fh: v4l2 file handle
+ * @m2m_dev: m2m device
+ * @m2m_ctx: memory to memory context structure
+ * @q_data: src & dst queue data
+ */
+struct xm2msc_chan_ctx {
+	void __iomem *regs;
+	struct xm2m_msc_dev *xm2msc_dev;
+	u32 num;
+	u32 minor;
+	u32 output_stride_align;
+	u32 capture_stride_align;
+	u32 output_height_align;
+	u32 capture_height_align;
+	u8 status;
+	unsigned long frames;
+
+	struct video_device vfd;
+	struct v4l2_fh fh;
+	struct v4l2_m2m_dev *m2m_dev;
+	struct v4l2_m2m_ctx *m2m_ctx;
+
+	struct xm2msc_q_data q_data[2];
+};
+
+/**
+ * struct xm2m_msc_dev - Xilinx M2M Multi-scaler Device
+ * @dev: pointer to struct device instance used by the driver
+ * @regs: IO mapped base address of the HW/IP
+ * @irq: interrupt number
+ * @clk: video core clock
+ * @max_chan: maximum number of Scaling Channels
+ * @max_ht: maximum number of rows in a plane
+ * @max_wd: maximum number of column in a plane
+ * @taps: number of taps set in HW
+ * @supported_fmt: bitmap for all supported fmts by HW
+ * @dma_addr_size: Size of dma address pointer in IP (either 32 or 64)
+ * @ppc: Pixels per clock set in IP (1, 2 or 4)
+ * @rst_gpio: reset gpio handler
+ * @opened_chan: bitmap for all open channel
+ * @out_streamed_chan: bitmap for all out streamed channel
+ * @cap_streamed_chan: bitmap for all capture streamed channel
+ * @running_chan: currently running channels
+ * @device_busy: HW device is busy or not
+ * @isr_wait: flag tracking whether the ISR has completed
+ * @isr_finished: Wait queue used to wait for IP to complete processing
+ * @v4l2_dev: main V4L2 device structure
+ * @dev_mutex: lock for V4L2 device
+ * @mutex: lock for channel ctx
+ * @lock: lock used in IRQ
+ * @xm2msc_chan: array of channel contexts
+ * @hscaler_coeff: Array of filter coefficients for the Horizontal Scaler
+ * @vscaler_coeff: Array of filter coefficients for the Vertical Scaler
+ */
+struct xm2m_msc_dev {
+	struct device *dev;
+	void __iomem *regs;
+	int irq;
+	struct clk *clk;
+	u32 max_chan;
+	u32 max_ht;
+	u32 max_wd;
+	u32 taps;
+	u32 supported_fmt;
+	u32 dma_addr_size;
+	u8 ppc;
+	struct gpio_desc *rst_gpio;
+
+	u32 opened_chan;
+	u32 out_streamed_chan;
+	u32 cap_streamed_chan;
+	u32 running_chan;
+	bool device_busy;
+	bool isr_wait;
+	wait_queue_head_t isr_finished;
+
+	struct v4l2_device v4l2_dev;
+
+	struct mutex dev_mutex; /*the mutex for v4l2*/
+	struct mutex mutex; /*lock for bitmap reg*/
+	spinlock_t lock; /*IRQ lock*/
+
+	struct xm2msc_chan_ctx xm2msc_chan[XM2MSC_MAX_CHAN];
+	short hscaler_coeff[XSCALER_MAX_PHASES][XSCALER_MAX_TAPS];
+	short vscaler_coeff[XSCALER_MAX_PHASES][XSCALER_MAX_TAPS];
+};
+
+/* Get the channel context embedding the given v4l2 file handle */
+#define fh_to_chanctx(__fh) container_of(__fh, struct xm2msc_chan_ctx, fh)
+
+/* Read a 32-bit IP register */
+static inline u32 xm2msc_readreg(const void __iomem *addr)
+{
+	return ioread32(addr);
+}
+
+/*
+ * Write a 64-bit value as two 32-bit register writes:
+ * low word at @addr, high word at @addr + 4.
+ */
+static inline void xm2msc_write64reg(void __iomem *addr, u64 value)
+{
+	iowrite32(lower_32_bits(value), addr);
+	iowrite32(upper_32_bits(value), (void __iomem *)(addr + 4));
+}
+
+/* Write a 32-bit IP register */
+static inline void xm2msc_writereg(void __iomem *addr, u32 value)
+{
+	iowrite32(value, addr);
+}
+
+/*
+ * Return true for the 2-plane YUV fourccs that carry both planes in a
+ * single contiguous buffer (the non-'M' variants).
+ */
+static bool xm2msc_is_yuv_singlebuff(u32 fourcc)
+{
+	switch (fourcc) {
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_XV15:
+	case V4L2_PIX_FMT_NV16:
+	case V4L2_PIX_FMT_XV20:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/*
+ * Size in bytes of the first (luma) plane, with the row count aligned
+ * up to @row_align; used to locate the chroma plane in a single buffer.
+ */
+static inline u32 xm2msc_yuv_1stplane_size(struct xm2msc_q_data *q_data,
+					   u32 row_align)
+{
+	u32 aligned_rows = ALIGN(q_data->height, row_align);
+
+	return q_data->bytesperline[0] * aligned_rows;
+}
+
+/*
+ * Map a V4L2 buffer type to the channel's OUT or CAP queue data.
+ * Returns NULL (after logging) for unsupported queue types.
+ */
+static struct xm2msc_q_data *get_q_data(struct xm2msc_chan_ctx *chan_ctx,
+					enum v4l2_buf_type type)
+{
+	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ||
+	    type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		return &chan_ctx->q_data[XM2MSC_CHAN_OUT];
+
+	if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ||
+	    type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return &chan_ctx->q_data[XM2MSC_CHAN_CAP];
+
+	v4l2_err(&chan_ctx->xm2msc_dev->v4l2_dev,
+		 "Not supported Q type %d\n", type);
+	return NULL;
+}
+
+/*
+ * Return the index into formats[] matching f's pixelformat, or
+ * ARRAY_SIZE(formats) when the fourcc is not in the table.
+ */
+static u32 find_format_index(struct v4l2_format *f)
+{
+	const struct xm2msc_fmt *fmt;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(formats); i++) {
+		fmt = &formats[i];
+		if (fmt->fourcc == f->fmt.pix_mp.pixelformat)
+			break;
+	}
+
+	return i;
+}
+
+/**
+ * find_format - look up the driver format entry for a V4L2 format
+ * @f: format whose fmt.pix_mp.pixelformat is matched against formats[]
+ *
+ * Return: matching entry of formats[], or NULL if the fourcc is not
+ * supported by the driver.
+ */
+static const struct xm2msc_fmt *find_format(struct v4l2_format *f)
+{
+	/* Reuse find_format_index() rather than duplicating its loop. */
+	u32 i = find_format_index(f);
+
+	if (i == ARRAY_SIZE(formats))
+		return NULL;
+
+	return &formats[i];
+}
+
+/*
+ * Cache horizontal scaler coefficients: copy the nphases x ntaps table
+ * @coeff into hscaler_coeff, centering each ntaps-wide row within the
+ * XSCALER_MAX_TAPS-wide storage (edge taps are zeroed by the memset).
+ */
+static void
+xm2msc_hscaler_load_ext_coeff(struct xm2m_msc_dev *xm2msc,
+			      const short *coeff, u32 ntaps)
+{
+	unsigned int i, j, pad, offset;
+	const u32 nphases = XSCALER_MAX_PHASES;
+
+	/* Determine if coefficient needs padding (effective vs. max taps) */
+	pad = XSCALER_MAX_TAPS - ntaps;
+	offset = pad >> 1;
+
+	memset(xm2msc->hscaler_coeff, 0, sizeof(xm2msc->hscaler_coeff));
+
+	/* Load coefficients into scaler coefficient table */
+	for (i = 0; i < nphases; i++) {
+		for (j = 0; j < ntaps; ++j)
+			xm2msc->hscaler_coeff[i][j + offset] =
+						coeff[i * ntaps + j];
+	}
+}
+
+/*
+ * Program the cached horizontal coefficients into the IP at @base_addr,
+ * packing two signed 16-bit coefficients per 32-bit register write
+ * (even tap in the low half-word, odd tap in the high half-word).
+ */
+static void xm2msc_hscaler_set_coeff(struct xm2msc_chan_ctx *chan_ctx,
+				     const u32 base_addr)
+{
+	struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+	int val, offset, rd_indx;
+	unsigned int i, j;
+	u32 ntaps = chan_ctx->xm2msc_dev->taps;
+	const u32 nphases = XSCALER_MAX_PHASES;
+
+	/* Skip the zero padding added by the load step */
+	offset = (XSCALER_MAX_TAPS - ntaps) / 2;
+	for (i = 0; i < nphases; i++) {
+		for (j = 0; j < ntaps / 2; j++) {
+			rd_indx = j * 2 + offset;
+			val = (xm2msc->hscaler_coeff[i][rd_indx + 1] <<
+			       XM2MSC_BITSHIFT_16) |
+			      (xm2msc->hscaler_coeff[i][rd_indx] &
+			       XM2MSC_MASK_LOW_16BITS);
+			xm2msc_writereg((xm2msc->regs + base_addr) +
+					((i * ntaps / 2 + j) * 4), val);
+		}
+	}
+}
+
+/*
+ * Cache vertical scaler coefficients: copy the nphases x ntaps table
+ * @coeff into vscaler_coeff, centering each ntaps-wide row within the
+ * XSCALER_MAX_TAPS-wide storage (edge taps are zeroed by the memset).
+ * Mirrors xm2msc_hscaler_load_ext_coeff().
+ */
+static void
+xm2msc_vscaler_load_ext_coeff(struct xm2m_msc_dev *xm2msc,
+			      const short *coeff, const u32 ntaps)
+{
+	unsigned int i, j;
+	int pad, offset;
+	const u32 nphases = XSCALER_MAX_PHASES;
+
+	/* Determine if coefficient needs padding (effective vs. max taps) */
+	pad = XSCALER_MAX_TAPS - ntaps;
+	/* pad >> 1 is already 0 when pad == 0; no conditional needed */
+	offset = pad >> 1;
+
+	/* Zero Entire Array */
+	memset(xm2msc->vscaler_coeff, 0, sizeof(xm2msc->vscaler_coeff));
+
+	/* Load User defined coefficients into scaler coefficient table */
+	for (i = 0; i < nphases; i++) {
+		for (j = 0; j < ntaps; ++j)
+			xm2msc->vscaler_coeff[i][j + offset] =
+						coeff[i * ntaps + j];
+	}
+}
+
+/*
+ * Program the cached vertical coefficients into the IP at @base_addr,
+ * packing two signed 16-bit coefficients per 32-bit register write
+ * (even tap in the low half-word, odd tap in the high half-word).
+ */
+static void
+xm2msc_vscaler_set_coeff(struct xm2msc_chan_ctx *chan_ctx,
+			 const u32 base_addr)
+{
+	struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+	u32 val, i, j, offset, rd_indx;
+	u32 ntaps = chan_ctx->xm2msc_dev->taps;
+	const u32 nphases = XSCALER_MAX_PHASES;
+
+	/* Skip the zero padding added by the load step */
+	offset = (XSCALER_MAX_TAPS - ntaps) / 2;
+
+	for (i = 0; i < nphases; i++) {
+		for (j = 0; j < ntaps / 2; j++) {
+			rd_indx = j * 2 + offset;
+			val = (xm2msc->vscaler_coeff[i][rd_indx + 1] <<
+			       XM2MSC_BITSHIFT_16) |
+			      (xm2msc->vscaler_coeff[i][rd_indx] &
+			       XM2MSC_MASK_LOW_16BITS);
+			xm2msc_writereg((xm2msc->regs +
+				base_addr) + ((i * ntaps / 2 + j) * 4), val);
+		}
+	}
+}
+
+/*
+ * Select the horizontal coefficient table for the current scaling ratio.
+ * The ratio is (width_in * 10) / width_out, so thresholds 15/25/35 mean
+ * 1.5x/2.5x/3.5x downscale; stronger downscaling picks more taps, capped
+ * at the tap count the HW was built with. Upscaling (and 1:1) always
+ * uses the 6-tap table.
+ *
+ * Return: number of taps of the selected table; *coeff points to it.
+ */
+static u32
+xm2msc_select_hcoeff(struct xm2msc_chan_ctx *chan_ctx, const short **coeff)
+{
+	u16 hscale_ratio;
+	u32 width_in = chan_ctx->q_data[XM2MSC_CHAN_OUT].width;
+	u32 width_out = chan_ctx->q_data[XM2MSC_CHAN_CAP].width;
+	u32 ntaps = chan_ctx->xm2msc_dev->taps;
+
+	if (width_out < width_in) {
+		hscale_ratio = (width_in * 10) / width_out;
+
+		switch (chan_ctx->xm2msc_dev->taps) {
+		case XSCALER_TAPS_12:
+			if (hscale_ratio > 35) {
+				*coeff = &xhsc_coeff_taps12[0][0];
+				ntaps = XSCALER_TAPS_12;
+			} else if (hscale_ratio > 25) {
+				*coeff = &xhsc_coeff_taps10[0][0];
+				ntaps = XSCALER_TAPS_10;
+			} else if (hscale_ratio > 15) {
+				*coeff = &xhsc_coeff_taps8[0][0];
+				ntaps = XSCALER_TAPS_8;
+			} else {
+				*coeff = &xhsc_coeff_taps6[0][0];
+				ntaps = XSCALER_TAPS_6;
+			}
+			break;
+		case XSCALER_TAPS_10:
+			if (hscale_ratio > 25) {
+				*coeff = &xhsc_coeff_taps10[0][0];
+				ntaps = XSCALER_TAPS_10;
+			} else if (hscale_ratio > 15) {
+				*coeff = &xhsc_coeff_taps8[0][0];
+				ntaps = XSCALER_TAPS_8;
+			} else {
+				*coeff = &xhsc_coeff_taps6[0][0];
+				ntaps = XSCALER_TAPS_6;
+			}
+			break;
+		case XSCALER_TAPS_8:
+			if (hscale_ratio > 15) {
+				*coeff = &xhsc_coeff_taps8[0][0];
+				ntaps = XSCALER_TAPS_8;
+			} else {
+				*coeff = &xhsc_coeff_taps6[0][0];
+				ntaps = XSCALER_TAPS_6;
+			}
+			break;
+		default: /* or XSCALER_TAPS_6 */
+			*coeff = &xhsc_coeff_taps6[0][0];
+			ntaps = XSCALER_TAPS_6;
+		}
+	} else {
+		/*
+		 * Scale Up Mode will always use 6 tap filter
+		 * This also includes 1:1
+		 */
+		*coeff = &xhsc_coeff_taps6[0][0];
+		ntaps = XSCALER_TAPS_6;
+	}
+
+	return ntaps;
+}
+
+/*
+ * Select the vertical coefficient table for the current scaling ratio.
+ * Same scheme as xm2msc_select_hcoeff(): ratio is (height_in * 10) /
+ * height_out, thresholds 15/25/35 pick progressively more taps, capped
+ * at the HW tap count; upscaling and 1:1 use the 6-tap table.
+ *
+ * Return: number of taps of the selected table; *coeff points to it.
+ */
+static u32
+xm2msc_select_vcoeff(struct xm2msc_chan_ctx *chan_ctx, const short **coeff)
+{
+	u16 vscale_ratio;
+	u32 height_in = chan_ctx->q_data[XM2MSC_CHAN_OUT].height;
+	u32 height_out = chan_ctx->q_data[XM2MSC_CHAN_CAP].height;
+	u32 ntaps = chan_ctx->xm2msc_dev->taps;
+
+	if (height_out < height_in) {
+		vscale_ratio = (height_in * 10) / height_out;
+
+		switch (chan_ctx->xm2msc_dev->taps) {
+		case XSCALER_TAPS_12:
+			if (vscale_ratio > 35) {
+				*coeff = &xvsc_coeff_taps12[0][0];
+				ntaps = XSCALER_TAPS_12;
+			} else if (vscale_ratio > 25) {
+				*coeff = &xvsc_coeff_taps10[0][0];
+				ntaps = XSCALER_TAPS_10;
+			} else if (vscale_ratio > 15) {
+				*coeff = &xvsc_coeff_taps8[0][0];
+				ntaps = XSCALER_TAPS_8;
+			} else {
+				*coeff = &xvsc_coeff_taps6[0][0];
+				ntaps = XSCALER_TAPS_6;
+			}
+			break;
+		case XSCALER_TAPS_10:
+			if (vscale_ratio > 25) {
+				*coeff = &xvsc_coeff_taps10[0][0];
+				ntaps = XSCALER_TAPS_10;
+			} else if (vscale_ratio > 15) {
+				*coeff = &xvsc_coeff_taps8[0][0];
+				ntaps = XSCALER_TAPS_8;
+			} else {
+				*coeff = &xvsc_coeff_taps6[0][0];
+				ntaps = XSCALER_TAPS_6;
+			}
+			break;
+		case XSCALER_TAPS_8:
+			if (vscale_ratio > 15) {
+				*coeff = &xvsc_coeff_taps8[0][0];
+				ntaps = XSCALER_TAPS_8;
+			} else {
+				*coeff = &xvsc_coeff_taps6[0][0];
+				ntaps = XSCALER_TAPS_6;
+			}
+			break;
+		default: /* or XSCALER_TAPS_6 */
+			*coeff = &xvsc_coeff_taps6[0][0];
+			ntaps = XSCALER_TAPS_6;
+		}
+	} else {
+		/*
+		 * Scale Up Mode will always use 6 tap filter
+		 * This also includes 1:1
+		 */
+		*coeff = &xvsc_coeff_taps6[0][0];
+		ntaps = XSCALER_TAPS_6;
+	}
+
+	return ntaps;
+}
+
+/*
+ * Select, cache and program both horizontal and vertical coefficient
+ * banks for @chan_ctx's current scaling ratios.
+ */
+static void xm2mvsc_initialize_coeff_banks(struct xm2msc_chan_ctx *chan_ctx)
+{
+	const short *coeff = NULL;
+	u32 ntaps;
+	struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+
+	ntaps = xm2msc_select_hcoeff(chan_ctx, &coeff);
+	xm2msc_hscaler_load_ext_coeff(xm2msc, coeff, ntaps);
+	xm2msc_hscaler_set_coeff(chan_ctx, XM2MVSC_HFLTCOEFF(chan_ctx->num));
+
+	dev_dbg(xm2msc->dev, "htaps %d selected for chan %d\n",
+		ntaps, chan_ctx->num);
+
+	ntaps = xm2msc_select_vcoeff(chan_ctx, &coeff);
+	xm2msc_vscaler_load_ext_coeff(xm2msc, coeff, ntaps);
+	xm2msc_vscaler_set_coeff(chan_ctx, XM2MVSC_VFLTCOEFF(chan_ctx->num));
+
+	dev_dbg(xm2msc->dev, "vtaps %d selected for chan %d\n",
+		ntaps, chan_ctx->num);
+}
+
+/*
+ * Program per-queue geometry into the channel registers: the IN
+ * width/height/format/stride registers for the OUTPUT (source) queue,
+ * the OUT registers for the CAPTURE (destination) queue.
+ */
+static void xm2msc_set_chan_params(struct xm2msc_chan_ctx *chan_ctx,
+				   enum v4l2_buf_type type)
+{
+	struct xm2msc_q_data *q_data = get_q_data(chan_ctx, type);
+	const struct xm2msc_fmt *fmt = q_data->fmt;
+	void __iomem *base = chan_ctx->regs;
+
+	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		xm2msc_writereg(base + XM2MSC_WIDTHIN, q_data->width);
+		xm2msc_writereg(base + XM2MSC_HEIGHTIN, q_data->height);
+		xm2msc_writereg(base + XM2MSC_INPIXELFMT, fmt->xm2msc_fmt);
+		xm2msc_writereg(base + XM2MSC_INSTRIDE, q_data->stride);
+	} else {
+		xm2msc_writereg(base + XM2MSC_WIDTHOUT, q_data->width);
+		xm2msc_writereg(base + XM2MSC_HEIGHTOUT, q_data->height);
+		xm2msc_writereg(base + XM2MSC_OUTPIXELFMT, fmt->xm2msc_fmt);
+		xm2msc_writereg(base + XM2MSC_OUTSTRIDE, q_data->stride);
+	}
+}
+
+/*
+ * Program parameters common to both queues: the coefficient banks and
+ * the fixed-point in/out step ratios (scaled by XM2MSC_STEP_PRECISION,
+ * i.e. 16.16 format) used by the scaler.
+ */
+static void xm2msc_set_chan_com_params(struct xm2msc_chan_ctx *chan_ctx)
+{
+	void __iomem *base = chan_ctx->regs;
+	struct xm2msc_q_data *out_q_data = &chan_ctx->q_data[XM2MSC_CHAN_OUT];
+	struct xm2msc_q_data *cap_q_data = &chan_ctx->q_data[XM2MSC_CHAN_CAP];
+	u32 pixel_rate;
+	u32 line_rate;
+
+	xm2mvsc_initialize_coeff_banks(chan_ctx);
+
+	pixel_rate = (out_q_data->width * XM2MSC_STEP_PRECISION) /
+		cap_q_data->width;
+	line_rate = (out_q_data->height * XM2MSC_STEP_PRECISION) /
+		cap_q_data->height;
+
+	xm2msc_writereg(base + XM2MSC_PIXELRATE, pixel_rate);
+	xm2msc_writereg(base + XM2MSC_LINERATE, line_rate);
+}
+
+/* Reprogram queue geometry and common parameters on every running channel */
+static void xm2msc_program_allchan(struct xm2m_msc_dev *xm2msc)
+{
+	struct xm2msc_chan_ctx *chan_ctx;
+	u32 chan;
+
+	for (chan = 0; chan < xm2msc->running_chan; chan++) {
+		chan_ctx = &xm2msc->xm2msc_chan[chan];
+
+		xm2msc_set_chan_params(chan_ctx,
+				       V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+		xm2msc_set_chan_params(chan_ctx,
+				       V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+		xm2msc_set_chan_com_params(chan_ctx);
+	}
+}
+
+/* Dump one queue context of channel @chan to the kernel log (debug only) */
+static void
+xm2msc_pr_q(struct device *dev, struct xm2msc_q_data *q, int chan,
+	    int type, const char *fun_name)
+{
+	unsigned int i;
+	const struct xm2msc_fmt *fmt = q->fmt;
+
+	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+		dev_dbg(dev, "\n\nOUTPUT Q (%d) Context from [[ %s ]]",
+			chan, fun_name);
+	else
+		dev_dbg(dev, "\n\nCAPTURE Q (%d) Context from [[ %s ]]",
+			chan, fun_name);
+
+	dev_dbg(dev, "width height stride clrspace field planes\n");
+	dev_dbg(dev, "  %d  %d    %d     %d       %d    %d\n",
+		q->width, q->height, q->stride,
+		q->colorspace, q->field, q->nbuffs);
+
+	for (i = 0; i < q->nbuffs; i++) {
+		dev_dbg(dev, "[plane %d ] bytesperline sizeimage\n", i);
+		dev_dbg(dev, "                %d            %d\n",
+			q->bytesperline[i], q->sizeimage[i]);
+	}
+
+	dev_dbg(dev, "fmt_name 4cc xlnx-fmt\n");
+	dev_dbg(dev, "%s %d %d\n",
+		fmt->name, fmt->fourcc, fmt->xm2msc_fmt);
+	dev_dbg(dev, "\n\n");
+}
+
+/* Dump the open/streaming channel bitmaps to the kernel log (debug only) */
+static void
+xm2msc_pr_status(struct xm2m_msc_dev *xm2msc,
+		 const char *fun_name)
+{
+	struct device *dev = xm2msc->dev;
+
+	dev_dbg(dev, "Status in %s\n", fun_name);
+	dev_dbg(dev, "opened_chan out_streamed_chan cap_streamed_chan\n");
+	dev_dbg(dev, "0x%x           0x%x               0x%x\n",
+		xm2msc->opened_chan, xm2msc->out_streamed_chan,
+		xm2msc->cap_streamed_chan);
+	dev_dbg(dev, "\n\n");
+}
+
+/* Dump a channel context (pointers, minor, status flags) — debug only */
+static void
+xm2msc_pr_chanctx(struct xm2msc_chan_ctx *ctx, const char *fun_name)
+{
+	struct device *dev = ctx->xm2msc_dev->dev;
+
+	dev_dbg(dev, "\n\n----- [[ %s ]]: Channel %d (0x%p) context -----\n",
+		fun_name, ctx->num, ctx);
+	dev_dbg(dev, "minor = %d\n", ctx->minor);
+	dev_dbg(dev, "reg mapped at %p\n", ctx->regs);
+	dev_dbg(dev, "xm2msc \tm2m_dev \tm2m_ctx\n");
+	dev_dbg(dev, "%p \t%p \t%p\n", ctx->xm2msc_dev,
+		ctx->m2m_dev, ctx->m2m_ctx);
+
+	if (ctx->status & CHAN_OPENED)
+		dev_dbg(dev, "Opened ");
+	if (ctx->status & CHAN_ATTACHED)
+		dev_dbg(dev, "and attached");
+	dev_dbg(dev, "\n");
+	dev_dbg(dev, "-----------------------------------\n");
+	dev_dbg(dev, "\n\n");
+}
+
+/* Dump the IP's global control/interrupt registers (debug only) */
+static void
+xm2msc_pr_screg(struct device *dev, const void __iomem *base)
+{
+	dev_dbg(dev, "Ctr, GIE,  IE,  IS   OUT\n");
+	dev_dbg(dev, "0x%x  0x%x   0x%x  0x%x  0x%x\n",
+		xm2msc_readreg(base + XM2MSC_AP_CTRL),
+		xm2msc_readreg(base + XM2MSC_GIE),
+		xm2msc_readreg(base + XM2MSC_IER),
+		xm2msc_readreg(base + XM2MSC_ISR),
+		xm2msc_readreg(base + XM2MSC_NUM_OUTS));
+}
+
+/*
+ * Dump one channel's register window (debug only). Channel 4 reads its
+ * second destination buffer register at an offset shifted by
+ * XM2MSC_RESERVED_AREA — see the reserved-area note at the defines.
+ */
+static void
+xm2msc_pr_chanreg(struct device *dev, struct xm2msc_chan_ctx *chan)
+{
+	const void __iomem *base = chan->regs;
+
+	dev_dbg(dev, "WIN HIN INPIXELFMT INSTRIDE SRCB0L/H SRCB1L/H\n");
+	dev_dbg(dev, "%d   %d     %d       %d      0x%x/0x%x      0x%x/0x%x\n",
+		xm2msc_readreg(base + XM2MSC_WIDTHIN),
+		xm2msc_readreg(base + XM2MSC_HEIGHTIN),
+		xm2msc_readreg(base + XM2MSC_INPIXELFMT),
+		xm2msc_readreg(base + XM2MSC_INSTRIDE),
+		xm2msc_readreg(base + XM2MSC_SRCIMGBUF0),
+		xm2msc_readreg(base + XM2MSC_SRCIMGBUF0 + 4),
+		xm2msc_readreg(base + XM2MSC_SRCIMGBUF1),
+		xm2msc_readreg(base + XM2MSC_SRCIMGBUF1 + 4));
+	dev_dbg(dev, "WOUT HOUT OUTPIXELFMT OUTSTRIDE DBUF0L/H DBUF1L/H\n");
+	dev_dbg(dev, "%d   %d     %d       %d      0x%x/0x%x      0x%x/0x%x\n",
+		xm2msc_readreg(base + XM2MSC_WIDTHOUT),
+		xm2msc_readreg(base + XM2MSC_HEIGHTOUT),
+		xm2msc_readreg(base + XM2MSC_OUTPIXELFMT),
+		xm2msc_readreg(base + XM2MSC_OUTSTRIDE),
+		xm2msc_readreg(base + XM2MSC_DSTIMGBUF0),
+		xm2msc_readreg(base + XM2MSC_DSTIMGBUF0 + 4),
+		chan->num == 4 ?
+		xm2msc_readreg(base +
+			       XM2MSC_DSTIMGBUF1 + XM2MSC_RESERVED_AREA) :
+		xm2msc_readreg(base + XM2MSC_DSTIMGBUF1),
+		chan->num == 4 ?
+		xm2msc_readreg(base +
+			       XM2MSC_DSTIMGBUF1 + XM2MSC_RESERVED_AREA + 4) :
+		xm2msc_readreg(base + XM2MSC_DSTIMGBUF1 + 4));
+
+	dev_dbg(dev, "LINERATE PIXELRATE\n");
+	dev_dbg(dev, "0x%x     0x%x\n",
+		xm2msc_readreg(base + XM2MSC_LINERATE),
+		xm2msc_readreg(base + XM2MSC_PIXELRATE));
+}
+
+/* Dump the global registers plus every running channel's window (debug) */
+static void
+xm2msc_pr_allchanreg(struct xm2m_msc_dev *xm2msc)
+{
+	unsigned int i;
+	struct xm2msc_chan_ctx *chan_ctx;
+	struct device *dev = xm2msc->dev;
+
+	xm2msc_pr_screg(xm2msc->dev, xm2msc->regs);
+
+	for (i = 0; i < xm2msc->running_chan; i++) {
+		chan_ctx = &xm2msc->xm2msc_chan[i];
+		dev_dbg(dev, "Regs val for channel %d\n", i);
+		dev_dbg(dev, "______________________________________________\n");
+		xm2msc_pr_chanreg(dev, chan_ctx);
+		dev_dbg(dev, "processed frames = %lu\n", chan_ctx->frames);
+		dev_dbg(dev, "______________________________________________\n");
+	}
+}
+
+/*
+ * Plain (non-atomic) bitmap helpers; callers provide their own locking
+ * (see the mutex/spinlock users below).
+ */
+static inline bool xm2msc_testbit(int num, u32 *addr)
+{
+	return (*addr & BIT(num));
+}
+
+static inline void xm2msc_setbit(int num, u32 *addr)
+{
+	*addr |= BIT(num);
+}
+
+static inline void xm2msc_clrbit(int num, u32 *addr)
+{
+	*addr &= ~BIT(num);
+}
+
+/* Clear the ap_start bit, leaving all other control bits intact. */
+static void xm2msc_stop(struct xm2m_msc_dev *xm2msc)
+{
+	void __iomem *base = xm2msc->regs;
+
+	xm2msc_writereg(base + XM2MSC_AP_CTRL,
+			xm2msc_readreg(base + XM2MSC_AP_CTRL) &
+			~XM2MSC_AP_CTRL_START);
+}
+
+/* Set the ap_start bit, leaving all other control bits intact. */
+static void xm2msc_start(struct xm2m_msc_dev *xm2msc)
+{
+	void __iomem *base = xm2msc->regs;
+
+	xm2msc_writereg(base + XM2MSC_AP_CTRL,
+			xm2msc_readreg(base + XM2MSC_AP_CTRL) |
+			XM2MSC_AP_CTRL_START);
+}
+
+/* Record channel open (@state true) or close in the opened_chan bitmap */
+static void xm2msc_set_chan(struct xm2msc_chan_ctx *ctx, bool state)
+{
+	struct xm2m_msc_dev *xm2msc = ctx->xm2msc_dev;
+
+	mutex_lock(&xm2msc->mutex);
+	if (state)
+		xm2msc_setbit(ctx->num, &xm2msc->opened_chan);
+	else
+		xm2msc_clrbit(ctx->num, &xm2msc->opened_chan);
+	mutex_unlock(&xm2msc->mutex);
+}
+
+/*
+ * Record the streaming state of one side (OUT or CAP) of a channel in
+ * the corresponding bitmap, under the IRQ spinlock.
+ */
+static void
+xm2msc_set_chan_stream(struct xm2msc_chan_ctx *ctx, bool state, int type)
+{
+	struct xm2m_msc_dev *xm2msc = ctx->xm2msc_dev;
+	u32 *bmap;
+
+	bmap = (type == XM2MSC_CHAN_OUT) ? &xm2msc->out_streamed_chan :
+					   &xm2msc->cap_streamed_chan;
+
+	spin_lock(&xm2msc->lock);
+	if (state)
+		xm2msc_setbit(ctx->num, bmap);
+	else
+		xm2msc_clrbit(ctx->num, bmap);
+	spin_unlock(&xm2msc->lock);
+}
+
+/*
+ * Return non-zero when side @type (OUT or CAP) of the channel is
+ * currently streaming, reading the bitmap under the bitmap mutex.
+ */
+static int
+xm2msc_chk_chan_stream(struct xm2msc_chan_ctx *ctx, int type)
+{
+	struct xm2m_msc_dev *xm2msc = ctx->xm2msc_dev;
+	u32 *bmap;
+	int streaming;
+
+	bmap = (type == XM2MSC_CHAN_OUT) ? &xm2msc->out_streamed_chan :
+					   &xm2msc->cap_streamed_chan;
+
+	mutex_lock(&xm2msc->mutex);
+	streaming = xm2msc_testbit(ctx->num, bmap);
+	mutex_unlock(&xm2msc->mutex);
+
+	return streaming;
+}
+
+/* Mark formats[@index] as supported by this HW build */
+static void xm2msc_set_fmt(struct xm2m_msc_dev *xm2msc, u32 index)
+{
+	xm2msc_setbit(index, &xm2msc->supported_fmt);
+}
+
+/* Return non-zero if formats[@index] was marked via xm2msc_set_fmt() */
+static int xm2msc_chk_fmt(struct xm2m_msc_dev *xm2msc, u32 index)
+{
+	return xm2msc_testbit(index, &xm2msc->supported_fmt);
+}
+
+/*
+ * Pulse the external reset GPIO: assert then immediately de-assert.
+ * NOTE(review): there is no delay between the two edges — presumably the
+ * gpiod_set_value_cansleep() latency satisfies the IP's minimum reset
+ * pulse width; confirm against the IP datasheet.
+ */
+static void xm2msc_reset(struct xm2m_msc_dev *xm2msc)
+{
+	gpiod_set_value_cansleep(xm2msc->rst_gpio, XM2MSC_RESET_ASSERT);
+	gpiod_set_value_cansleep(xm2msc->rst_gpio, XM2MSC_RESET_DEASSERT);
+}
+
+/*
+ * mem2mem callbacks
+ */
+/*
+ * mem2mem callback: a job is ready once the channel has at least one
+ * source and one destination buffer queued.
+ */
+static int xm2msc_job_ready(void *priv)
+{
+	struct xm2msc_chan_ctx *chan_ctx = priv;
+	bool ready;
+
+	ready = v4l2_m2m_num_src_bufs_ready(chan_ctx->m2m_ctx) > 0 &&
+		v4l2_m2m_num_dst_bufs_ready(chan_ctx->m2m_ctx) > 0;
+
+	return ready ? 1 : 0;
+}
+
+/* True only when every running channel has a complete job queued */
+static bool xm2msc_alljob_ready(struct xm2m_msc_dev *xm2msc)
+{
+	unsigned int chan;
+
+	for (chan = 0; chan < xm2msc->running_chan; chan++) {
+		struct xm2msc_chan_ctx *chan_ctx =
+					&xm2msc->xm2msc_chan[chan];
+
+		if (xm2msc_job_ready(chan_ctx))
+			continue;
+
+		dev_dbg(xm2msc->dev, "chan %d not ready\n",
+			chan_ctx->num);
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * Return every queued source and destination buffer of @chan_ctx to
+ * user space with VB2_BUF_STATE_ERROR and finish the current m2m job,
+ * all under the device spinlock.
+ */
+static void xm2msc_chan_abort_bufs(struct xm2msc_chan_ctx *chan_ctx)
+{
+	struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+	struct vb2_v4l2_buffer *dst_vb, *src_vb;
+
+	spin_lock(&xm2msc->lock);
+	dev_dbg(xm2msc->dev, "aborting all buffers\n");
+
+	while (v4l2_m2m_num_src_bufs_ready(chan_ctx->m2m_ctx) > 0) {
+		src_vb = v4l2_m2m_src_buf_remove(chan_ctx->m2m_ctx);
+		v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
+	}
+
+	while (v4l2_m2m_num_dst_bufs_ready(chan_ctx->m2m_ctx) > 0) {
+		dst_vb = v4l2_m2m_dst_buf_remove(chan_ctx->m2m_ctx);
+		v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
+	}
+
+	v4l2_m2m_job_finish(chan_ctx->m2m_dev, chan_ctx->m2m_ctx);
+	spin_unlock(&xm2msc->lock);
+}
+
+/* mem2mem callback: abort the channel's buffers and mark it not streaming */
+static void xm2msc_job_abort(void *priv)
+{
+	struct xm2msc_chan_ctx *chan_ctx = priv;
+
+	xm2msc_chan_abort_bufs(chan_ctx);
+
+	/*
+	 * Stream off the channel as job_abort may not always
+	 * be called after streamoff
+	 */
+	xm2msc_set_chan_stream(chan_ctx, false, XM2MSC_CHAN_OUT);
+	xm2msc_set_chan_stream(chan_ctx, false, XM2MSC_CHAN_CAP);
+}
+
+/*
+ * Program source/destination DMA addresses for every running channel.
+ * For 2-plane formats the chroma address is either the second vb2 plane
+ * ('M' fourccs, 2 buffers) or luma + aligned first-plane size (non-'M'
+ * single-buffer fourccs); single-plane formats write 0. Uses 64-bit
+ * register writes when the IP was built with 64-bit addresses.
+ *
+ * Return: 0 on success, -EINVAL if any channel has no buffers queued.
+ */
+static int xm2msc_set_bufaddr(struct xm2m_msc_dev *xm2msc)
+{
+	unsigned int chan;
+	u32 row_align;
+	struct xm2msc_chan_ctx *chan_ctx;
+	struct vb2_v4l2_buffer *src_vb, *dst_vb;
+	void __iomem *base;
+	struct xm2msc_q_data *q_data;
+	dma_addr_t src_luma, dst_luma;
+	dma_addr_t src_croma, dst_croma;
+
+	if (!xm2msc_alljob_ready(xm2msc))
+		return -EINVAL;
+
+	for (chan = 0; chan < xm2msc->running_chan; chan++) {
+		chan_ctx = &xm2msc->xm2msc_chan[chan];
+		base = chan_ctx->regs;
+
+		src_vb = v4l2_m2m_next_src_buf(chan_ctx->m2m_ctx);
+		dst_vb = v4l2_m2m_next_dst_buf(chan_ctx->m2m_ctx);
+
+		if (!src_vb || !dst_vb) {
+			v4l2_err(&xm2msc->v4l2_dev, "buffer not found chan = %d\n",
+				 chan_ctx->num);
+			return -EINVAL;
+		}
+
+		src_luma = vb2_dma_contig_plane_dma_addr(&src_vb->vb2_buf, 0);
+		dst_luma = vb2_dma_contig_plane_dma_addr(&dst_vb->vb2_buf, 0);
+
+		q_data = &chan_ctx->q_data[XM2MSC_CHAN_OUT];
+		row_align = chan_ctx->output_height_align;
+		if (chan_ctx->q_data[XM2MSC_CHAN_OUT].nbuffs == 2)
+			/* fmts having 2 planes 2 buffers */
+			src_croma =
+			    vb2_dma_contig_plane_dma_addr(&src_vb->vb2_buf,
+							  1);
+		else if (xm2msc_is_yuv_singlebuff(q_data->fmt->fourcc))
+			/* fmts having 2 planes 1 contiguous buffer */
+			src_croma = src_luma +
+				xm2msc_yuv_1stplane_size(q_data, row_align);
+		else /* fmts having 1 planes 1 contiguous buffer */
+			src_croma = 0;
+
+		q_data = &chan_ctx->q_data[XM2MSC_CHAN_CAP];
+		row_align = chan_ctx->capture_height_align;
+		if (chan_ctx->q_data[XM2MSC_CHAN_CAP].nbuffs == 2)
+			dst_croma =
+			    vb2_dma_contig_plane_dma_addr(&dst_vb->vb2_buf,
+							  1);
+		else if (xm2msc_is_yuv_singlebuff(q_data->fmt->fourcc))
+			dst_croma = dst_luma +
+				xm2msc_yuv_1stplane_size(q_data, row_align);
+		else
+			dst_croma = 0;
+
+		/* Channel 4's DSTIMGBUF1 lives past the reserved area */
+		if (xm2msc->dma_addr_size == 64 &&
+		    sizeof(dma_addr_t) == sizeof(u64)) {
+			xm2msc_write64reg(base + XM2MSC_SRCIMGBUF0, src_luma);
+			xm2msc_write64reg(base + XM2MSC_SRCIMGBUF1, src_croma);
+			xm2msc_write64reg(base + XM2MSC_DSTIMGBUF0, dst_luma);
+			if (chan_ctx->num == 4) /* TODO: To be fixed in HW */
+				xm2msc_write64reg(base + XM2MSC_DSTIMGBUF1 +
+						  XM2MSC_RESERVED_AREA,
+						  dst_croma);
+			else
+				xm2msc_write64reg(base + XM2MSC_DSTIMGBUF1,
+						  dst_croma);
+		} else {
+			xm2msc_writereg(base + XM2MSC_SRCIMGBUF0, src_luma);
+			xm2msc_writereg(base + XM2MSC_SRCIMGBUF1, src_croma);
+			xm2msc_writereg(base + XM2MSC_DSTIMGBUF0, dst_luma);
+			if (chan_ctx->num == 4) /* TODO: To be fixed in HW */
+				xm2msc_writereg(base + XM2MSC_DSTIMGBUF1 +
+						XM2MSC_RESERVED_AREA,
+						dst_croma);
+			else
+				xm2msc_writereg(base + XM2MSC_DSTIMGBUF1,
+						dst_croma);
+		}
+	}
+	return 0;
+}
+
+/* Tell the mem2mem framework that the job on every running channel ended. */
+static void xm2msc_job_finish(struct xm2m_msc_dev *xm2msc)
+{
+	unsigned int i;
+
+	for (i = 0; i < xm2msc->running_chan; i++) {
+		struct xm2msc_chan_ctx *ctx = &xm2msc->xm2msc_chan[i];
+
+		v4l2_m2m_job_finish(ctx->m2m_dev, ctx->m2m_ctx);
+	}
+}
+
+/*
+ * xm2msc_job_done - complete the current src/dst buffer pair on each
+ * running channel.
+ *
+ * Propagates timestamp, timecode and the timestamp-source flags from the
+ * source to the destination buffer before marking both DONE.
+ */
+static void xm2msc_job_done(struct xm2m_msc_dev *xm2msc)
+{
+ u32 chan;
+
+ for (chan = 0; chan < xm2msc->running_chan; chan++) {
+ struct xm2msc_chan_ctx *chan_ctx;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
+ unsigned long flags;
+
+ chan_ctx = &xm2msc->xm2msc_chan[chan];
+
+ src_vb = v4l2_m2m_src_buf_remove(chan_ctx->m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(chan_ctx->m2m_ctx);
+
+ if (src_vb && dst_vb) {
+ dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
+ dst_vb->timecode = src_vb->timecode;
+ dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->flags |=
+ src_vb->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+
+ /* buf_done under the device lock, as the ISR path may race */
+ spin_lock_irqsave(&xm2msc->lock, flags);
+ v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
+ spin_unlock_irqrestore(&xm2msc->lock, flags);
+ }
+ chan_ctx->frames++;
+ }
+}
+
+/*
+ * xm2msc_device_run - mem2mem .device_run handler.
+ *
+ * Reprograms the IP when the set of streaming channels changed, writes the
+ * buffer addresses, starts the scaler and waits (blocking) for the ISR to
+ * signal completion, then completes the buffers and finishes the jobs.
+ *
+ * NOTE(review): device_busy is set under xm2msc->lock but cleared without
+ * it in three paths below — confirm a concurrent device_run cannot observe
+ * a stale value.
+ * NOTE(review): the tail call back into xm2msc_device_run() recurses while
+ * jobs remain ready — confirm recursion depth is bounded in practice.
+ */
+static void xm2msc_device_run(void *priv)
+{
+ struct xm2msc_chan_ctx *chan_ctx = priv;
+ struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+ void __iomem *base = xm2msc->regs;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&xm2msc->lock, flags);
+ if (xm2msc->device_busy) {
+ spin_unlock_irqrestore(&xm2msc->lock, flags);
+ return;
+ }
+ xm2msc->device_busy = true;
+
+ if (xm2msc->running_chan != NUM_STREAM(xm2msc)) {
+ dev_dbg(xm2msc->dev, "Running chan was %d\n",
+ xm2msc->running_chan);
+ xm2msc->running_chan = NUM_STREAM(xm2msc);
+
+ /* IP need reset for updating of XM2MSC_NUM_OUT */
+ xm2msc_reset(xm2msc);
+ xm2msc_writereg(base + XM2MSC_NUM_OUTS, xm2msc->running_chan);
+ xm2msc_program_allchan(xm2msc);
+ }
+ spin_unlock_irqrestore(&xm2msc->lock, flags);
+
+ dev_dbg(xm2msc->dev, "Running chan = %d\n", xm2msc->running_chan);
+ if (!xm2msc->running_chan) {
+ xm2msc->device_busy = false;
+ return;
+ }
+
+ ret = xm2msc_set_bufaddr(xm2msc);
+ if (ret) {
+ /*
+ * All channel does not have buffer
+ * Currently we do not handle the removal of any Intermediate
+ * channel while streaming is going on
+ */
+ if (xm2msc->out_streamed_chan || xm2msc->cap_streamed_chan)
+ dev_err(xm2msc->dev,
+ "Buffer not available, streaming chan 0x%x\n",
+ xm2msc->cap_streamed_chan);
+
+ xm2msc->device_busy = false;
+ return;
+ }
+
+ /* Enable global + done interrupts before kicking the IP */
+ xm2msc_writereg(base + XM2MSC_GIE, XM2MSC_GIE_EN);
+ xm2msc_writereg(base + XM2MSC_IER, XM2MSC_ISR_DONE);
+
+ xm2msc_pr_status(xm2msc, __func__);
+ xm2msc_pr_screg(xm2msc->dev, base);
+ xm2msc_pr_allchanreg(xm2msc);
+
+ xm2msc_start(xm2msc);
+
+ /* Block until xm2msc_isr() clears isr_wait and wakes us */
+ xm2msc->isr_wait = true;
+ wait_event(xm2msc->isr_finished, !xm2msc->isr_wait);
+
+ xm2msc_job_done(xm2msc);
+
+ xm2msc->device_busy = false;
+
+ if (xm2msc_alljob_ready(xm2msc))
+ xm2msc_device_run(xm2msc->xm2msc_chan);
+
+ xm2msc_job_finish(xm2msc);
+}
+
+/*
+ * xm2msc_isr - interrupt handler.
+ *
+ * Acknowledges the DONE interrupt, stops the IP and wakes the waiter in
+ * xm2msc_device_run(). Returns IRQ_NONE for interrupts that are not ours
+ * (the line is shared).
+ */
+static irqreturn_t xm2msc_isr(int irq, void *data)
+{
+ struct xm2m_msc_dev *xm2msc = (struct xm2m_msc_dev *)data;
+ void __iomem *base = xm2msc->regs;
+ u32 status;
+
+ status = xm2msc_readreg(base + XM2MSC_ISR);
+ if (!(status & XM2MSC_ISR_DONE))
+ return IRQ_NONE;
+
+ /* write-to-clear the DONE bit */
+ xm2msc_writereg(base + XM2MSC_ISR, status & XM2MSC_ISR_DONE);
+
+ xm2msc_stop(xm2msc);
+
+ xm2msc->isr_wait = false;
+ wake_up(&xm2msc->isr_finished);
+
+ return IRQ_HANDLED;
+}
+
+/* VIDIOC_STREAMON handler: delegate to the mem2mem framework. */
+static int xm2msc_streamon(struct file *file, void *fh,
+			   enum v4l2_buf_type type)
+{
+	return v4l2_m2m_streamon(file, fh_to_chanctx(fh)->m2m_ctx, type);
+}
+
+/*
+ * VIDIOC_STREAMOFF handler.
+ *
+ * After stopping this queue, re-enter the scheduler: other channels may
+ * still be streaming and have jobs ready to run.
+ */
+static int xm2msc_streamoff(struct file *file, void *fh,
+ enum v4l2_buf_type type)
+{
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(fh);
+ int ret;
+
+ ret = v4l2_m2m_streamoff(file, chan_ctx->m2m_ctx, type);
+
+ /* Check if any channel is still running */
+ xm2msc_device_run(chan_ctx);
+ return ret;
+}
+
+/* VIDIOC_QBUF: queue a buffer on this channel's m2m context. */
+static int xm2msc_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
+{
+	return v4l2_m2m_qbuf(file, fh_to_chanctx(fh)->m2m_ctx, buf);
+}
+
+/* VIDIOC_DQBUF: dequeue a completed buffer from this channel. */
+static int xm2msc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
+{
+	return v4l2_m2m_dqbuf(file, fh_to_chanctx(fh)->m2m_ctx, buf);
+}
+
+/* VIDIOC_EXPBUF: export a buffer as a dmabuf fd. */
+static int xm2msc_expbuf(struct file *file, void *fh,
+			 struct v4l2_exportbuffer *eb)
+{
+	return v4l2_m2m_expbuf(file, fh_to_chanctx(fh)->m2m_ctx, eb);
+}
+
+/* VIDIOC_CREATE_BUFS: allocate additional buffers on this channel. */
+static int xm2msc_createbufs(struct file *file, void *fh,
+			     struct v4l2_create_buffers *cb)
+{
+	return v4l2_m2m_create_bufs(file, fh_to_chanctx(fh)->m2m_ctx, cb);
+}
+
+/* VIDIOC_REQBUFS: (de)allocate buffers on this channel's queues. */
+static int xm2msc_reqbufs(struct file *file, void *fh,
+			  struct v4l2_requestbuffers *reqbufs)
+{
+	return v4l2_m2m_reqbufs(file, fh_to_chanctx(fh)->m2m_ctx, reqbufs);
+}
+
+/* VIDIOC_QUERYBUF: query the state of a buffer on this channel. */
+static int xm2msc_querybuf(struct file *file, void *fh,
+			   struct v4l2_buffer *buf)
+{
+	return v4l2_m2m_querybuf(file, fh_to_chanctx(fh)->m2m_ctx, buf);
+}
+
+/*
+ * xm2msc_cal_imagesize - fill in bytesperline and sizeimage for every
+ * plane of @q_data, using the stride and the (align-adjusted) height.
+ *
+ * Semi-planar 420 formats get half a luma plane of extra chroma: appended
+ * to plane 0 for the single-buffer variants (NV12/XV15), or placed in
+ * plane 1 for the two-buffer variants (NV12M/XV15M).
+ */
+static void
+xm2msc_cal_imagesize(struct xm2msc_chan_ctx *chan_ctx,
+ struct xm2msc_q_data *q_data, u32 type)
+{
+ unsigned int i;
+ u32 fourcc = q_data->fmt->fourcc;
+ u32 height = q_data->height;
+
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ height = ALIGN(height, chan_ctx->output_height_align);
+ else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ height = ALIGN(height, chan_ctx->capture_height_align);
+
+ for (i = 0; i < q_data->nbuffs; i++) {
+ q_data->bytesperline[i] = q_data->stride;
+ q_data->sizeimage[i] = q_data->stride * height;
+ }
+
+ switch (fourcc) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_XV15:
+ /*
+ * Adding chroma plane size as NV12/XV15
+ * have a contiguous buffer for luma and chroma
+ */
+ q_data->sizeimage[0] +=
+ q_data->stride * (height / 2);
+ break;
+ case V4L2_PIX_FMT_NV12M:
+ case V4L2_PIX_FMT_XV15M:
+ q_data->sizeimage[1] =
+ q_data->stride * (height / 2);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * xm2msc_cal_stride - compute the line stride in bytes for @width pixels
+ * of hardware format @xfmt, aligned up to the data bus requirement for
+ * @ppc pixels per clock.
+ *
+ * Returns 0 for an unknown format (caller-visible as a zero stride).
+ */
+static unsigned int
+xm2msc_cal_stride(unsigned int width, enum xm2msc_pix_fmt xfmt, u8 ppc)
+{
+	unsigned int stride;
+	u32 align;
+
+	/* Stride in Bytes = (Width x Bytes per Pixel); */
+	switch (xfmt) {
+	case XILINX_M2MSC_FMT_RGBX8:
+	case XILINX_M2MSC_FMT_YUVX8:
+	case XILINX_M2MSC_FMT_RGBX10:
+	case XILINX_M2MSC_FMT_YUVX10:
+	case XILINX_M2MSC_FMT_BGRX8:
+		stride = width * 4;
+		break;
+	case XILINX_M2MSC_FMT_YUYV8:
+	case XILINX_M2MSC_FMT_UYVY8:
+		stride = width * 2;
+		break;
+	case XILINX_M2MSC_FMT_Y_UV8:
+	case XILINX_M2MSC_FMT_Y_UV8_420:
+	case XILINX_M2MSC_FMT_Y8:
+		stride = width * 1;
+		break;
+	case XILINX_M2MSC_FMT_RGB8:
+	case XILINX_M2MSC_FMT_YUV8:
+	case XILINX_M2MSC_FMT_BGR8:
+		stride = width * 3;
+		break;
+	case XILINX_M2MSC_FMT_Y_UV10:
+	case XILINX_M2MSC_FMT_Y_UV10_420:
+	case XILINX_M2MSC_FMT_Y10:
+		/* 4 bytes per 3 pixels */
+		stride = DIV_ROUND_UP(width * 4, 3);
+		break;
+	default:
+		stride = 0;
+		break;
+	}
+
+	/* The data size is 64*pixels per clock bits */
+	align = ppc * XM2MSC_ALIGN_MUL;
+	stride = ALIGN(stride, align);
+
+	return stride;
+}
+
+/*
+ * vidioc_try_fmt - validate and clamp a requested format.
+ *
+ * Clamps width/height to [XM2MSC_MIN_*, max_*] and rounds width up to a
+ * multiple of pixels-per-clock, per the V4L2 rule that drivers correct
+ * unsupported dimensions rather than fail.
+ *
+ * NOTE(review): this *try* handler mutates q_data->fmt and returns -EBUSY
+ * when the queue is busy — both are normally reserved for S_FMT; confirm
+ * this is intentional for this driver.
+ */
+static int
+vidioc_try_fmt(struct xm2msc_chan_ctx *chan_ctx, struct v4l2_format *f)
+{
+ struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct xm2msc_q_data *q_data;
+ struct vb2_queue *vq;
+ int index;
+
+ if (pix->width < XM2MSC_MIN_WIDTH || pix->width > xm2msc->max_wd ||
+ pix->height < XM2MSC_MIN_HEIGHT || pix->height > xm2msc->max_ht)
+ dev_dbg(xm2msc->dev,
+ "Wrong input parameters %d, wxh: %dx%d.\n",
+ f->type, f->fmt.pix.width, f->fmt.pix.height);
+
+ /* The width value must be a multiple of pixels per clock */
+ if (pix->width % chan_ctx->xm2msc_dev->ppc) {
+ dev_dbg(xm2msc->dev,
+ "Wrong align parameters %d, wxh: %dx%d.\n",
+ f->type, f->fmt.pix.width, f->fmt.pix.height);
+ pix->width = ALIGN(pix->width, chan_ctx->xm2msc_dev->ppc);
+ }
+
+ /*
+ * V4L2 specification suggests the driver corrects the
+ * format struct if any of the dimensions is unsupported
+ */
+ if (pix->height < XM2MSC_MIN_HEIGHT)
+ pix->height = XM2MSC_MIN_HEIGHT;
+ else if (pix->height > xm2msc->max_ht)
+ pix->height = xm2msc->max_ht;
+
+ if (pix->width < XM2MSC_MIN_WIDTH)
+ pix->width = XM2MSC_MIN_WIDTH;
+ else if (pix->width > xm2msc->max_wd)
+ pix->width = xm2msc->max_wd;
+
+ vq = v4l2_m2m_get_vq(chan_ctx->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = get_q_data(chan_ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&xm2msc->v4l2_dev,
+ "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ q_data->fmt = find_format(f);
+ index = find_format_index(f);
+ if (!q_data->fmt || index == ARRAY_SIZE(formats) ||
+ !xm2msc_chk_fmt(xm2msc, index)) {
+ v4l2_err(&xm2msc->v4l2_dev,
+ "Couldn't set format type %d, wxh: %dx%d. ",
+ f->type, f->fmt.pix.width, f->fmt.pix.height);
+ v4l2_err(&xm2msc->v4l2_dev,
+ "fmt: %d, field: %d\n",
+ f->fmt.pix.pixelformat, f->fmt.pix.field);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * xm2msc_get_align - load per-channel stride/height alignment from the
+ * module parameter arrays, warning when non-default values are used.
+ *
+ * TODO: This is a temporary solution, will be reverted once stride and
+ * height align value come from application.
+ */
+static void xm2msc_get_align(struct xm2msc_chan_ctx *chan_ctx)
+{
+	chan_ctx->output_stride_align = output_stride_align[chan_ctx->num];
+	chan_ctx->capture_stride_align = capture_stride_align[chan_ctx->num];
+	chan_ctx->output_height_align = output_height_align[chan_ctx->num];
+	chan_ctx->capture_height_align = capture_height_align[chan_ctx->num];
+	if (output_stride_align[chan_ctx->num] != 1 ||
+	    capture_stride_align[chan_ctx->num] != 1 ||
+	    output_height_align[chan_ctx->num] != 1 ||
+	    capture_height_align[chan_ctx->num] != 1) {
+		/*
+		 * Emit each warning as a complete, newline-terminated log
+		 * record; the old fragments produced broken log lines.
+		 */
+		dev_info(chan_ctx->xm2msc_dev->dev,
+			 "You entered values other than default values.\n");
+		dev_info(chan_ctx->xm2msc_dev->dev,
+			 "Please note this may not be available for longer and align values will come from application\n");
+		dev_info(chan_ctx->xm2msc_dev->dev,
+			 "value entered are -\n"
+			 "output_stride_align = %d\n"
+			 "output_height_align = %d\n"
+			 "capture_stride_align = %d\n"
+			 "capture_height_align = %d\n",
+			 chan_ctx->output_stride_align,
+			 chan_ctx->output_height_align,
+			 chan_ctx->capture_stride_align,
+			 chan_ctx->capture_height_align);
+	}
+}
+
+/*
+ * vidioc_s_fmt - apply a (already try-validated) format to the queue.
+ *
+ * Computes stride from width/format/ppc, applies per-channel alignment,
+ * recalculates per-plane sizes and reports them back to userspace.
+ */
+static int
+vidioc_s_fmt(struct xm2msc_chan_ctx *chan_ctx, struct v4l2_format *f)
+{
+	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+	struct xm2msc_q_data *q_data = get_q_data(chan_ctx, f->type);
+	unsigned int i;
+	unsigned int align = 1;
+
+	/* Reject unknown buffer types instead of dereferencing NULL */
+	if (!q_data)
+		return -EINVAL;
+
+	q_data->width = pix->width;
+	q_data->height = pix->height;
+	q_data->stride = xm2msc_cal_stride(pix->width,
+					   q_data->fmt->xm2msc_fmt,
+					   chan_ctx->xm2msc_dev->ppc);
+
+	xm2msc_get_align(chan_ctx);
+
+	if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+		align = chan_ctx->output_stride_align;
+	else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+		align = chan_ctx->capture_stride_align;
+
+	q_data->stride = ALIGN(q_data->stride, align);
+
+	q_data->colorspace = pix->colorspace;
+	q_data->field = pix->field;
+	q_data->nbuffs = q_data->fmt->num_buffs;
+
+	xm2msc_cal_imagesize(chan_ctx, q_data, f->type);
+
+	for (i = 0; i < q_data->nbuffs; i++) {
+		pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
+		pix->plane_fmt[i].sizeimage = q_data->sizeimage[i];
+	}
+
+	xm2msc_pr_q(chan_ctx->xm2msc_dev->dev, q_data,
+		    chan_ctx->num, f->type, __func__);
+
+	return 0;
+}
+
+/* VIDIOC_TRY_FMT on the OUTPUT (source) queue. */
+static int xm2msc_try_fmt_vid_out(struct file *file, void *fh,
+				  struct v4l2_format *f)
+{
+	return vidioc_try_fmt(fh_to_chanctx(fh), f);
+}
+
+/* VIDIOC_TRY_FMT on the CAPTURE (destination) queue. */
+static int xm2msc_try_fmt_vid_cap(struct file *file, void *fh,
+				  struct v4l2_format *f)
+{
+	return vidioc_try_fmt(fh_to_chanctx(fh), f);
+}
+
+/* VIDIOC_S_FMT on the CAPTURE queue: try first, then apply. */
+static int xm2msc_s_fmt_vid_cap(struct file *file, void *fh,
+				struct v4l2_format *f)
+{
+	struct xm2msc_chan_ctx *ctx = fh_to_chanctx(fh);
+	int err = xm2msc_try_fmt_vid_cap(file, fh, f);
+
+	return err ? err : vidioc_s_fmt(ctx, f);
+}
+
+/* VIDIOC_S_FMT on the OUTPUT queue: try first, then apply. */
+static int xm2msc_s_fmt_vid_out(struct file *file, void *fh,
+				struct v4l2_format *f)
+{
+	struct xm2msc_chan_ctx *ctx = fh_to_chanctx(fh);
+	int err = xm2msc_try_fmt_vid_out(file, fh, f);
+
+	return err ? err : vidioc_s_fmt(ctx, f);
+}
+
+/* Report the currently configured format of a queue back to userspace. */
+static int vidioc_g_fmt(struct xm2msc_chan_ctx *chan_ctx, struct v4l2_format *f)
+{
+	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+	struct xm2msc_q_data *q_data;
+	unsigned int plane;
+
+	if (!v4l2_m2m_get_vq(chan_ctx->m2m_ctx, f->type))
+		return -EINVAL;
+
+	q_data = get_q_data(chan_ctx, f->type);
+	if (!q_data)
+		return -EINVAL;
+
+	pix->width = q_data->width;
+	pix->height = q_data->height;
+	pix->field = V4L2_FIELD_NONE;
+	pix->pixelformat = q_data->fmt->fourcc;
+	pix->colorspace = q_data->colorspace;
+	pix->num_planes = q_data->nbuffs;
+
+	for (plane = 0; plane < pix->num_planes; plane++) {
+		pix->plane_fmt[plane].bytesperline =
+			q_data->bytesperline[plane];
+		pix->plane_fmt[plane].sizeimage = q_data->sizeimage[plane];
+	}
+
+	return 0;
+}
+
+/* VIDIOC_G_FMT on the OUTPUT queue. */
+static int xm2msc_g_fmt_vid_out(struct file *file, void *fh,
+				struct v4l2_format *f)
+{
+	return vidioc_g_fmt(fh_to_chanctx(fh), f);
+}
+
+/* VIDIOC_G_FMT on the CAPTURE queue. */
+static int xm2msc_g_fmt_vid_cap(struct file *file, void *fh,
+				struct v4l2_format *f)
+{
+	return vidioc_g_fmt(fh_to_chanctx(fh), f);
+}
+
+/*
+ * Return the f->index'th *enabled* entry of the format table, skipping
+ * formats not advertised by this hardware instance.
+ */
+static int enum_fmt(struct xm2m_msc_dev *xm2msc, struct v4l2_fmtdesc *f)
+{
+	unsigned int i, matched = 0;
+
+	for (i = 0; i < ARRAY_SIZE(formats); i++) {
+		if (!xm2msc_chk_fmt(xm2msc, i))
+			continue;
+		if (matched++ == f->index) {
+			strlcpy(f->description, formats[i].name,
+				sizeof(f->description));
+			f->pixelformat = formats[i].fourcc;
+			return 0;
+		}
+	}
+
+	/* index is past the last enabled format */
+	return -EINVAL;
+}
+
+/* VIDIOC_ENUM_FMT on the CAPTURE queue (mplane only). */
+static int xm2msc_enum_fmt_vid_cap(struct file *file, void *fh,
+				   struct v4l2_fmtdesc *f)
+{
+	if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+		return -EINVAL;
+
+	return enum_fmt(fh_to_chanctx(fh)->xm2msc_dev, f);
+}
+
+/* VIDIOC_ENUM_FMT on the OUTPUT queue (mplane only). */
+static int xm2msc_enum_fmt_vid_out(struct file *file, void *fh,
+				   struct v4l2_fmtdesc *f)
+{
+	if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+		return -EINVAL;
+
+	return enum_fmt(fh_to_chanctx(fh)->xm2msc_dev, f);
+}
+
+/*
+ * VIDIOC_QUERYCAP: report driver/card identity and the mem2mem mplane
+ * capability.
+ */
+static int xm2msc_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ strncpy(cap->driver, XM2MSC_DRIVER_NAME, sizeof(cap->driver) - 1);
+ strncpy(cap->card, XM2MSC_DRIVER_NAME, sizeof(cap->card) - 1);
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", XM2MSC_DRIVER_NAME);
+ /*
+ * This is only a mem-to-mem video device. The STREAMING
+ * device capability flags are left only for compatibility
+ * and are scheduled for removal.
+ */
+ cap->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+/*
+ * vb2 .queue_setup: report plane count and per-plane sizes.
+ *
+ * Handles the VIDIOC_CREATE_BUFS path (*nplanes already set) by
+ * validating the caller-supplied layout, per the vb2 contract.
+ */
+static int xm2msc_queue_setup(struct vb2_queue *vq,
+			      unsigned int *nbuffers, unsigned int *nplanes,
+			      unsigned int sizes[], struct device *alloc_devs[])
+{
+	unsigned int i;
+	struct xm2msc_chan_ctx *chan_ctx = vb2_get_drv_priv(vq);
+	struct xm2msc_q_data *q_data;
+
+	q_data = get_q_data(chan_ctx, vq->type);
+	if (!q_data)
+		return -EINVAL;
+
+	if (*nplanes) {
+		/* create_bufs: verify the requested layout fits the format */
+		if (*nplanes != q_data->nbuffs)
+			return -EINVAL;
+		for (i = 0; i < *nplanes; i++)
+			if (sizes[i] < q_data->sizeimage[i])
+				return -EINVAL;
+		return 0;
+	}
+
+	*nplanes = q_data->nbuffs;
+
+	for (i = 0; i < *nplanes; i++)
+		sizes[i] = q_data->sizeimage[i];
+
+	dev_dbg(chan_ctx->xm2msc_dev->dev, "get %d buffer(s) of size %d",
+		*nbuffers, sizes[0]);
+	if (q_data->nbuffs == 2)
+		dev_dbg(chan_ctx->xm2msc_dev->dev, " and %d\n", sizes[1]);
+
+	return 0;
+}
+
+/*
+ * vb2 .buf_prepare: verify every plane is large enough for the configured
+ * format and set the plane payloads.
+ */
+static int xm2msc_buf_prepare(struct vb2_buffer *vb)
+{
+ struct xm2msc_chan_ctx *chan_ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+ struct xm2msc_q_data *q_data;
+ unsigned int i, num_buffs;
+
+ q_data = get_q_data(chan_ctx, vb->vb2_queue->type);
+ if (!q_data)
+ return -EINVAL;
+ num_buffs = q_data->nbuffs;
+
+ for (i = 0; i < num_buffs; i++) {
+ if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
+ v4l2_err(&xm2msc->v4l2_dev, "data will not fit into plane ");
+ v4l2_err(&xm2msc->v4l2_dev, "(%lu < %lu)\n",
+ vb2_plane_size(vb, i),
+ (long)q_data->sizeimage[i]);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < num_buffs; i++)
+ vb2_set_plane_payload(vb, i, q_data->sizeimage[i]);
+
+ return 0;
+}
+
+/* vb2 .buf_queue: hand the buffer to the mem2mem ready queue. */
+static void xm2msc_buf_queue(struct vb2_buffer *vb)
+{
+	struct xm2msc_chan_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+	v4l2_m2m_buf_queue(ctx->m2m_ctx, to_vb2_v4l2_buffer(vb));
+}
+
+/*
+ * Drain every queued buffer on @q's side of the m2m context and complete
+ * each one with the given state (typically ERROR on stream stop).
+ */
+static void xm2msc_return_all_buffers(struct xm2msc_chan_ctx *chan_ctx,
+				      struct vb2_queue *q,
+				      enum vb2_buffer_state state)
+{
+	bool src_side = V4L2_TYPE_IS_OUTPUT(q->type);
+	struct vb2_v4l2_buffer *vb;
+	unsigned long flags;
+
+	while ((vb = src_side ?
+		     v4l2_m2m_src_buf_remove(chan_ctx->m2m_ctx) :
+		     v4l2_m2m_dst_buf_remove(chan_ctx->m2m_ctx)) != NULL) {
+		spin_lock_irqsave(&chan_ctx->xm2msc_dev->lock, flags);
+		v4l2_m2m_buf_done(vb, state);
+		spin_unlock_irqrestore(&chan_ctx->xm2msc_dev->lock, flags);
+	}
+}
+
+/*
+ * vb2 .start_streaming: mark this queue's direction as streaming and
+ * program the per-direction (and, once both sides stream, the common)
+ * channel parameters.
+ *
+ * Note: q_data was previously declared `static`, silently sharing one
+ * pointer across all channels/queues — a latent race; it is a plain
+ * local now.
+ */
+static int xm2msc_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+	struct xm2msc_chan_ctx *chan_ctx = vb2_get_drv_priv(q);
+	struct xm2msc_q_data *q_data;
+	int type;
+
+	if (V4L2_TYPE_IS_OUTPUT(q->type))
+		xm2msc_set_chan_stream(chan_ctx, true, XM2MSC_CHAN_OUT);
+	else
+		xm2msc_set_chan_stream(chan_ctx, true, XM2MSC_CHAN_CAP);
+
+	xm2msc_set_chan_params(chan_ctx, q->type);
+
+	if (xm2msc_chk_chan_stream(chan_ctx, XM2MSC_CHAN_CAP) &&
+	    xm2msc_chk_chan_stream(chan_ctx, XM2MSC_CHAN_OUT))
+		xm2msc_set_chan_com_params(chan_ctx);
+
+	type = V4L2_TYPE_IS_OUTPUT(q->type) ?
+		V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
+		V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+	q_data = get_q_data(chan_ctx, type);
+	xm2msc_pr_q(chan_ctx->xm2msc_dev->dev, q_data, chan_ctx->num,
+		    type, __func__);
+	xm2msc_pr_status(chan_ctx->xm2msc_dev, __func__);
+
+	return 0;
+}
+
+/*
+ * vb2 .stop_streaming: fail any still-queued buffers and clear this
+ * direction's streaming flag.
+ */
+static void xm2msc_stop_streaming(struct vb2_queue *q)
+{
+	struct xm2msc_chan_ctx *ctx = vb2_get_drv_priv(q);
+	u32 dir = V4L2_TYPE_IS_OUTPUT(q->type) ?
+		XM2MSC_CHAN_OUT : XM2MSC_CHAN_CAP;
+
+	xm2msc_return_all_buffers(ctx, q, VB2_BUF_STATE_ERROR);
+	xm2msc_set_chan_stream(ctx, false, dir);
+}
+
+/* vb2 callbacks shared by the OUTPUT and CAPTURE queues of each channel */
+static const struct vb2_ops xm2msc_qops = {
+ .queue_setup = xm2msc_queue_setup,
+ .buf_prepare = xm2msc_buf_prepare,
+ .buf_queue = xm2msc_buf_queue,
+ .start_streaming = xm2msc_start_streaming,
+ .stop_streaming = xm2msc_stop_streaming,
+};
+
+/*
+ * m2m queue init callback: configure the source (OUTPUT) and destination
+ * (CAPTURE) vb2 queues identically except for their buffer type.
+ * Both use dma-contig memory and copy timestamps from src to dst.
+ */
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct xm2msc_chan_ctx *chan_ctx = priv;
+ struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+ int ret;
+
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_DMABUF | VB2_MMAP | VB2_USERPTR;
+ src_vq->drv_priv = chan_ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->ops = &xm2msc_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &xm2msc->dev_mutex;
+ src_vq->dev = xm2msc->v4l2_dev.dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF | VB2_USERPTR;
+ dst_vq->drv_priv = chan_ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &xm2msc_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &xm2msc->dev_mutex;
+ dst_vq->dev = xm2msc->v4l2_dev.dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+/* V4L2 ioctl handlers; multi-planar mem2mem only */
+static const struct v4l2_ioctl_ops xm2msc_ioctl_ops = {
+ .vidioc_querycap = xm2msc_querycap,
+
+ .vidioc_enum_fmt_vid_cap = xm2msc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap_mplane = xm2msc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap_mplane = xm2msc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap_mplane = xm2msc_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out = xm2msc_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out_mplane = xm2msc_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out_mplane = xm2msc_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out_mplane = xm2msc_s_fmt_vid_out,
+
+ .vidioc_reqbufs = xm2msc_reqbufs,
+ .vidioc_querybuf = xm2msc_querybuf,
+ .vidioc_expbuf = xm2msc_expbuf,
+ .vidioc_create_bufs = xm2msc_createbufs,
+
+ .vidioc_qbuf = xm2msc_qbuf,
+ .vidioc_dqbuf = xm2msc_dqbuf,
+
+ .vidioc_streamon = xm2msc_streamon,
+ .vidioc_streamoff = xm2msc_streamoff,
+};
+
+/*
+ * xm2msc_set_q_data - initialize one queue's format state to @fmt at the
+ * device maximum resolution, then derive stride and plane sizes.
+ *
+ * NOTE(review): get_q_data() is assumed non-NULL here since @type is one
+ * of the two valid mplane types supplied by the caller — confirm.
+ */
+static void xm2msc_set_q_data(struct xm2msc_chan_ctx *chan_ctx,
+ const struct xm2msc_fmt *fmt,
+ enum v4l2_buf_type type)
+{
+ struct xm2msc_q_data *q_data;
+ struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+
+ q_data = get_q_data(chan_ctx, type);
+
+ q_data->fmt = fmt;
+ q_data->width = xm2msc->max_wd;
+ q_data->height = xm2msc->max_ht;
+ q_data->field = V4L2_FIELD_NONE;
+ q_data->nbuffs = q_data->fmt->num_buffs;
+
+ q_data->stride = xm2msc_cal_stride(q_data->width,
+ q_data->fmt->xm2msc_fmt,
+ xm2msc->ppc);
+
+ xm2msc_cal_imagesize(chan_ctx, q_data, type);
+}
+
+/*
+ * xm2msc_set_chan_parm - set channel defaults: unit alignments and the
+ * first hardware-enabled format on both queues.
+ *
+ * Returns 0 on success, -EINVAL when the hardware advertises no
+ * supported format at all. (The useless `ret` local was dropped.)
+ */
+static int xm2msc_set_chan_parm(struct xm2msc_chan_ctx *chan_ctx)
+{
+	unsigned int i;
+	struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+
+	chan_ctx->output_stride_align = 1;
+	chan_ctx->output_height_align = 1;
+	chan_ctx->capture_stride_align = 1;
+	chan_ctx->capture_height_align = 1;
+
+	for (i = 0; i < ARRAY_SIZE(formats); i++) {
+		if (xm2msc_chk_fmt(xm2msc, i))
+			break;
+	}
+
+	/* No supported format */
+	if (i == ARRAY_SIZE(formats)) {
+		dev_err(xm2msc->dev, "no supported format found\n");
+		return -EINVAL;
+	}
+
+	xm2msc_set_q_data(chan_ctx, &formats[i],
+			  V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+	xm2msc_set_q_data(chan_ctx, &formats[i],
+			  V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+
+	return 0;
+}
+
+/*
+ * xm2msc_open - open() handler.
+ *
+ * Finds the attached channel matching the device minor, rejects a second
+ * open of the same channel, then sets up the v4l2 file handle and the
+ * mem2mem context for it.
+ *
+ * NOTE(review): on the "chan not found" path chan_ctx points at the last
+ * scanned channel, so the debug prints below log an unrelated channel —
+ * confirm this is acceptable for debug output.
+ */
+static int xm2msc_open(struct file *file)
+{
+ struct xm2m_msc_dev *xm2msc = video_drvdata(file);
+ struct xm2msc_chan_ctx *chan_ctx = NULL;
+ u32 minor, chan;
+ int ret;
+
+ if (mutex_lock_interruptible(&xm2msc->dev_mutex))
+ return -ERESTARTSYS;
+
+ minor = iminor(file_inode(file));
+
+ for (chan = 0; chan < xm2msc->max_chan; chan++) {
+ chan_ctx = &xm2msc->xm2msc_chan[chan];
+
+ if ((chan_ctx->status & CHAN_ATTACHED) &&
+ chan_ctx->minor == minor)
+ break;
+ }
+
+ if (chan == xm2msc->max_chan) {
+ v4l2_err(&xm2msc->v4l2_dev,
+ "%s Chan not found with minor = %d\n",
+ __func__, minor);
+ ret = -EBADF;
+ goto unlock;
+ }
+
+ /* Already opened, do not allow same channel
+ * to be open more then once
+ */
+ if (chan_ctx->status & CHAN_OPENED) {
+ v4l2_warn(&xm2msc->v4l2_dev,
+ "%s Chan already opened for minor = %d\n",
+ __func__, minor);
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ v4l2_fh_init(&chan_ctx->fh, &chan_ctx->vfd);
+ file->private_data = &chan_ctx->fh;
+ v4l2_fh_add(&chan_ctx->fh);
+
+ chan_ctx->m2m_ctx = v4l2_m2m_ctx_init(chan_ctx->m2m_dev,
+ chan_ctx, &queue_init);
+ if (IS_ERR(chan_ctx->m2m_ctx)) {
+ ret = PTR_ERR(chan_ctx->m2m_ctx);
+ v4l2_err(&xm2msc->v4l2_dev,
+ "%s Chan M2M CTX not creted for minor %d\n",
+ __func__, minor);
+ goto error_m2m;
+ }
+
+ chan_ctx->fh.m2m_ctx = chan_ctx->m2m_ctx;
+ chan_ctx->status |= CHAN_OPENED;
+ chan_ctx->xm2msc_dev = xm2msc;
+ chan_ctx->frames = 0;
+
+ xm2msc_set_chan(chan_ctx, true);
+
+ v4l2_info(&xm2msc->v4l2_dev, "Channel %d instance created\n", chan);
+
+ mutex_unlock(&xm2msc->dev_mutex);
+ xm2msc_pr_chanctx(chan_ctx, __func__);
+ xm2msc_pr_status(xm2msc, __func__);
+ return 0;
+
+error_m2m:
+ v4l2_fh_del(&chan_ctx->fh);
+ v4l2_fh_exit(&chan_ctx->fh);
+unlock:
+ mutex_unlock(&xm2msc->dev_mutex);
+ xm2msc_pr_chanctx(chan_ctx, __func__);
+ xm2msc_pr_status(xm2msc, __func__);
+ return ret;
+}
+
+/*
+ * xm2msc_release - release() handler: tear down the m2m context and file
+ * handle, and mark the channel closed so it can be opened again.
+ */
+static int xm2msc_release(struct file *file)
+{
+ struct xm2m_msc_dev *xm2msc = video_drvdata(file);
+ struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(file->private_data);
+
+ if (mutex_lock_interruptible(&xm2msc->dev_mutex))
+ return -ERESTARTSYS;
+
+ v4l2_m2m_ctx_release(chan_ctx->m2m_ctx);
+ v4l2_fh_del(&chan_ctx->fh);
+ v4l2_fh_exit(&chan_ctx->fh);
+ chan_ctx->status &= ~CHAN_OPENED;
+ xm2msc_set_chan(chan_ctx, false);
+
+ v4l2_info(&xm2msc->v4l2_dev, "Channel %d instance released\n",
+ chan_ctx->num);
+
+ mutex_unlock(&xm2msc->dev_mutex);
+ return 0;
+}
+
+/* poll() handler: serialize against other fops and defer to mem2mem. */
+static unsigned int xm2msc_poll(struct file *file,
+				struct poll_table_struct *wait)
+{
+	struct xm2msc_chan_ctx *ctx = fh_to_chanctx(file->private_data);
+	struct xm2m_msc_dev *dev = ctx->xm2msc_dev;
+	int rc;
+
+	mutex_lock(&dev->dev_mutex);
+	rc = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+	mutex_unlock(&dev->dev_mutex);
+
+	return rc;
+}
+
+/*
+ * mmap() handler.
+ *
+ * file->private_data holds the v4l2_fh embedded in the channel context,
+ * so use fh_to_chanctx() like every other handler instead of a raw cast
+ * (the raw cast was only correct by accident of member layout).
+ */
+static int xm2msc_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct xm2msc_chan_ctx *chan_ctx = fh_to_chanctx(file->private_data);
+	struct xm2m_msc_dev *xm2msc = chan_ctx->xm2msc_dev;
+	int ret;
+
+	mutex_lock(&xm2msc->dev_mutex);
+	ret = v4l2_m2m_mmap(file, chan_ctx->m2m_ctx, vma);
+	mutex_unlock(&xm2msc->dev_mutex);
+
+	return ret;
+}
+
+/* File operations for each per-channel video device node */
+static const struct v4l2_file_operations xm2msc_fops = {
+ .owner = THIS_MODULE,
+ .open = xm2msc_open,
+ .release = xm2msc_release,
+ .poll = xm2msc_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = xm2msc_mmap,
+};
+
+/* Template video_device, copied into each channel's vfd at probe time */
+static const struct video_device xm2msc_videodev = {
+ .name = XM2MSC_DRIVER_NAME,
+ .fops = &xm2msc_fops,
+ .ioctl_ops = &xm2msc_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release_empty,
+ .vfl_dir = VFL_DIR_M2M,
+};
+
+/* mem2mem framework callbacks */
+static const struct v4l2_m2m_ops xm2msc_m2m_ops = {
+ .device_run = xm2msc_device_run,
+ .job_ready = xm2msc_job_ready,
+ .job_abort = xm2msc_job_abort,
+};
+
+/*
+ * xm2msc_parse_of - read and validate all device-tree properties, map the
+ * register space and resolve clock/IRQ/reset-GPIO resources.
+ *
+ * Returns 0 on success or a negative errno (possibly -EPROBE_DEFER from
+ * the reset GPIO lookup).
+ */
+static int xm2msc_parse_of(struct platform_device *pdev,
+ struct xm2m_msc_dev *xm2msc)
+{
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ int hw_vid_fmt_cnt;
+ const char *vid_fmts[ARRAY_SIZE(formats)];
+ int ret;
+ u32 i, j;
+
+ xm2msc->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(xm2msc->clk)) {
+ ret = PTR_ERR(xm2msc->clk);
+ dev_err(dev, "failed to get clk (%d)\n", ret);
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xm2msc->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR((__force void *)xm2msc->regs))
+ return PTR_ERR((__force const void *)xm2msc->regs);
+
+ dev_dbg(dev, "IO Mem %pa mapped at %p\n", &res->start, xm2msc->regs);
+
+ ret = of_property_read_u32(node, "xlnx,max-chan",
+ &xm2msc->max_chan);
+ if (ret < 0)
+ return ret;
+
+ if (xm2msc->max_chan < XM2MSC_MIN_CHAN ||
+ xm2msc->max_chan > XM2MSC_MAX_CHAN) {
+ dev_err(dev,
+ "Invalid maximum scaler channels : %d",
+ xm2msc->max_chan);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-width",
+ &xm2msc->max_wd);
+ if (ret < 0) {
+ dev_err(dev,
+ "missing xlnx,max-width prop\n");
+ return ret;
+ }
+
+ if (xm2msc->max_wd < XM2MSC_MIN_WIDTH ||
+ xm2msc->max_wd > XM2MSC_MAX_WIDTH) {
+ dev_err(dev, "Invalid width : %d",
+ xm2msc->max_wd);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-height",
+ &xm2msc->max_ht);
+ if (ret < 0) {
+ dev_err(dev, "missing xlnx,max-height prop\n");
+ return ret;
+ }
+
+ if (xm2msc->max_ht < XM2MSC_MIN_HEIGHT ||
+ xm2msc->max_ht > XM2MSC_MAX_HEIGHT) {
+ dev_err(dev, "Invalid height : %d",
+ xm2msc->max_ht);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,dma-addr-width",
+ &xm2msc->dma_addr_size);
+ if (ret || (xm2msc->dma_addr_size != 32 &&
+ xm2msc->dma_addr_size != 64)) {
+ dev_err(dev, "missing/invalid addr width dts prop\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u8(node, "xlnx,pixels-per-clock",
+ &xm2msc->ppc);
+ if (ret || (xm2msc->ppc != 1 && xm2msc->ppc != 2 && xm2msc->ppc != 4)) {
+ dev_err(dev, "missing or invalid pixels per clock dts prop\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,num-taps",
+ &xm2msc->taps);
+ if (ret || (xm2msc->taps != XSCALER_TAPS_6 &&
+ xm2msc->taps != XSCALER_TAPS_8 &&
+ xm2msc->taps != XSCALER_TAPS_10 &&
+ xm2msc->taps != XSCALER_TAPS_12)) {
+ dev_err(dev, "missing/invalid taps in dts prop\n");
+ return -EINVAL;
+ }
+
+ /*
+ * NOTE(review): irq_of_parse_and_map() returns 0 (not a negative
+ * errno) on failure; the `< 0` test only works if xm2msc->irq is a
+ * signed type — confirm and consider checking for 0 instead.
+ */
+ xm2msc->irq = irq_of_parse_and_map(node, 0);
+ if (xm2msc->irq < 0) {
+ dev_err(dev, "Unable to get IRQ");
+ return xm2msc->irq;
+ }
+
+ dev_dbg(dev, "Max Channel Supported = %d\n", xm2msc->max_chan);
+ dev_dbg(dev, "DMA Addr width Supported = %d\n", xm2msc->dma_addr_size);
+ dev_dbg(dev, "Max col/row Supported = (%d) / (%d)\n",
+ xm2msc->max_wd, xm2msc->max_ht);
+ dev_dbg(dev, "taps Supported = %d\n", xm2msc->taps);
+ /* read supported video formats and update internal table */
+ hw_vid_fmt_cnt = of_property_count_strings(node, "xlnx,vid-formats");
+
+ /*
+ * NOTE(review): hw_vid_fmt_cnt may be negative on error and is
+ * passed straight to of_property_read_string_array() — confirm the
+ * helper handles that, or check the count first.
+ */
+ ret = of_property_read_string_array(node, "xlnx,vid-formats",
+ vid_fmts, hw_vid_fmt_cnt);
+ if (ret < 0) {
+ dev_err(dev,
+ "Missing or invalid xlnx,vid-formats dts prop\n");
+ return ret;
+ }
+
+ dev_dbg(dev, "Supported format = ");
+ for (i = 0; i < hw_vid_fmt_cnt; i++) {
+ const char *vid_fmt_name = vid_fmts[i];
+
+ for (j = 0; j < ARRAY_SIZE(formats); j++) {
+ const char *dts_name = formats[j].name;
+
+ if (strcmp(vid_fmt_name, dts_name))
+ continue;
+ dev_dbg(dev, "%s ", dts_name);
+
+ xm2msc_set_fmt(xm2msc, j);
+ }
+ }
+ dev_dbg(dev, "\n");
+ xm2msc->rst_gpio = devm_gpiod_get(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(xm2msc->rst_gpio)) {
+ ret = PTR_ERR(xm2msc->rst_gpio);
+ if (ret == -EPROBE_DEFER)
+ dev_info(dev,
+ "Probe deferred due to GPIO reset defer\n");
+ else
+ dev_err(dev,
+ "Unable to locate reset property in dt\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Unregister every attached channel's video device and release its m2m
+ * device. Channels are registered in order, so stop at the first
+ * unattached one.
+ */
+static void xm2msc_unreg_video_n_m2m(struct xm2m_msc_dev *xm2msc)
+{
+	unsigned int i;
+
+	for (i = 0; i < xm2msc->max_chan; i++) {
+		struct xm2msc_chan_ctx *ctx = &xm2msc->xm2msc_chan[i];
+
+		if (!(ctx->status & CHAN_ATTACHED))
+			break; /* registration is sequential */
+
+		video_unregister_device(&ctx->vfd);
+		ctx->status &= ~CHAN_ATTACHED;
+
+		if (!IS_ERR(ctx->m2m_dev))
+			v4l2_m2m_release(ctx->m2m_dev);
+	}
+}
+
+/*
+ * xm2m_msc_probe - probe: parse DT, enable the clock, reset the IP,
+ * register one video device + m2m device per channel, then the IRQ.
+ *
+ * NOTE(review): dev_mutex is assigned to vfd->lock and used by open()
+ * before mutex_init() runs after the registration loop — an early open()
+ * could use an uninitialized mutex; confirm and consider initializing
+ * the mutexes before video_register_device().
+ */
+static int xm2m_msc_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct xm2m_msc_dev *xm2msc;
+ struct xm2msc_chan_ctx *chan_ctx;
+ struct video_device *vfd;
+ unsigned int chan;
+
+ xm2msc = devm_kzalloc(&pdev->dev, sizeof(*xm2msc), GFP_KERNEL);
+ if (!xm2msc)
+ return -ENOMEM;
+
+ ret = xm2msc_parse_of(pdev, xm2msc);
+ if (ret < 0)
+ return ret;
+
+ xm2msc->dev = &pdev->dev;
+
+ ret = clk_prepare_enable(xm2msc->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable clk (%d)\n", ret);
+ return ret;
+ }
+
+ xm2msc_reset(xm2msc);
+
+ spin_lock_init(&xm2msc->lock);
+
+ ret = v4l2_device_register(&pdev->dev, &xm2msc->v4l2_dev);
+ if (ret)
+ goto reg_dev_err;
+
+ for (chan = 0; chan < xm2msc->max_chan; chan++) {
+ chan_ctx = &xm2msc->xm2msc_chan[chan];
+
+ vfd = &chan_ctx->vfd;
+ *vfd = xm2msc_videodev;
+ vfd->lock = &xm2msc->dev_mutex;
+ vfd->v4l2_dev = &xm2msc->v4l2_dev;
+ vfd->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE;
+
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, chan);
+ if (ret) {
+ v4l2_err(&xm2msc->v4l2_dev,
+ "Failed to register video dev for chan %d\n",
+ chan);
+ goto unreg_dev;
+ }
+
+ chan_ctx->status = CHAN_ATTACHED;
+
+ video_set_drvdata(vfd, xm2msc);
+ snprintf(vfd->name, sizeof(vfd->name),
+ "%s", xm2msc_videodev.name);
+ v4l2_info(&xm2msc->v4l2_dev,
+ " Device registered as /dev/video%d\n", vfd->num);
+
+ dev_dbg(xm2msc->dev, "%s Device registered as /dev/video%d\n",
+ __func__, vfd->num);
+
+ chan_ctx->m2m_dev = v4l2_m2m_init(&xm2msc_m2m_ops);
+ if (IS_ERR(chan_ctx->m2m_dev)) {
+ v4l2_err(&xm2msc->v4l2_dev,
+ "Failed to init mem2mem device for chan %d\n",
+ chan);
+ ret = PTR_ERR(chan_ctx->m2m_dev);
+ goto unreg_dev;
+ }
+ chan_ctx->xm2msc_dev = xm2msc;
+ chan_ctx->regs = xm2msc->regs + XM2MSC_CHAN_REGS_START(chan);
+ if (chan > 4) /* TODO: To be fixed in HW */
+ chan_ctx->regs += XM2MSC_RESERVED_AREA;
+ chan_ctx->num = chan;
+ chan_ctx->minor = vfd->minor;
+
+ /* Set channel parameters to default values */
+ ret = xm2msc_set_chan_parm(chan_ctx);
+ if (ret)
+ goto unreg_dev;
+
+ xm2msc_pr_chanctx(chan_ctx, __func__);
+ }
+
+ mutex_init(&xm2msc->dev_mutex);
+ mutex_init(&xm2msc->mutex);
+ init_waitqueue_head(&xm2msc->isr_finished);
+
+ ret = devm_request_irq(&pdev->dev, xm2msc->irq,
+ xm2msc_isr, IRQF_SHARED,
+ XM2MSC_DRIVER_NAME, xm2msc);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to register IRQ\n");
+ goto unreg_dev;
+ }
+
+ platform_set_drvdata(pdev, xm2msc);
+
+ return 0;
+
+unreg_dev:
+ xm2msc_unreg_video_n_m2m(xm2msc);
+ v4l2_device_unregister(&xm2msc->v4l2_dev);
+reg_dev_err:
+ clk_disable_unprepare(xm2msc->clk);
+ return ret;
+}
+
+/* Undo probe: drop channels, the v4l2 device, then the clock. */
+static int xm2m_msc_remove(struct platform_device *pdev)
+{
+	struct xm2m_msc_dev *dev = platform_get_drvdata(pdev);
+
+	xm2msc_unreg_video_n_m2m(dev);
+	v4l2_device_unregister(&dev->v4l2_dev);
+	clk_disable_unprepare(dev->clk);
+	return 0;
+}
+
+/* Devices matched by this driver */
+static const struct of_device_id xm2m_msc_of_id_table[] = {
+ {.compatible = "xlnx,v-multi-scaler-v1.0"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, xm2m_msc_of_id_table);
+
+/* Platform driver glue and module boilerplate */
+static struct platform_driver xm2m_msc_driver = {
+ .driver = {
+ .name = "xilinx-multiscaler",
+ .of_match_table = xm2m_msc_of_id_table,
+ },
+ .probe = xm2m_msc_probe,
+ .remove = xm2m_msc_remove,
+};
+
+module_platform_driver(xm2m_msc_driver);
+
+MODULE_DESCRIPTION("Xilinx M2M Multi-Scaler Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("xlnx_m2m_multiscaler_dev");
diff --git a/drivers/media/platform/xilinx/xilinx-remapper.c b/drivers/media/platform/xilinx/xilinx-remapper.c
new file mode 100644
index 000000000000..d2e84ec1f2d6
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-remapper.c
@@ -0,0 +1,546 @@
+/*
+ * Xilinx Video Remapper
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-vip.h"
+
+#define XREMAP_MIN_WIDTH 1
+#define XREMAP_DEF_WIDTH 1920
+#define XREMAP_MAX_WIDTH 65535
+#define XREMAP_MIN_HEIGHT 1
+#define XREMAP_DEF_HEIGHT 1080
+#define XREMAP_MAX_HEIGHT 65535
+
+#define XREMAP_PAD_SINK 0
+#define XREMAP_PAD_SOURCE 1
+
+/**
+ * struct xremap_mapping_output - Output format description
+ * @code: media bus pixel core after remapping
+ * @num_components: number of pixel components after remapping
+ * @component_maps: configuration array corresponding to this output
+ */
+struct xremap_mapping_output {
+ u32 code;
+ unsigned int num_components;
+ unsigned int component_maps[4];
+};
+
+/**
+ * struct xremap_mapping - Input-output remapping description
+ * @code: media bus pixel code before remapping
+ * @width: video bus width in bits
+ * @num_components: number of pixel components before remapping
+ * @outputs: array of possible output formats
+ */
+struct xremap_mapping {
+ u32 code;
+ unsigned int width;
+ unsigned int num_components;
+ const struct xremap_mapping_output *outputs;
+};
+
+/**
+ * struct xremap_device - Xilinx Test Pattern Generator device structure
+ * @xvip: Xilinx Video IP device
+ * @pads: media pads
+ * @formats: V4L2 media bus formats at the sink and source pads
+ * @config: device configuration parsed from its DT node
+ * @config.width: video bus width in bits
+ * @config.num_s_components: number of pixel components at the input
+ * @config.num_m_components: number of pixel components at the output
+ * @config.component_maps: component remapping configuration
+ * @default_mapping: Default mapping compatible with the configuration
+ * @default_output: Default output format for the default mapping
+ */
+struct xremap_device {
+ struct xvip_device xvip;
+ struct media_pad pads[2];
+ struct v4l2_mbus_framefmt formats[2];
+
+ struct {
+ unsigned int width;
+ unsigned int num_s_components;
+ unsigned int num_m_components;
+ unsigned int component_maps[4];
+ } config;
+
+ const struct xremap_mapping *default_mapping;
+ const struct xremap_mapping_output *default_output;
+};
+
+static inline struct xremap_device *to_remap(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xremap_device, xvip.subdev);
+}
+
+/* -----------------------------------------------------------------------------
+ * Mappings
+ */
+
+static const struct xremap_mapping xremap_mappings[] = {
+ {
+ .code = MEDIA_BUS_FMT_RBG888_1X24,
+ .width = 8,
+ .num_components = 3,
+ .outputs = (const struct xremap_mapping_output[]) {
+ { MEDIA_BUS_FMT_RGB888_1X32_PADHI, 4, { 1, 0, 2, 4 } },
+ { },
+ },
+ },
+};
+
+/*
+ * xremap_match_mapping - Check a mapping against the device configuration
+ *
+ * Return the first output format of @mapping whose component count and
+ * per-component map equal the configuration parsed from DT, or NULL when
+ * @mapping is not compatible with this device instance.
+ */
+static const struct xremap_mapping_output *
+xremap_match_mapping(struct xremap_device *xremap,
+		     const struct xremap_mapping *mapping)
+{
+	const struct xremap_mapping_output *output;
+
+	/* The input side must match the configured bus width and number of
+	 * input components exactly.
+	 */
+	if (mapping->width != xremap->config.width ||
+	    mapping->num_components != xremap->config.num_s_components)
+		return NULL;
+
+	/* The outputs array is terminated by an entry with code == 0. */
+	for (output = mapping->outputs; output->code; ++output) {
+		unsigned int i;
+
+		if (output->num_components != xremap->config.num_m_components)
+			continue;
+
+		for (i = 0; i < output->num_components; ++i) {
+			if (output->component_maps[i] !=
+			    xremap->config.component_maps[i])
+				break;
+		}
+
+		/* i reaches num_components only when every map matched. */
+		if (i == output->num_components)
+			return output;
+	}
+
+	return NULL;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+/*
+ * xremap_enum_mbus_code - Enumerate supported media bus codes on a pad
+ *
+ * On the sink pad, enumerate the codes of all entries in xremap_mappings
+ * that are compatible with this device's configuration. On the source pad
+ * the code is fixed by the current (try) format, so only index 0 is valid.
+ */
+static int xremap_enum_mbus_code(struct v4l2_subdev *subdev,
+				 struct v4l2_subdev_pad_config *cfg,
+				 struct v4l2_subdev_mbus_code_enum *code)
+{
+	struct xremap_device *xremap = to_remap(subdev);
+	struct v4l2_mbus_framefmt *format;
+
+	if (code->pad == XREMAP_PAD_SINK) {
+		const struct xremap_mapping *mapping = NULL;
+		/* index counts down: 0 means "requested entry reached". */
+		unsigned int index = code->index + 1;
+		unsigned int i;
+
+		/* Iterate through the mappings and skip the ones that don't
+		 * match the remapper configuration until we reach the requested
+		 * index.
+		 */
+		for (i = 0; i < ARRAY_SIZE(xremap_mappings) && index; ++i) {
+			mapping = &xremap_mappings[i];
+
+			if (xremap_match_mapping(xremap, mapping))
+				index--;
+		}
+
+		/* If the index was larger than the number of supported mappings
+		 * return -EINVAL.
+		 */
+		if (index > 0)
+			return -EINVAL;
+
+		/* index == 0 implies at least one match, so mapping != NULL. */
+		code->code = mapping->code;
+	} else {
+		if (code->index)
+			return -EINVAL;
+
+		format = v4l2_subdev_get_try_format(subdev, cfg, code->pad);
+		code->code = format->code;
+	}
+
+	return 0;
+}
+
+static int xremap_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct v4l2_mbus_framefmt *format;
+
+ format = v4l2_subdev_get_try_format(subdev, cfg, fse->pad);
+
+ if (fse->index || fse->code != format->code)
+ return -EINVAL;
+
+ if (fse->pad == XREMAP_PAD_SINK) {
+ /* The remapper doesn't restrict the size on the sink pad. */
+ fse->min_width = XREMAP_MIN_WIDTH;
+ fse->max_width = XREMAP_MAX_WIDTH;
+ fse->min_height = XREMAP_MIN_HEIGHT;
+ fse->max_height = XREMAP_MAX_HEIGHT;
+ } else {
+ /* The size on the source pad are fixed and always identical to
+ * the size on the sink pad.
+ */
+ fse->min_width = format->width;
+ fse->max_width = format->width;
+ fse->min_height = format->height;
+ fse->max_height = format->height;
+ }
+
+ return 0;
+}
+
+static struct v4l2_mbus_framefmt *
+xremap_get_pad_format(struct xremap_device *xremap,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xremap->xvip.subdev, cfg,
+ pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xremap->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static int xremap_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xremap_device *xremap = to_remap(subdev);
+
+ fmt->format = *xremap_get_pad_format(xremap, cfg, fmt->pad, fmt->which);
+
+ return 0;
+}
+
+/*
+ * xremap_set_format - Set the format on a remapper pad
+ *
+ * Only the sink pad format is configurable; the source pad always mirrors
+ * the sink pad size with the remapped media bus code, so source pad
+ * requests simply return the current format.
+ */
+static int xremap_set_format(struct v4l2_subdev *subdev,
+			     struct v4l2_subdev_pad_config *cfg,
+			     struct v4l2_subdev_format *fmt)
+{
+	struct xremap_device *xremap = to_remap(subdev);
+	/*
+	 * output must start as NULL: when no entry in xremap_mappings
+	 * matches the requested code, the lookup loop below never assigns
+	 * it, and the fallback test would otherwise read an uninitialized
+	 * pointer.
+	 */
+	const struct xremap_mapping_output *output = NULL;
+	const struct xremap_mapping *mapping;
+	struct v4l2_mbus_framefmt *format;
+	unsigned int i;
+
+	format = xremap_get_pad_format(xremap, cfg, fmt->pad, fmt->which);
+
+	if (fmt->pad == XREMAP_PAD_SOURCE) {
+		fmt->format = *format;
+		return 0;
+	}
+
+	/* Find the mapping. If the requested format has no mapping, use the
+	 * default.
+	 */
+	for (i = 0; i < ARRAY_SIZE(xremap_mappings); ++i) {
+		mapping = &xremap_mappings[i];
+		if (mapping->code != fmt->format.code)
+			continue;
+
+		output = xremap_match_mapping(xremap, mapping);
+		if (output)
+			break;
+	}
+
+	if (!output) {
+		mapping = xremap->default_mapping;
+		output = xremap->default_output;
+	}
+
+	format->code = mapping->code;
+	format->width = clamp_t(unsigned int, fmt->format.width,
+				XREMAP_MIN_WIDTH, XREMAP_MAX_WIDTH);
+	format->height = clamp_t(unsigned int, fmt->format.height,
+				 XREMAP_MIN_HEIGHT, XREMAP_MAX_HEIGHT);
+	format->field = V4L2_FIELD_NONE;
+	format->colorspace = V4L2_COLORSPACE_SRGB;
+
+	fmt->format = *format;
+
+	/* Propagate the format to the source pad. */
+	format = xremap_get_pad_format(xremap, cfg, XREMAP_PAD_SOURCE,
+				       fmt->which);
+	*format = fmt->format;
+	format->code = output->code;
+
+	return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+/*
+ * xremap_init_formats - Initialize formats on all pads
+ * @subdev: remapper V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+static void xremap_init_formats(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh)
+{
+ struct xremap_device *xremap = to_remap(subdev);
+ struct v4l2_subdev_format format;
+
+ memset(&format, 0, sizeof(format));
+
+ format.pad = XREMAP_PAD_SINK;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ format.format.code = xremap->default_mapping->code;
+ format.format.width = XREMAP_DEF_WIDTH;
+ format.format.height = XREMAP_DEF_HEIGHT;
+
+ xremap_set_format(subdev, fh ? fh->pad : NULL, &format);
+}
+
+static int xremap_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ xremap_init_formats(subdev, fh);
+
+ return 0;
+}
+
+static int xremap_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static struct v4l2_subdev_core_ops xremap_core_ops = {
+};
+
+static struct v4l2_subdev_video_ops xremap_video_ops = {
+};
+
+static struct v4l2_subdev_pad_ops xremap_pad_ops = {
+ .enum_mbus_code = xremap_enum_mbus_code,
+ .enum_frame_size = xremap_enum_frame_size,
+ .get_fmt = xremap_get_format,
+ .set_fmt = xremap_set_format,
+};
+
+static struct v4l2_subdev_ops xremap_ops = {
+ .core = &xremap_core_ops,
+ .video = &xremap_video_ops,
+ .pad = &xremap_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xremap_internal_ops = {
+ .open = xremap_open,
+ .close = xremap_close,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media Operations
+ */
+
+static const struct media_entity_operations xremap_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+static int xremap_parse_of(struct xremap_device *xremap)
+{
+ struct device_node *node = xremap->xvip.dev->of_node;
+ unsigned int i;
+ int ret;
+
+ /* Parse the DT properties. */
+ ret = of_property_read_u32(node, "xlnx,video-width",
+ &xremap->config.width);
+ if (ret < 0) {
+ dev_dbg(xremap->xvip.dev, "unable to parse %s property\n",
+ "xlnx,video-width");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "#xlnx,s-components",
+ &xremap->config.num_s_components);
+ if (ret < 0) {
+ dev_dbg(xremap->xvip.dev, "unable to parse %s property\n",
+ "#xlnx,s-components");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "#xlnx,m-components",
+ &xremap->config.num_m_components);
+ if (ret < 0) {
+ dev_dbg(xremap->xvip.dev, "unable to parse %s property\n",
+ "#xlnx,m-components");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_array(node, "xlnx,component-maps",
+ xremap->config.component_maps,
+ xremap->config.num_m_components);
+ if (ret < 0) {
+ dev_dbg(xremap->xvip.dev, "unable to parse %s property\n",
+ "xlnx,component-maps");
+ return -EINVAL;
+ }
+
+ /* Validate the parsed values. */
+ if (xremap->config.num_s_components > 4 ||
+ xremap->config.num_m_components > 4) {
+ dev_dbg(xremap->xvip.dev,
+ "invalid number of components (s %u m %u)\n",
+ xremap->config.num_s_components,
+ xremap->config.num_m_components);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < xremap->config.num_m_components; ++i) {
+ if (xremap->config.component_maps[i] > 4) {
+ dev_dbg(xremap->xvip.dev, "invalid map %u @%u\n",
+ xremap->config.component_maps[i], i);
+ return -EINVAL;
+ }
+ }
+
+ /* Find the first mapping that matches the remapper configuration and
+ * store it as the default mapping.
+ */
+ for (i = 0; i < ARRAY_SIZE(xremap_mappings); ++i) {
+ const struct xremap_mapping_output *output;
+ const struct xremap_mapping *mapping;
+
+ mapping = &xremap_mappings[i];
+ output = xremap_match_mapping(xremap, mapping);
+
+ if (output) {
+ xremap->default_mapping = mapping;
+ xremap->default_output = output;
+ return 0;
+ }
+ }
+
+ dev_err(xremap->xvip.dev,
+ "No format compatible with device configuration\n");
+
+ return -EINVAL;
+}
+
+/*
+ * xremap_probe - Bind the remapper: parse DT, enable the clock, register
+ * the V4L2 subdevice and media entity.
+ */
+static int xremap_probe(struct platform_device *pdev)
+{
+	struct xremap_device *xremap;
+	struct v4l2_subdev *subdev;
+	int ret;
+
+	xremap = devm_kzalloc(&pdev->dev, sizeof(*xremap), GFP_KERNEL);
+	if (!xremap)
+		return -ENOMEM;
+
+	xremap->xvip.dev = &pdev->dev;
+
+	ret = xremap_parse_of(xremap);
+	if (ret < 0)
+		return ret;
+
+	xremap->xvip.clk = devm_clk_get(xremap->xvip.dev, NULL);
+	if (IS_ERR(xremap->xvip.clk))
+		return PTR_ERR(xremap->xvip.clk);
+
+	/* clk_prepare_enable() can fail; don't ignore its return value. */
+	ret = clk_prepare_enable(xremap->xvip.clk);
+	if (ret < 0)
+		return ret;
+
+	/* Initialize V4L2 subdevice and media entity */
+	subdev = &xremap->xvip.subdev;
+	v4l2_subdev_init(subdev, &xremap_ops);
+	subdev->dev = &pdev->dev;
+	subdev->internal_ops = &xremap_internal_ops;
+	strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+	v4l2_set_subdevdata(subdev, xremap);
+	subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+	xremap_init_formats(subdev, NULL);
+
+	xremap->pads[XREMAP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+	xremap->pads[XREMAP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+	subdev->entity.ops = &xremap_media_ops;
+	ret = media_entity_pads_init(&subdev->entity, 2, xremap->pads);
+	if (ret < 0)
+		goto error;
+
+	platform_set_drvdata(pdev, xremap);
+
+	ret = v4l2_async_register_subdev(subdev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to register subdev\n");
+		goto error;
+	}
+
+	dev_info(&pdev->dev, "device registered\n");
+
+	return 0;
+
+error:
+	media_entity_cleanup(&subdev->entity);
+	clk_disable_unprepare(xremap->xvip.clk);
+	return ret;
+}
+
+static int xremap_remove(struct platform_device *pdev)
+{
+ struct xremap_device *xremap = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xremap->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ media_entity_cleanup(&subdev->entity);
+
+ clk_disable_unprepare(xremap->xvip.clk);
+
+ return 0;
+}
+
+static const struct of_device_id xremap_of_id_table[] = {
+ { .compatible = "xlnx,v-remapper" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xremap_of_id_table);
+
+static struct platform_driver xremap_driver = {
+ .driver = {
+ .name = "xilinx-remapper",
+ .of_match_table = xremap_of_id_table,
+ },
+ .probe = xremap_probe,
+ .remove = xremap_remove,
+};
+
+module_platform_driver(xremap_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Xilinx Video Remapper Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-rgb2yuv.c b/drivers/media/platform/xilinx/xilinx-rgb2yuv.c
new file mode 100644
index 000000000000..20ae95946ca3
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-rgb2yuv.c
@@ -0,0 +1,566 @@
+/*
+ * Xilinx RGB to YUV Convertor
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/xilinx-v4l2-controls.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-vip.h"
+
+#define XRGB2YUV_YMAX 0x100
+#define XRGB2YUV_YMIN 0x104
+#define XRGB2YUV_CBMAX 0x108
+#define XRGB2YUV_CBMIN 0x10c
+#define XRGB2YUV_CRMAX 0x110
+#define XRGB2YUV_CRMIN 0x114
+#define XRGB2YUV_YOFFSET 0x118
+#define XRGB2YUV_CBOFFSET 0x11c
+#define XRGB2YUV_CROFFSET 0x120
+#define XRGB2YUV_ACOEF 0x124
+#define XRGB2YUV_BCOEF 0x128
+#define XRGB2YUV_CCOEF 0x12c
+#define XRGB2YUV_DCOEF 0x130
+
+/**
+ * struct xrgb2yuv_device - Xilinx RGB2YUV device structure
+ * @xvip: Xilinx Video IP device
+ * @pads: media pads
+ * @formats: V4L2 media bus formats at the sink and source pads
+ * @default_formats: default V4L2 media bus formats
+ * @vip_formats: Xilinx Video IP formats
+ * @ctrl_handler: control handler
+ */
+struct xrgb2yuv_device {
+ struct xvip_device xvip;
+
+ struct media_pad pads[2];
+
+ struct v4l2_mbus_framefmt formats[2];
+ struct v4l2_mbus_framefmt default_formats[2];
+ const struct xvip_video_format *vip_formats[2];
+
+ struct v4l2_ctrl_handler ctrl_handler;
+};
+
+static inline struct xrgb2yuv_device *to_rgb2yuv(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xrgb2yuv_device, xvip.subdev);
+}
+
+/*
+ * V4L2 Subdevice Video Operations
+ */
+
+static int xrgb2yuv_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xrgb2yuv_device *xrgb2yuv = to_rgb2yuv(subdev);
+
+ if (!enable) {
+ xvip_stop(&xrgb2yuv->xvip);
+ return 0;
+ }
+
+ xvip_set_frame_size(&xrgb2yuv->xvip, &xrgb2yuv->formats[XVIP_PAD_SINK]);
+
+ xvip_start(&xrgb2yuv->xvip);
+
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Pad Operations
+ */
+
+static struct v4l2_mbus_framefmt *
+__xrgb2yuv_get_pad_format(struct xrgb2yuv_device *xrgb2yuv,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xrgb2yuv->xvip.subdev, cfg,
+ pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xrgb2yuv->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static int xrgb2yuv_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xrgb2yuv_device *xrgb2yuv = to_rgb2yuv(subdev);
+
+ fmt->format = *__xrgb2yuv_get_pad_format(xrgb2yuv, cfg, fmt->pad,
+ fmt->which);
+
+ return 0;
+}
+
+static int xrgb2yuv_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xrgb2yuv_device *xrgb2yuv = to_rgb2yuv(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __xrgb2yuv_get_pad_format(xrgb2yuv, cfg, fmt->pad, fmt->which);
+
+ if (fmt->pad == XVIP_PAD_SOURCE) {
+ fmt->format = *format;
+ return 0;
+ }
+
+ xvip_set_format_size(format, fmt);
+
+ fmt->format = *format;
+
+ /* Propagate the format to the source pad. */
+ format = __xrgb2yuv_get_pad_format(xrgb2yuv, cfg, XVIP_PAD_SOURCE,
+ fmt->which);
+
+ xvip_set_format_size(format, fmt);
+
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Operations
+ */
+
+static int xrgb2yuv_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ struct xrgb2yuv_device *xrgb2yuv = to_rgb2yuv(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* Initialize with default formats */
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SINK);
+ *format = xrgb2yuv->default_formats[XVIP_PAD_SINK];
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SOURCE);
+ *format = xrgb2yuv->default_formats[XVIP_PAD_SOURCE];
+
+ return 0;
+}
+
+static int xrgb2yuv_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static int xrgb2yuv_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct xrgb2yuv_device *xrgb2yuv =
+ container_of(ctrl->handler, struct xrgb2yuv_device,
+ ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_XILINX_RGB2YUV_YMAX:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_YMAX, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_YMIN:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_YMIN, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_CBMAX:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_CBMAX, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_CBMIN:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_CBMIN, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_CRMAX:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_CRMAX, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_CRMIN:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_CRMIN, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_YOFFSET:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_YOFFSET, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_CBOFFSET:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_CBOFFSET, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_CROFFSET:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_CROFFSET, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_ACOEF:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_ACOEF, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_BCOEF:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_BCOEF, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_CCOEF:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_CCOEF, ctrl->val);
+ return 0;
+ case V4L2_CID_XILINX_RGB2YUV_DCOEF:
+ xvip_write(&xrgb2yuv->xvip, XRGB2YUV_DCOEF, ctrl->val);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops xrgb2yuv_ctrl_ops = {
+ .s_ctrl = xrgb2yuv_s_ctrl,
+};
+
+static struct v4l2_subdev_video_ops xrgb2yuv_video_ops = {
+ .s_stream = xrgb2yuv_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops xrgb2yuv_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xvip_enum_frame_size,
+ .get_fmt = xrgb2yuv_get_format,
+ .set_fmt = xrgb2yuv_set_format,
+};
+
+static struct v4l2_subdev_ops xrgb2yuv_ops = {
+ .video = &xrgb2yuv_video_ops,
+ .pad = &xrgb2yuv_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xrgb2yuv_internal_ops = {
+ .open = xrgb2yuv_open,
+ .close = xrgb2yuv_close,
+};
+
+/*
+ * Control Configs
+ */
+
+static struct v4l2_ctrl_config xrgb2yuv_ctrls[] = {
+ {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_YMAX,
+ .name = "RGB to YUV: Maximum Y value",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 16) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_YMIN,
+ .name = "RGB to YUV: Minimum Y value",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 16) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_CBMAX,
+ .name = "RGB to YUV: Maximum Cb value",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 16) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_CBMIN,
+ .name = "RGB to YUV: Minimum Cb value",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 16) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_CRMAX,
+ .name = "RGB to YUV: Maximum Cr value",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 16) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_CRMIN,
+ .name = "RGB to YUV: Minimum Cr value",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 16) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_YOFFSET,
+ .name = "RGB to YUV: Luma offset",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 17) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_CBOFFSET,
+ .name = "RGB to YUV: Chroma Cb offset",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 17) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_CROFFSET,
+ .name = "RGB to YUV: Chroma Cr offset",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = (1 << 17) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_ACOEF,
+ .name = "RGB to YUV: CA coefficient",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = -((1 << 17) - 1),
+ .max = (1 << 17) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_BCOEF,
+ .name = "RGB to YUV: CB coefficient",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = -((1 << 17) - 1),
+ .max = (1 << 17) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_CCOEF,
+ .name = "RGB to YUV: CC coefficient",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = -((1 << 17) - 1),
+ .max = (1 << 17) - 1,
+ .step = 1,
+ }, {
+ .ops = &xrgb2yuv_ctrl_ops,
+ .id = V4L2_CID_XILINX_RGB2YUV_DCOEF,
+ .name = "RGB to YUV: CD coefficient",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = -((1 << 17) - 1),
+ .max = (1 << 17) - 1,
+ .step = 1,
+ },
+};
+
+/*
+ * Media Operations
+ */
+
+static const struct media_entity_operations xrgb2yuv_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * Power Management
+ */
+
+static int __maybe_unused xrgb2yuv_pm_suspend(struct device *dev)
+{
+ struct xrgb2yuv_device *xrgb2yuv = dev_get_drvdata(dev);
+
+ xvip_suspend(&xrgb2yuv->xvip);
+
+ return 0;
+}
+
+static int __maybe_unused xrgb2yuv_pm_resume(struct device *dev)
+{
+ struct xrgb2yuv_device *xrgb2yuv = dev_get_drvdata(dev);
+
+ xvip_resume(&xrgb2yuv->xvip);
+
+ return 0;
+}
+
+/*
+ * Platform Device Driver
+ */
+
+/*
+ * xrgb2yuv_parse_of - Parse the sink/source pad formats from the DT node
+ *
+ * Looks for "port" children (under an optional "ports" container) and
+ * records the format of ports 0 (sink) and 1 (source).
+ */
+static int xrgb2yuv_parse_of(struct xrgb2yuv_device *xrgb2yuv)
+{
+	struct device *dev = xrgb2yuv->xvip.dev;
+	struct device_node *node = xrgb2yuv->xvip.dev->of_node;
+	struct device_node *ports;
+	struct device_node *port;
+	u32 port_id;
+	int ret;
+
+	ports = of_get_child_by_name(node, "ports");
+	if (ports == NULL)
+		ports = node;
+
+	/* Get the format description for each pad */
+	for_each_child_of_node(ports, port) {
+		if (port->name && (of_node_cmp(port->name, "port") == 0)) {
+			const struct xvip_video_format *vip_format;
+
+			vip_format = xvip_of_get_format(port);
+			if (IS_ERR(vip_format)) {
+				dev_err(dev, "invalid format in DT");
+				/* drop the reference held by the iterator */
+				of_node_put(port);
+				return PTR_ERR(vip_format);
+			}
+
+			ret = of_property_read_u32(port, "reg", &port_id);
+			if (ret < 0) {
+				dev_err(dev, "no reg in DT");
+				of_node_put(port);
+				return ret;
+			}
+
+			if (port_id != 0 && port_id != 1) {
+				dev_err(dev, "invalid reg in DT");
+				of_node_put(port);
+				return -EINVAL;
+			}
+
+			xrgb2yuv->vip_formats[port_id] = vip_format;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * xrgb2yuv_probe - Bind the RGB-to-YUV converter
+ *
+ * Parses DT, maps and resets the IP core, sets up default formats for
+ * both pads, registers the custom V4L2 controls (defaults read back from
+ * the hardware), and finally registers the async subdevice.
+ */
+static int xrgb2yuv_probe(struct platform_device *pdev)
+{
+	struct xrgb2yuv_device *xrgb2yuv;
+	struct v4l2_subdev *subdev;
+	struct v4l2_mbus_framefmt *default_format;
+	unsigned int i;
+	int ret;
+
+	xrgb2yuv = devm_kzalloc(&pdev->dev, sizeof(*xrgb2yuv), GFP_KERNEL);
+	if (!xrgb2yuv)
+		return -ENOMEM;
+
+	xrgb2yuv->xvip.dev = &pdev->dev;
+
+	ret = xrgb2yuv_parse_of(xrgb2yuv);
+	if (ret < 0)
+		return ret;
+
+	ret = xvip_init_resources(&xrgb2yuv->xvip);
+	if (ret < 0)
+		return ret;
+
+	/* Reset and initialize the core */
+	xvip_reset(&xrgb2yuv->xvip);
+
+	/* Initialize V4L2 subdevice and media entity */
+	subdev = &xrgb2yuv->xvip.subdev;
+	v4l2_subdev_init(subdev, &xrgb2yuv_ops);
+	subdev->dev = &pdev->dev;
+	subdev->internal_ops = &xrgb2yuv_internal_ops;
+	strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+	v4l2_set_subdevdata(subdev, xrgb2yuv);
+	subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+	/* Initialize default and active formats */
+	default_format = &xrgb2yuv->default_formats[XVIP_PAD_SINK];
+	default_format->code = xrgb2yuv->vip_formats[XVIP_PAD_SINK]->code;
+	default_format->field = V4L2_FIELD_NONE;
+	default_format->colorspace = V4L2_COLORSPACE_SRGB;
+	xvip_get_frame_size(&xrgb2yuv->xvip, default_format);
+
+	xrgb2yuv->formats[XVIP_PAD_SINK] = *default_format;
+
+	/* The source format mirrors the sink format with the output code. */
+	default_format = &xrgb2yuv->default_formats[XVIP_PAD_SOURCE];
+	*default_format = xrgb2yuv->default_formats[XVIP_PAD_SINK];
+	default_format->code = xrgb2yuv->vip_formats[XVIP_PAD_SOURCE]->code;
+
+	xrgb2yuv->formats[XVIP_PAD_SOURCE] = *default_format;
+
+	xrgb2yuv->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+	xrgb2yuv->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+	subdev->entity.ops = &xrgb2yuv_media_ops;
+	ret = media_entity_pads_init(&subdev->entity, 2, xrgb2yuv->pads);
+	if (ret < 0)
+		goto error;
+
+	/* Size the handler from the table instead of a magic constant so it
+	 * stays correct if controls are added or removed.
+	 */
+	v4l2_ctrl_handler_init(&xrgb2yuv->ctrl_handler,
+			       ARRAY_SIZE(xrgb2yuv_ctrls));
+
+	for (i = 0; i < ARRAY_SIZE(xrgb2yuv_ctrls); i++) {
+		/* Control defaults come from the hardware: the registers are
+		 * laid out contiguously in the same order as the table.
+		 */
+		xrgb2yuv_ctrls[i].def = xvip_read(&xrgb2yuv->xvip,
+						  XRGB2YUV_YMAX + i * 4);
+		v4l2_ctrl_new_custom(&xrgb2yuv->ctrl_handler,
+				     &xrgb2yuv_ctrls[i], NULL);
+	}
+
+	if (xrgb2yuv->ctrl_handler.error) {
+		dev_err(&pdev->dev, "failed to add controls\n");
+		ret = xrgb2yuv->ctrl_handler.error;
+		goto error;
+	}
+	subdev->ctrl_handler = &xrgb2yuv->ctrl_handler;
+
+	platform_set_drvdata(pdev, xrgb2yuv);
+
+	xvip_print_version(&xrgb2yuv->xvip);
+
+	ret = v4l2_async_register_subdev(subdev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to register subdev\n");
+		goto error;
+	}
+
+	return 0;
+
+error:
+	v4l2_ctrl_handler_free(&xrgb2yuv->ctrl_handler);
+	media_entity_cleanup(&subdev->entity);
+	xvip_cleanup_resources(&xrgb2yuv->xvip);
+	return ret;
+}
+
+static int xrgb2yuv_remove(struct platform_device *pdev)
+{
+ struct xrgb2yuv_device *xrgb2yuv = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xrgb2yuv->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ v4l2_ctrl_handler_free(&xrgb2yuv->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+
+ xvip_cleanup_resources(&xrgb2yuv->xvip);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(xrgb2yuv_pm_ops, xrgb2yuv_pm_suspend,
+ xrgb2yuv_pm_resume);
+
+static const struct of_device_id xrgb2yuv_of_id_table[] = {
+ { .compatible = "xlnx,v-rgb2yuv-7.1" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xrgb2yuv_of_id_table);
+
+static struct platform_driver xrgb2yuv_driver = {
+ .driver = {
+ .name = "xilinx-rgb2yuv",
+ .pm = &xrgb2yuv_pm_ops,
+ .of_match_table = xrgb2yuv_of_id_table,
+ },
+ .probe = xrgb2yuv_probe,
+ .remove = xrgb2yuv_remove,
+};
+
+module_platform_driver(xrgb2yuv_driver);
+
+MODULE_DESCRIPTION("Xilinx RGB to YUV Converter Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-scaler.c b/drivers/media/platform/xilinx/xilinx-scaler.c
new file mode 100644
index 000000000000..bb0d52627a50
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-scaler.c
@@ -0,0 +1,708 @@
+/*
+ * Xilinx Scaler
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/fixp-arith.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-vip.h"
+
+#define XSCALER_MIN_WIDTH 32
+#define XSCALER_MAX_WIDTH 4096
+#define XSCALER_MIN_HEIGHT 32
+#define XSCALER_MAX_HEIGHT 4096
+
+#define XSCALER_HSF 0x0100
+#define XSCALER_VSF 0x0104
+#define XSCALER_SF_SHIFT 20
+#define XSCALER_SF_MASK 0xffffff
+#define XSCALER_SOURCE_SIZE 0x0108
+#define XSCALER_SIZE_HORZ_SHIFT 0
+#define XSCALER_SIZE_VERT_SHIFT 16
+#define XSCALER_SIZE_MASK 0xfff
+#define XSCALER_HAPERTURE 0x010c
+#define XSCALER_VAPERTURE 0x0110
+#define XSCALER_APERTURE_START_SHIFT 0
+#define XSCALER_APERTURE_END_SHIFT 16
+#define XSCALER_OUTPUT_SIZE 0x0114
+#define XSCALER_COEF_DATA_IN 0x0134
+#define XSCALER_COEF_DATA_IN_SHIFT 16
+
+/* Fixed point operations */
+#define FRAC_N 8
+
+/* Convert an integer to Q8 fixed point (FRAC_N fractional bits). */
+static inline s16 fixp_new(s16 a)
+{
+ return a << FRAC_N;
+}
+
+/* Multiply two Q8 fixed-point values; the product is renormalized to Q8. */
+static inline s16 fixp_mult(s16 a, s16 b)
+{
+ return ((s32)(a * b)) >> FRAC_N;
+}
+
+/**
+ * struct xscaler_device - Xilinx Scaler device structure
+ * @xvip: Xilinx Video IP device
+ * @pads: media pads
+ * @formats: V4L2 media bus formats at the sink and source pads
+ * @default_formats: default V4L2 media bus formats
+ * @vip_format: Xilinx Video IP format
+ * @crop: Active crop rectangle for the sink pad
+ * @num_hori_taps: number of horizontal taps
+ * @num_vert_taps: number of vertical taps
+ * @max_num_phases: maximum number of phases
+ * @separate_yc_coef: separate coefficients for Luma(y) and Chroma(c)
+ * @separate_hv_coef: separate coefficients for Horizontal(h) and Vertical(v)
+ */
+struct xscaler_device {
+ struct xvip_device xvip;
+
+ struct media_pad pads[2];
+
+ struct v4l2_mbus_framefmt formats[2];
+ struct v4l2_mbus_framefmt default_formats[2];
+ const struct xvip_video_format *vip_format;
+ struct v4l2_rect crop;
+
+ u32 num_hori_taps;
+ u32 num_vert_taps;
+ u32 max_num_phases;
+ bool separate_yc_coef;
+ bool separate_hv_coef;
+};
+
+/* Retrieve the driver instance that embeds the given V4L2 subdev. */
+static inline struct xscaler_device *to_scaler(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xscaler_device, xvip.subdev);
+}
+
+/*
+ * V4L2 Subdevice Video Operations
+ */
+
+/**
+ * lanczos - Lanczos 2D FIR kernel convolution
+ * @x: phase
+ * @a: Lanczos kernel size
+ *
+ * Return: the coefficient value in fixed point format.
+ */
+/**
+ * lanczos - Lanczos 2D FIR kernel convolution
+ * @x: phase
+ * @a: Lanczos kernel size
+ *
+ * All math below is Q8 fixed point (FRAC_N fractional bits). Angles are
+ * handled in degrees because fixp_sin16() expects degrees, hence the
+ * factor of 180 standing in for pi in the sine arguments.
+ *
+ * Return: the coefficient value in fixed point format.
+ */
+static s16 lanczos(s16 x, s16 a)
+{
+ s16 pi;
+ s16 numerator;
+ s16 denominator;
+ s16 temp;
+
+ /* The kernel is zero outside [-a, a] and one at the origin */
+ if (x < -a || x > a)
+ return 0;
+ else if (x == 0)
+ return fixp_new(1);
+
+ /* a * sin(pi * x) * sin(pi * x / a) / (pi * pi * x * x) */
+
+ /* pi approximated as 157/50 = 3.14 in Q8 */
+ pi = (fixp_new(157) << FRAC_N) / fixp_new(50);
+
+ /* The kernel is symmetric; work with |x| */
+ if (x < 0)
+ x = -x;
+
+ /* sin(pi * x) */
+ temp = fixp_mult(fixp_new(180), x);
+ temp = fixp_sin16(temp >> FRAC_N);
+
+ /* a * sin(pi * x) */
+ numerator = fixp_mult(temp, a);
+
+ /* sin(pi * x / a) */
+ temp = (fixp_mult(fixp_new(180), x) << FRAC_N) / a;
+ temp = fixp_sin16(temp >> FRAC_N);
+
+ /* a * sin(pi * x) * sin(pi * x / a) */
+ numerator = fixp_mult(temp, numerator);
+
+ /* pi * pi * x * x */
+ denominator = fixp_mult(pi, pi);
+ temp = fixp_mult(x, x);
+ denominator = fixp_mult(temp, denominator);
+
+ return (numerator << FRAC_N) / denominator;
+}
+
+/**
+ * xscaler_set_coefs - generate and program the coefficient table
+ * @xscaler: scaler device
+ * @taps: maximum coefficient tap index
+ *
+ * Generate the coefficient table using Lanczos resampling, and program
+ * generated coefficients to the scaler. The generated coefficients are
+ * supposed to work regardless of resolutions.
+ *
+ * Return: 0 if the coefficient table is programmed, and -ENOMEM if memory
+ * allocation for the table fails.
+ */
+/**
+ * xscaler_set_coefs - generate and program the coefficient table
+ * @xscaler: scaler device
+ * @taps: maximum coefficient tap index
+ *
+ * Generate the coefficient table using Lanczos resampling, and program
+ * generated coefficients to the scaler. The generated coefficients are
+ * supposed to work regardless of resolutions.
+ *
+ * NOTE(review): coef[] is allocated with max_num_phases entries but is
+ * indexed by tap (coef[j], j < taps) — assumes taps <= max_num_phases
+ * for every supported configuration; confirm against the IP parameters.
+ *
+ * Return: 0 if the coefficient table is programmed, and -ENOMEM if memory
+ * allocation for the table fails.
+ */
+static int xscaler_set_coefs(struct xscaler_device *xscaler, s16 taps)
+{
+ s16 *coef;
+ s16 dy;
+ u32 coef_val;
+ u16 phases = xscaler->max_num_phases;
+ u16 i;
+ u16 j;
+
+ coef = kcalloc(phases, sizeof(*coef), GFP_KERNEL);
+ if (!coef)
+ return -ENOMEM;
+
+ for (i = 0; i < phases; i++) {
+ s16 sum = 0;
+
+ /* Fractional phase offset for this row, in Q8 */
+ dy = ((fixp_new(i) << FRAC_N) / fixp_new(phases));
+
+ /* Generate Lanczos coefficients */
+ for (j = 0; j < taps; j++) {
+ coef[j] = lanczos(fixp_new(j - (taps >> 1)) + dy,
+ fixp_new(taps >> 1));
+ sum += coef[j];
+ }
+
+ /* Program coefficients, two 16-bit values per register write */
+ for (j = 0; j < taps; j += 2) {
+ /* Normalize and multiply coefficients */
+ coef_val = (((coef[j] << FRAC_N) << (FRAC_N - 2)) /
+ sum) & 0xffff;
+ if (j + 1 < taps)
+ coef_val |= ((((coef[j + 1] << FRAC_N) <<
+ (FRAC_N - 2)) / sum) & 0xffff) <<
+ 16;
+
+ xvip_write(&xscaler->xvip, XSCALER_COEF_DATA_IN,
+ coef_val);
+ }
+ }
+
+ kfree(coef);
+
+ return 0;
+}
+
+/* Program the crop aperture and the horizontal/vertical scaling factors
+ * derived from the crop rectangle and the source pad format. Register
+ * updates are gated with disable/enable so the hardware latches a
+ * consistent set of values.
+ */
+static void xscaler_set_aperture(struct xscaler_device *xscaler)
+{
+ u16 start;
+ u16 end;
+ u32 scale_factor;
+
+ xvip_disable_reg_update(&xscaler->xvip);
+
+ /* set horizontal aperture */
+ start = xscaler->crop.left;
+ end = start + xscaler->crop.width - 1;
+ xvip_write(&xscaler->xvip, XSCALER_HAPERTURE,
+ (end << XSCALER_APERTURE_END_SHIFT) |
+ (start << XSCALER_APERTURE_START_SHIFT));
+
+ /* set vertical aperture */
+ start = xscaler->crop.top;
+ end = start + xscaler->crop.height - 1;
+ xvip_write(&xscaler->xvip, XSCALER_VAPERTURE,
+ (end << XSCALER_APERTURE_END_SHIFT) |
+ (start << XSCALER_APERTURE_START_SHIFT));
+
+ /* set scaling factors: ratio of cropped input to output size, in
+ * fixed point with XSCALER_SF_SHIFT fractional bits
+ */
+ scale_factor = ((xscaler->crop.width << XSCALER_SF_SHIFT) /
+ xscaler->formats[XVIP_PAD_SOURCE].width) &
+ XSCALER_SF_MASK;
+ xvip_write(&xscaler->xvip, XSCALER_HSF, scale_factor);
+
+ scale_factor = ((xscaler->crop.height << XSCALER_SF_SHIFT) /
+ xscaler->formats[XVIP_PAD_SOURCE].height) &
+ XSCALER_SF_MASK;
+ xvip_write(&xscaler->xvip, XSCALER_VSF, scale_factor);
+
+ xvip_enable_reg_update(&xscaler->xvip);
+}
+
+/* Start or stop the scaler. On start, program the input/output frame
+ * sizes from the active pad formats and the crop aperture before kicking
+ * the core; on stop, just halt the core.
+ */
+static int xscaler_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xscaler_device *xscaler = to_scaler(subdev);
+ u32 width;
+ u32 height;
+
+ if (!enable) {
+ xvip_stop(&xscaler->xvip);
+ return 0;
+ }
+
+ /* set input width / height */
+ width = xscaler->formats[XVIP_PAD_SINK].width;
+ height = xscaler->formats[XVIP_PAD_SINK].height;
+ xvip_write(&xscaler->xvip, XSCALER_SOURCE_SIZE,
+ (height << XSCALER_SIZE_VERT_SHIFT) |
+ (width << XSCALER_SIZE_HORZ_SHIFT));
+
+ /* set output width / height */
+ width = xscaler->formats[XVIP_PAD_SOURCE].width;
+ height = xscaler->formats[XVIP_PAD_SOURCE].height;
+ xvip_write(&xscaler->xvip, XSCALER_OUTPUT_SIZE,
+ (height << XSCALER_SIZE_VERT_SHIFT) |
+ (width << XSCALER_SIZE_HORZ_SHIFT));
+
+ /* set aperture */
+ xscaler_set_aperture(xscaler);
+
+ xvip_start(&xscaler->xvip);
+
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Pad Operations
+ */
+
+/* Enumerate the supported frame size range for the requested code.
+ * NOTE(review): the TRY format is consulted regardless of the caller's
+ * "which" flag — confirm this matches the other xvip subdrivers.
+ */
+static int xscaler_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct v4l2_mbus_framefmt *format;
+
+ format = v4l2_subdev_get_try_format(subdev, cfg, fse->pad);
+
+ /* Only a single size range is exposed per media bus code */
+ if (fse->index || fse->code != format->code)
+ return -EINVAL;
+
+ fse->min_width = XSCALER_MIN_WIDTH;
+ fse->max_width = XSCALER_MAX_WIDTH;
+ fse->min_height = XSCALER_MIN_HEIGHT;
+ fse->max_height = XSCALER_MAX_HEIGHT;
+
+ return 0;
+}
+
+/* Return the TRY (per-file-handle) or ACTIVE format for a pad, or NULL
+ * for an unknown "which" value.
+ */
+static struct v4l2_mbus_framefmt *
+__xscaler_get_pad_format(struct xscaler_device *xscaler,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xscaler->xvip.subdev, cfg,
+ pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xscaler->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+/* Return the TRY or ACTIVE crop rectangle (sink pad only), or NULL for
+ * an unknown "which" value.
+ */
+static struct v4l2_rect *__xscaler_get_crop(struct xscaler_device *xscaler,
+ struct v4l2_subdev_pad_config *cfg,
+ u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_crop(&xscaler->xvip.subdev, cfg,
+ XVIP_PAD_SINK);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xscaler->crop;
+ default:
+ return NULL;
+ }
+}
+
+/* .get_fmt pad operation: copy out the requested pad format. */
+static int xscaler_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xscaler_device *xscaler = to_scaler(subdev);
+
+ fmt->format = *__xscaler_get_pad_format(xscaler, cfg, fmt->pad,
+ fmt->which);
+
+ return 0;
+}
+
+/* Clamp a requested crop rectangle so it stays inside the sink format
+ * and keeps at least the minimum supported width/height.
+ */
+static void xscaler_try_crop(const struct v4l2_mbus_framefmt *sink,
+ struct v4l2_rect *crop)
+{
+
+ crop->left = min_t(u32, crop->left, sink->width - XSCALER_MIN_WIDTH);
+ crop->top = min_t(u32, crop->top, sink->height - XSCALER_MIN_HEIGHT);
+ crop->width = clamp_t(u32, crop->width, XSCALER_MIN_WIDTH,
+ sink->width - crop->left);
+ crop->height = clamp_t(u32, crop->height, XSCALER_MIN_HEIGHT,
+ sink->height - crop->top);
+}
+
+/* .set_fmt pad operation: clamp the requested size to the supported
+ * range. Setting the sink format also resets the crop rectangle to the
+ * full frame.
+ */
+static int xscaler_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xscaler_device *xscaler = to_scaler(subdev);
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
+
+ format = __xscaler_get_pad_format(xscaler, cfg, fmt->pad, fmt->which);
+
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ XSCALER_MIN_WIDTH, XSCALER_MAX_WIDTH);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ XSCALER_MIN_HEIGHT, XSCALER_MAX_HEIGHT);
+
+ fmt->format = *format;
+
+ if (fmt->pad == XVIP_PAD_SINK) {
+ /* Set the crop rectangle to the full frame */
+ crop = __xscaler_get_crop(xscaler, cfg, fmt->which);
+ crop->left = 0;
+ crop->top = 0;
+ crop->width = fmt->format.width;
+ crop->height = fmt->format.height;
+ }
+
+ return 0;
+}
+
+/* .get_selection pad operation: report crop bounds (the full sink frame)
+ * or the current crop rectangle. Only the sink pad supports selections.
+ */
+static int xscaler_get_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct xscaler_device *xscaler = to_scaler(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ if (sel->pad != XVIP_PAD_SINK)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ format = __xscaler_get_pad_format(xscaler, cfg, XVIP_PAD_SINK,
+ sel->which);
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = format->width;
+ sel->r.height = format->height;
+ return 0;
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *__xscaler_get_crop(xscaler, cfg, sel->which);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+/* .set_selection pad operation: accept a crop rectangle on the sink pad,
+ * clamped against the current sink format.
+ */
+static int xscaler_set_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct xscaler_device *xscaler = to_scaler(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ if ((sel->target != V4L2_SEL_TGT_CROP) || (sel->pad != XVIP_PAD_SINK))
+ return -EINVAL;
+
+ format = __xscaler_get_pad_format(xscaler, cfg, XVIP_PAD_SINK,
+ sel->which);
+ xscaler_try_crop(format, &sel->r);
+ *__xscaler_get_crop(xscaler, cfg, sel->which) = sel->r;
+
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Operations
+ */
+
+/* Seed the per-file-handle TRY formats with the driver defaults. */
+static int xscaler_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ struct xscaler_device *xscaler = to_scaler(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* Initialize with default formats */
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SINK);
+ *format = xscaler->default_formats[XVIP_PAD_SINK];
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SOURCE);
+ *format = xscaler->default_formats[XVIP_PAD_SOURCE];
+
+ return 0;
+}
+
+/* Nothing to release per file handle; provided for symmetry with open. */
+static int xscaler_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+/* V4L2 subdev operation tables wiring the handlers defined above. */
+static struct v4l2_subdev_video_ops xscaler_video_ops = {
+ .s_stream = xscaler_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops xscaler_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xscaler_enum_frame_size,
+ .get_fmt = xscaler_get_format,
+ .set_fmt = xscaler_set_format,
+ .get_selection = xscaler_get_selection,
+ .set_selection = xscaler_set_selection,
+};
+
+static struct v4l2_subdev_ops xscaler_ops = {
+ .video = &xscaler_video_ops,
+ .pad = &xscaler_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xscaler_internal_ops = {
+ .open = xscaler_open,
+ .close = xscaler_close,
+};
+
+/*
+ * Media Operations
+ */
+
+static const struct media_entity_operations xscaler_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * Power Management
+ */
+
+/* System suspend: quiesce the IP core via the common xvip helper. */
+static int __maybe_unused xscaler_pm_suspend(struct device *dev)
+{
+ struct xscaler_device *xscaler = dev_get_drvdata(dev);
+
+ xvip_suspend(&xscaler->xvip);
+
+ return 0;
+}
+
+/* System resume: restart the IP core via the common xvip helper. */
+static int __maybe_unused xscaler_pm_resume(struct device *dev)
+{
+ struct xscaler_device *xscaler = dev_get_drvdata(dev);
+
+ xvip_resume(&xscaler->xvip);
+
+ return 0;
+}
+
+/*
+ * Platform Device Driver
+ */
+
+/* Parse the device tree node: the per-port video format (which must be
+ * identical on input and output), the tap/phase counts, and the optional
+ * separate-coefficient flags.
+ *
+ * Return: 0 on success, a negative errno on missing/invalid properties.
+ */
+static int xscaler_parse_of(struct xscaler_device *xscaler)
+{
+ struct device *dev = xscaler->xvip.dev;
+ struct device_node *node = xscaler->xvip.dev->of_node;
+ struct device_node *ports;
+ struct device_node *port;
+ int ret;
+
+ /* Ports may live under a "ports" container or directly in the node */
+ ports = of_get_child_by_name(node, "ports");
+ if (ports == NULL)
+ ports = node;
+
+ /* Get the format description for each pad */
+ for_each_child_of_node(ports, port) {
+ if (port->name && (of_node_cmp(port->name, "port") == 0)) {
+ const struct xvip_video_format *vip_format;
+
+ vip_format = xvip_of_get_format(port);
+ if (IS_ERR(vip_format)) {
+ dev_err(dev, "invalid format in DT");
+ return PTR_ERR(vip_format);
+ }
+
+ /* The scaler does not convert formats */
+ if (!xscaler->vip_format) {
+ xscaler->vip_format = vip_format;
+ } else if (xscaler->vip_format != vip_format) {
+ dev_err(dev, "in/out format mismatch in DT");
+ return -EINVAL;
+ }
+ }
+ }
+
+ ret = of_property_read_u32(node, "xlnx,num-hori-taps",
+ &xscaler->num_hori_taps);
+ if (ret < 0)
+ return ret;
+
+ ret = of_property_read_u32(node, "xlnx,num-vert-taps",
+ &xscaler->num_vert_taps);
+ if (ret < 0)
+ return ret;
+
+ ret = of_property_read_u32(node, "xlnx,max-num-phases",
+ &xscaler->max_num_phases);
+ if (ret < 0)
+ return ret;
+
+ xscaler->separate_yc_coef =
+ of_property_read_bool(node, "xlnx,separate-yc-coef");
+
+ xscaler->separate_hv_coef =
+ of_property_read_bool(node, "xlnx,separate-hv-coef");
+
+ return 0;
+}
+
+/* Probe: parse DT, map and reset the IP, set up the subdev/media entity,
+ * read the default frame sizes back from hardware, program the Lanczos
+ * coefficient tables (one set per configured Y/C and H/V split), and
+ * register the subdev with the async framework.
+ */
+static int xscaler_probe(struct platform_device *pdev)
+{
+ struct xscaler_device *xscaler;
+ struct v4l2_subdev *subdev;
+ struct v4l2_mbus_framefmt *default_format;
+ u32 size;
+ int ret;
+
+ xscaler = devm_kzalloc(&pdev->dev, sizeof(*xscaler), GFP_KERNEL);
+ if (!xscaler)
+ return -ENOMEM;
+
+ xscaler->xvip.dev = &pdev->dev;
+
+ ret = xscaler_parse_of(xscaler);
+ if (ret < 0)
+ return ret;
+
+ ret = xvip_init_resources(&xscaler->xvip);
+ if (ret < 0)
+ return ret;
+
+ /* Reset and initialize the core */
+ xvip_reset(&xscaler->xvip);
+
+ /* Initialize V4L2 subdevice and media entity */
+ subdev = &xscaler->xvip.subdev;
+ v4l2_subdev_init(subdev, &xscaler_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xscaler_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ v4l2_set_subdevdata(subdev, xscaler);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ /* Initialize default and active formats; the initial frame sizes
+ * are read back from the core's size registers after reset
+ */
+ default_format = &xscaler->default_formats[XVIP_PAD_SINK];
+ default_format->code = xscaler->vip_format->code;
+ default_format->field = V4L2_FIELD_NONE;
+ default_format->colorspace = V4L2_COLORSPACE_SRGB;
+ size = xvip_read(&xscaler->xvip, XSCALER_SOURCE_SIZE);
+ default_format->width = (size >> XSCALER_SIZE_HORZ_SHIFT) &
+ XSCALER_SIZE_MASK;
+ default_format->height = (size >> XSCALER_SIZE_VERT_SHIFT) &
+ XSCALER_SIZE_MASK;
+
+ xscaler->formats[XVIP_PAD_SINK] = *default_format;
+
+ default_format = &xscaler->default_formats[XVIP_PAD_SOURCE];
+ *default_format = xscaler->default_formats[XVIP_PAD_SINK];
+ size = xvip_read(&xscaler->xvip, XSCALER_OUTPUT_SIZE);
+ default_format->width = (size >> XSCALER_SIZE_HORZ_SHIFT) &
+ XSCALER_SIZE_MASK;
+ default_format->height = (size >> XSCALER_SIZE_VERT_SHIFT) &
+ XSCALER_SIZE_MASK;
+
+ xscaler->formats[XVIP_PAD_SOURCE] = *default_format;
+
+ xscaler->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ xscaler->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ subdev->entity.ops = &xscaler_media_ops;
+
+ ret = media_entity_pads_init(&subdev->entity, 2, xscaler->pads);
+ if (ret < 0)
+ goto error;
+
+ platform_set_drvdata(pdev, xscaler);
+
+ xvip_print_version(&xscaler->xvip);
+
+ /* The core consumes coefficient tables in a fixed order: luma H,
+ * luma V (if split), then chroma H/V (if split) — one table per
+ * xscaler_set_coefs() call
+ */
+ ret = xscaler_set_coefs(xscaler, (s16)xscaler->num_hori_taps);
+ if (ret < 0)
+ goto error;
+
+ if (xscaler->separate_hv_coef) {
+ ret = xscaler_set_coefs(xscaler, (s16)xscaler->num_vert_taps);
+ if (ret < 0)
+ goto error;
+ }
+
+ if (xscaler->separate_yc_coef) {
+ ret = xscaler_set_coefs(xscaler, (s16)xscaler->num_hori_taps);
+ if (ret < 0)
+ goto error;
+
+ if (xscaler->separate_hv_coef) {
+ ret = xscaler_set_coefs(xscaler,
+ (s16)xscaler->num_vert_taps);
+ if (ret < 0)
+ goto error;
+ }
+ }
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ media_entity_cleanup(&subdev->entity);
+ xvip_cleanup_resources(&xscaler->xvip);
+ return ret;
+}
+
+/* Undo probe(): unregister the subdev and release media entity and IP
+ * core resources.
+ */
+static int xscaler_remove(struct platform_device *pdev)
+{
+ struct xscaler_device *xscaler = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xscaler->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ media_entity_cleanup(&subdev->entity);
+
+ xvip_cleanup_resources(&xscaler->xvip);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(xscaler_pm_ops, xscaler_pm_suspend, xscaler_pm_resume);
+
+/* OF match table for the scaler IP core, v8.1. */
+static const struct of_device_id xscaler_of_id_table[] = {
+ { .compatible = "xlnx,v-scaler-8.1" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, xscaler_of_id_table);
+
+static struct platform_driver xscaler_driver = {
+ .driver = {
+ .name = "xilinx-scaler",
+ /* Hook up the PM ops defined above; without this the
+ * suspend/resume callbacks are never invoked (compare
+ * with the rgb2yuv driver in this series)
+ */
+ .pm = &xscaler_pm_ops,
+ .of_match_table = xscaler_of_id_table,
+ },
+ .probe = xscaler_probe,
+ .remove = xscaler_remove,
+};
+
+module_platform_driver(xscaler_driver);
+
+MODULE_DESCRIPTION("Xilinx Scaler Driver")
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-scenechange-channel.c b/drivers/media/platform/xilinx/xilinx-scenechange-channel.c
new file mode 100644
index 000000000000..4f3a8d03d217
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-scenechange-channel.c
@@ -0,0 +1,452 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Scene Change Detection driver
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Authors: Anand Ashok Dumbre <anand.ashok.dumbre@xilinx.com>
+ * Satish Kumar Nagireddy <satish.nagireddy.nagireddy@xilinx.com>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/xilinx-v4l2-events.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-scenechange.h"
+#include "xilinx-vip.h"
+
+#define XSCD_MAX_WIDTH 3840
+#define XSCD_MAX_HEIGHT 2160
+#define XSCD_MIN_WIDTH 640
+#define XSCD_MIN_HEIGHT 480
+
+#define XSCD_V_SUBSAMPLING 16
+#define XSCD_BYTE_ALIGN 16
+#define MULTIPLICATION_FACTOR 100
+
+#define XSCD_SCENE_CHANGE 1
+#define XSCD_NO_SCENE_CHANGE 0
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+/* .enum_mbus_code stub: no codes are enumerated for the SCD subdev. */
+static int xscd_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ return 0;
+}
+
+/* .enum_frame_size stub: no frame sizes are enumerated for the SCD subdev. */
+static int xscd_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ return 0;
+}
+
+/* Return the TRY (per-file-handle) or ACTIVE format for a pad, or NULL
+ * for an unknown "which" value. The trailing duplicate "return NULL"
+ * after the switch was unreachable (every case returns) and has been
+ * dropped.
+ */
+static struct v4l2_mbus_framefmt *
+__xscd_get_pad_format(struct xscd_chan *chan,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&chan->subdev, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &chan->format;
+ default:
+ return NULL;
+ }
+}
+
+/* .get_fmt pad operation: copy out the requested pad format. */
+static int xscd_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xscd_chan *chan = to_xscd_chan(subdev);
+
+ fmt->format = *__xscd_get_pad_format(chan, cfg, fmt->pad, fmt->which);
+ return 0;
+}
+
+/* .set_fmt pad operation: clamp the requested size to the supported
+ * range and accept the media bus code unchanged.
+ */
+static int xscd_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xscd_chan *chan = to_xscd_chan(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __xscd_get_pad_format(chan, cfg, fmt->pad, fmt->which);
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ XSCD_MIN_WIDTH, XSCD_MAX_WIDTH);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ XSCD_MIN_HEIGHT, XSCD_MAX_HEIGHT);
+ format->code = fmt->format.code;
+ fmt->format = *format;
+
+ return 0;
+}
+
+/**
+ * xscd_chan_get_vid_fmt - Map a media bus code to the hardware video format
+ * @media_bus_fmt: V4L2 media bus format code
+ * @memory_based: true when the IP operates in memory (DMA) mode
+ *
+ * In memory mode the IP only reads the luma plane, so every YUV variant
+ * maps to a Y-only format (8 or 10 bit). In streaming mode the exact
+ * chroma layout matters. Unrecognized codes fall back to a default.
+ *
+ * Return: XSCD_VID_FMT_* register value.
+ */
+static int xscd_chan_get_vid_fmt(u32 media_bus_fmt, bool memory_based)
+{
+ u32 vid_fmt;
+
+ if (memory_based) {
+ switch (media_bus_fmt) {
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ vid_fmt = XSCD_VID_FMT_Y8;
+ break;
+ case MEDIA_BUS_FMT_VYYUYY10_4X20:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ case MEDIA_BUS_FMT_VUY10_1X30:
+ vid_fmt = XSCD_VID_FMT_Y10;
+ break;
+ default:
+ vid_fmt = XSCD_VID_FMT_Y8;
+ }
+
+ return vid_fmt;
+ }
+
+ /* Streaming based */
+ switch (media_bus_fmt) {
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ case MEDIA_BUS_FMT_VYYUYY10_4X20:
+ vid_fmt = XSCD_VID_FMT_YUV_420;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ vid_fmt = XSCD_VID_FMT_YUV_422;
+ break;
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ case MEDIA_BUS_FMT_VUY10_1X30:
+ vid_fmt = XSCD_VID_FMT_YUV_444;
+ break;
+ case MEDIA_BUS_FMT_RBG888_1X24:
+ case MEDIA_BUS_FMT_RBG101010_1X30:
+ vid_fmt = XSCD_VID_FMT_RGB;
+ break;
+ default:
+ vid_fmt = XSCD_VID_FMT_YUV_420;
+ }
+
+ return vid_fmt;
+}
+
+/**
+ * xscd_chan_configure_params - Program parameters to HW registers
+ * @chan: Driver specific channel struct pointer
+ *
+ * Writes the channel's active format (width, height, stride for memory
+ * mode, hardware video format) and the vertical subsampling factor to
+ * the per-channel register window.
+ */
+static void xscd_chan_configure_params(struct xscd_chan *chan)
+{
+ u32 vid_fmt, stride;
+
+ xscd_write(chan->iomem, XSCD_WIDTH_OFFSET, chan->format.width);
+
+ /* Stride is required only for memory based IP, not for streaming IP */
+ if (chan->xscd->memory_based) {
+ stride = roundup(chan->format.width, XSCD_BYTE_ALIGN);
+ xscd_write(chan->iomem, XSCD_STRIDE_OFFSET, stride);
+ }
+
+ xscd_write(chan->iomem, XSCD_HEIGHT_OFFSET, chan->format.height);
+
+ /* Hardware video format */
+ vid_fmt = xscd_chan_get_vid_fmt(chan->format.code,
+ chan->xscd->memory_based);
+ xscd_write(chan->iomem, XSCD_VID_FMT_OFFSET, vid_fmt);
+
+ /*
+ * This is the vertical subsampling factor of the input image. Instead
+ * of sampling every line to calculate the histogram, IP uses this
+ * register value to sample only specific lines of the frame.
+ */
+ xscd_write(chan->iomem, XSCD_SUBSAMPLE_OFFSET, XSCD_V_SUBSAMPLING);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+/* Control handler: only the scene-change threshold (percentage used by
+ * xscd_chan_event_notify()) is supported.
+ */
+static int xscd_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ int ret = 0;
+ struct xscd_chan *chan = container_of(ctrl->handler, struct xscd_chan,
+ ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_XILINX_SCD_THRESHOLD:
+ chan->threshold = ctrl->val;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* Start or stop a channel: program parameters and enable the DMA channel
+ * on start; on stop in streaming mode, pulse the reset GPIO so a later
+ * resolution change takes effect.
+ */
+static int xscd_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xscd_chan *chan = to_xscd_chan(subdev);
+ struct xscd_device *xscd = chan->xscd;
+
+ if (enable)
+ xscd_chan_configure_params(chan);
+
+ xscd_dma_enable_channel(&chan->dmachan, enable);
+
+ /*
+ * Resolution change doesn't work in stream based mode unless
+ * the device is reset.
+ */
+ if (!enable && !xscd->memory_based) {
+ gpiod_set_value_cansleep(xscd->rst_gpio, XSCD_RESET_ASSERT);
+ gpiod_set_value_cansleep(xscd->rst_gpio, XSCD_RESET_DEASSERT);
+ }
+
+ return 0;
+}
+
+/* Subscribe to scene-change events; only V4L2_EVENT_XLNXSCD is offered.
+ * The channel lock serializes against unsubscribe.
+ */
+static int xscd_subscribe_event(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ int ret;
+ struct xscd_chan *chan = to_xscd_chan(sd);
+
+ mutex_lock(&chan->lock);
+
+ switch (sub->type) {
+ case V4L2_EVENT_XLNXSCD:
+ ret = v4l2_event_subscribe(fh, sub, 1, NULL);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&chan->lock);
+
+ return ret;
+}
+
+/* Unsubscribe from events, serialized with subscribe via the channel lock. */
+static int xscd_unsubscribe_event(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ int ret;
+ struct xscd_chan *chan = to_xscd_chan(sd);
+
+ mutex_lock(&chan->lock);
+ ret = v4l2_event_unsubscribe(fh, sub);
+ mutex_unlock(&chan->lock);
+
+ return ret;
+}
+
+/* No per-file-handle state to set up or tear down for this subdev. */
+static int xscd_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static int xscd_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops xscd_ctrl_ops = {
+ .s_ctrl = xscd_s_ctrl
+};
+
+/* Custom controls: the scene-change threshold, as a 0-100 percentage. */
+static const struct v4l2_ctrl_config xscd_ctrls[] = {
+ {
+ .ops = &xscd_ctrl_ops,
+ .id = V4L2_CID_XILINX_SCD_THRESHOLD,
+ .name = "Threshold Value",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 100,
+ .step = 1,
+ .def = 50,
+ }
+};
+
+/* V4L2 subdev operation tables wiring the handlers defined above. */
+static const struct v4l2_subdev_core_ops xscd_core_ops = {
+ .subscribe_event = xscd_subscribe_event,
+ .unsubscribe_event = xscd_unsubscribe_event
+};
+
+static struct v4l2_subdev_video_ops xscd_video_ops = {
+ .s_stream = xscd_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops xscd_pad_ops = {
+ .enum_mbus_code = xscd_enum_mbus_code,
+ .enum_frame_size = xscd_enum_frame_size,
+ .get_fmt = xscd_get_format,
+ .set_fmt = xscd_set_format,
+};
+
+static struct v4l2_subdev_ops xscd_ops = {
+ .core = &xscd_core_ops,
+ .video = &xscd_video_ops,
+ .pad = &xscd_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xscd_internal_ops = {
+ .open = xscd_open,
+ .close = xscd_close,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media Operations
+ */
+
+static const struct media_entity_operations xscd_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/* Read the sum-of-absolute-differences result from hardware, normalize
+ * it per pixel (scaled by the subsampling factor and a percentage
+ * multiplier), compare against the user threshold, and emit a
+ * V4L2_EVENT_XLNXSCD event carrying the scene-change verdict.
+ */
+void xscd_chan_event_notify(struct xscd_chan *chan)
+{
+ u32 *eventdata;
+ u32 sad;
+
+ sad = xscd_read(chan->iomem, XSCD_SAD_OFFSET);
+ sad = (sad * XSCD_V_SUBSAMPLING * MULTIPLICATION_FACTOR) /
+ (chan->format.width * chan->format.height);
+ eventdata = (u32 *)&chan->event.u.data;
+
+ if (sad > chan->threshold)
+ eventdata[0] = XSCD_SCENE_CHANGE;
+ else
+ eventdata[0] = XSCD_NO_SCENE_CHANGE;
+
+ chan->event.type = V4L2_EVENT_XLNXSCD;
+ v4l2_subdev_notify_event(&chan->subdev, &chan->event);
+}
+
+/**
+ * xscd_chan_init - Initialize the V4L2 subdev for a channel
+ * @xscd: Pointer to the SCD device structure
+ * @chan_id: Channel id
+ * @node: device node
+ *
+ * Return: '0' on success and failure value on error
+ */
+int xscd_chan_init(struct xscd_device *xscd, unsigned int chan_id,
+ struct device_node *node)
+{
+ struct xscd_chan *chan = &xscd->chans[chan_id];
+ struct v4l2_subdev *subdev;
+ unsigned int num_pads;
+ int ret;
+ unsigned int i;
+
+ mutex_init(&chan->lock);
+ chan->xscd = xscd;
+ chan->id = chan_id;
+ chan->iomem = chan->xscd->iomem + chan->id * XSCD_CHAN_OFFSET;
+
+ /* Initialize V4L2 subdevice and media entity */
+ subdev = &chan->subdev;
+ v4l2_subdev_init(subdev, &xscd_ops);
+ subdev->dev = chan->xscd->dev;
+ subdev->fwnode = of_fwnode_handle(node);
+ subdev->internal_ops = &xscd_internal_ops;
+ snprintf(subdev->name, sizeof(subdev->name), "xlnx-scdchan.%u",
+ chan_id);
+ v4l2_set_subdevdata(subdev, chan);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+
+ /* Initialize default format */
+ chan->format.code = MEDIA_BUS_FMT_VYYUYY8_1X24;
+ chan->format.field = V4L2_FIELD_NONE;
+ chan->format.width = XSCD_MAX_WIDTH;
+ chan->format.height = XSCD_MAX_HEIGHT;
+
+ /* Initialize media pads: memory mode only consumes frames (sink),
+ * streaming mode passes them through (sink + source)
+ */
+ num_pads = xscd->memory_based ? 1 : 2;
+
+ chan->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ if (!xscd->memory_based)
+ chan->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&subdev->entity, num_pads, chan->pads);
+ if (ret < 0)
+ goto media_init_error;
+
+ subdev->entity.ops = &xscd_media_ops;
+
+ /* Initialize V4L2 Control Handler */
+ v4l2_ctrl_handler_init(&chan->ctrl_handler, ARRAY_SIZE(xscd_ctrls));
+
+ for (i = 0; i < ARRAY_SIZE(xscd_ctrls); i++) {
+ struct v4l2_ctrl *ctrl;
+
+ dev_dbg(chan->xscd->dev, "%d ctrl = 0x%x\n", i,
+ xscd_ctrls[i].id);
+ ctrl = v4l2_ctrl_new_custom(&chan->ctrl_handler, &xscd_ctrls[i],
+ NULL);
+ if (!ctrl) {
+ dev_err(chan->xscd->dev, "Failed for %s ctrl\n",
+ xscd_ctrls[i].name);
+ /* Set ret so the failure is propagated; previously
+ * this path leaked a stale 0 and reported success.
+ * v4l2_ctrl_new_custom() records the reason in the
+ * handler's error field.
+ */
+ ret = chan->ctrl_handler.error ?
+ chan->ctrl_handler.error : -EINVAL;
+ goto ctrl_handler_error;
+ }
+ }
+
+ if (chan->ctrl_handler.error) {
+ dev_err(chan->xscd->dev, "failed to add controls\n");
+ ret = chan->ctrl_handler.error;
+ goto ctrl_handler_error;
+ }
+
+ subdev->ctrl_handler = &chan->ctrl_handler;
+
+ ret = v4l2_ctrl_handler_setup(&chan->ctrl_handler);
+ if (ret < 0) {
+ dev_err(chan->xscd->dev, "failed to set controls\n");
+ goto ctrl_handler_error;
+ }
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(chan->xscd->dev, "failed to register subdev\n");
+ goto ctrl_handler_error;
+ }
+
+ dev_info(chan->xscd->dev, "Scene change detection channel found!\n");
+ return 0;
+
+ctrl_handler_error:
+ v4l2_ctrl_handler_free(&chan->ctrl_handler);
+media_init_error:
+ media_entity_cleanup(&subdev->entity);
+ mutex_destroy(&chan->lock);
+ return ret;
+}
+
+/**
+ * xscd_chan_cleanup - Clean up the V4L2 subdev for a channel
+ * @xscd: Pointer to the SCD device structure
+ * @chan_id: Channel id
+ * @node: device node
+ *
+ * Reverses xscd_chan_init(): unregisters the subdev and frees the
+ * controls, media entity and channel lock.
+ */
+void xscd_chan_cleanup(struct xscd_device *xscd, unsigned int chan_id,
+ struct device_node *node)
+{
+ struct xscd_chan *chan = &xscd->chans[chan_id];
+ struct v4l2_subdev *subdev = &chan->subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ v4l2_ctrl_handler_free(&chan->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+ mutex_destroy(&chan->lock);
+}
diff --git a/drivers/media/platform/xilinx/xilinx-scenechange-dma.c b/drivers/media/platform/xilinx/xilinx-scenechange-dma.c
new file mode 100644
index 000000000000..58437a769605
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-scenechange-dma.c
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Scene Change Detection DMA driver
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Authors: Anand Ashok Dumbre <anand.ashok.dumbre@xilinx.com>
+ * Satish Kumar Nagireddy <satish.nagireddy.nagireddy@xilinx.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/of_dma.h>
+#include <linux/slab.h>
+
+#include "../../../dma/dmaengine.h"
+
+#include "xilinx-scenechange.h"
+
+/**
+ * xscd_dma_start - Start the SCD core
+ * @xscd: The SCD device
+ * @channels: Bitmask of enabled channels
+ *
+ * Enable the AP_DONE interrupt, program the channel enable mask and start
+ * the core. In memory-based mode the core performs a single run per start;
+ * in stream-based mode AUTO_RESTART keeps it processing frames continuously.
+ */
+static void xscd_dma_start(struct xscd_device *xscd, unsigned int channels)
+{
+	xscd_write(xscd->iomem, XSCD_IE_OFFSET, XSCD_IE_AP_DONE);
+	xscd_write(xscd->iomem, XSCD_GIE_OFFSET, XSCD_GIE_EN);
+	xscd_write(xscd->iomem, XSCD_CHAN_EN_OFFSET, channels);
+
+	/* Stream-based mode also sets AUTO_RESTART for continuous operation. */
+	xscd_set(xscd->iomem, XSCD_CTRL_OFFSET,
+		 xscd->memory_based ? XSCD_CTRL_AP_START
+				    : XSCD_CTRL_AP_START |
+				      XSCD_CTRL_AUTO_RESTART);
+
+	xscd->running = true;
+}
+
+/**
+ * xscd_dma_stop - Stop the SCD core
+ * @xscd: The SCD device
+ *
+ * Clear the start (and, in stream-based mode, auto-restart) bits set by
+ * xscd_dma_start() and mark the core as idle.
+ */
+static void xscd_dma_stop(struct xscd_device *xscd)
+{
+	xscd_clr(xscd->iomem, XSCD_CTRL_OFFSET,
+		 xscd->memory_based ? XSCD_CTRL_AP_START
+				    : XSCD_CTRL_AP_START |
+				      XSCD_CTRL_AUTO_RESTART);
+
+	xscd->running = false;
+}
+
+/**
+ * xscd_dma_setup_channel - Setup a channel for transfer
+ * @chan: Driver specific channel struct pointer
+ *
+ * Pop the first pending descriptor, program its luma buffer address into
+ * the channel registers and make it the active descriptor. Called with
+ * @chan->lock held by xscd_dma_kick().
+ *
+ * Return: 1 if the channel starts to run for a new transfer. Otherwise, 0.
+ */
+static int xscd_dma_setup_channel(struct xscd_dma_chan *chan)
+{
+	struct xscd_dma_tx_descriptor *desc;
+
+	/* Nothing to do for disabled channels or empty queues. */
+	if (!chan->enabled)
+		return 0;
+
+	if (list_empty(&chan->pending_list))
+		return 0;
+
+	desc = list_first_entry(&chan->pending_list,
+				struct xscd_dma_tx_descriptor, node);
+	list_del(&desc->node);
+
+	xscd_write(chan->iomem, XSCD_ADDR_OFFSET, desc->sw.luma_plane_addr);
+	chan->active_desc = desc;
+
+	return 1;
+}
+
+/**
+ * xscd_dma_kick - Start a run of the SCD core if channels are ready
+ * @xscd: The SCD device
+ *
+ * This function starts a single run of the SCD core when all the following
+ * conditions are met:
+ *
+ * - The SCD is not currently running
+ * - At least one channel is enabled and has buffers available
+ *
+ * It can be used to start the SCD when a buffer is queued, when a channel
+ * starts streaming, or to start the next run. Calling this function is only
+ * valid for memory-based mode and is not permitted for stream-based mode.
+ *
+ * The running state for all channels is updated. Channels that are being
+ * stopped are signalled through the channel wait queue.
+ *
+ * The function must be called with the xscd_device lock held.
+ */
+static void xscd_dma_kick(struct xscd_device *xscd)
+{
+	unsigned int channels = 0;
+	unsigned int i;
+
+	lockdep_assert_held(&xscd->lock);
+
+	if (xscd->running)
+		return;
+
+	for (i = 0; i < xscd->num_streams; i++) {
+		struct xscd_dma_chan *chan = xscd->channels[i];
+		unsigned long flags;
+		unsigned int running;
+		bool stopped;
+
+		spin_lock_irqsave(&chan->lock, flags);
+		running = xscd_dma_setup_channel(chan);
+		/* A channel that was running but got no new buffer stops. */
+		stopped = chan->running && !running;
+		chan->running = running;
+		spin_unlock_irqrestore(&chan->lock, flags);
+
+		channels |= running << chan->id;
+		/* Wake waiters in xscd_dma_terminate_all(). */
+		if (stopped)
+			wake_up(&chan->wait);
+	}
+
+	if (channels)
+		xscd_dma_start(xscd, channels);
+	else
+		xscd_dma_stop(xscd);
+}
+
+/**
+ * xscd_dma_enable_channel - Enable/disable a channel
+ * @chan: Driver specific channel struct pointer
+ * @enable: True to enable the channel, false to disable it
+ *
+ * This function enables or disables a channel. When operating in
+ * memory-based mode, enabling a channel kicks processing if buffers are
+ * available for any enabled channel and the SCD core is idle. When operating
+ * in stream-based mode, the SCD core is started or stopped synchronously
+ * when the channel is enabled or disabled.
+ *
+ * This function must be called in non-atomic, non-interrupt context.
+ */
+void xscd_dma_enable_channel(struct xscd_dma_chan *chan, bool enable)
+{
+	struct xscd_device *xscd = chan->xscd;
+
+	if (enable) {
+		/*
+		 * FIXME: Don't set chan->enabled to false here, it will be
+		 * done in xscd_dma_terminate_all(). This works around a bug
+		 * introduced in commit 2e77607047c6 ("xilinx: v4l2: dma: Add
+		 * multiple output support") that stops all channels when the
+		 * first one is stopped, even though they are part of
+		 * independent pipelines. This workaround should be safe as
+		 * long as dmaengine_terminate_all() is called after
+		 * xvip_pipeline_set_stream().
+		 */
+		spin_lock_irq(&chan->lock);
+		chan->enabled = true;
+		spin_unlock_irq(&chan->lock);
+	}
+
+	if (xscd->memory_based) {
+		if (enable) {
+			spin_lock_irq(&xscd->lock);
+			xscd_dma_kick(xscd);
+			spin_unlock_irq(&xscd->lock);
+		}
+	} else {
+		/* Stream-based mode: start/stop the core synchronously. */
+		if (enable)
+			xscd_dma_start(xscd, BIT(chan->id));
+		else
+			xscd_dma_stop(xscd);
+	}
+}
+
+/**
+ * xscd_dma_irq_handler - scdma Interrupt handler
+ * @xscd: Pointer to the SCD device structure
+ *
+ * Completes the active descriptor of every stream, notifies the V4L2 layer
+ * of the scene-change event, and kicks the next run of the core.
+ */
+void xscd_dma_irq_handler(struct xscd_device *xscd)
+{
+	unsigned int i;
+
+	/*
+	 * Mark the active descriptors as complete, move them to the done list
+	 * and schedule the tasklet to clean them up.
+	 */
+	for (i = 0; i < xscd->num_streams; ++i) {
+		struct xscd_dma_chan *chan = xscd->channels[i];
+		/*
+		 * NOTE(review): active_desc is read before chan->lock is
+		 * taken — presumably safe because the core is idle when
+		 * AP_DONE fires and xscd_dma_kick() only runs after
+		 * xscd->running is cleared below. Confirm.
+		 */
+		struct xscd_dma_tx_descriptor *desc = chan->active_desc;
+
+		if (!desc)
+			continue;
+
+		dma_cookie_complete(&desc->async_tx);
+		xscd_chan_event_notify(&xscd->chans[i]);
+
+		spin_lock(&chan->lock);
+		list_add_tail(&desc->node, &chan->done_list);
+		chan->active_desc = NULL;
+		spin_unlock(&chan->lock);
+
+		tasklet_schedule(&chan->tasklet);
+	}
+
+	/* Start the next run, if any. */
+	spin_lock(&xscd->lock);
+	xscd->running = false;
+	xscd_dma_kick(xscd);
+	spin_unlock(&xscd->lock);
+}
+
+/* -----------------------------------------------------------------------------
+ * DMA Engine
+ */
+
+/**
+ * xscd_dma_tx_submit - Submit DMA transaction
+ * @tx: Async transaction descriptor
+ *
+ * Assign a cookie to the transaction and queue its descriptor on the
+ * channel's pending list. The transfer itself is started later by
+ * xscd_dma_issue_pending().
+ *
+ * Return: cookie value on success and failure value on error
+ */
+static dma_cookie_t xscd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct xscd_dma_tx_descriptor *desc = to_xscd_dma_tx_descriptor(tx);
+	struct xscd_dma_chan *chan = to_xscd_dma_chan(tx->chan);
+	dma_cookie_t cookie;
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->lock, flags);
+	cookie = dma_cookie_assign(tx);
+	list_add_tail(&desc->node, &chan->pending_list);
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	return cookie;
+}
+
+/**
+ * xscd_dma_free_desc_list - Free descriptors list
+ * @chan: Driver specific dma channel
+ * @list: List to parse and delete the descriptor
+ *
+ * Unlink and free every descriptor queued on @list.
+ */
+static void xscd_dma_free_desc_list(struct xscd_dma_chan *chan,
+				    struct list_head *list)
+{
+	while (!list_empty(list)) {
+		struct xscd_dma_tx_descriptor *desc;
+
+		desc = list_first_entry(list, struct xscd_dma_tx_descriptor,
+					node);
+		list_del(&desc->node);
+		kfree(desc);
+	}
+}
+
+/**
+ * xscd_dma_free_descriptors - Free channel descriptors
+ * @chan: Driver specific dma channel
+ *
+ * Release the pending and done lists as well as the active descriptor,
+ * leaving the channel with empty queues.
+ */
+static void xscd_dma_free_descriptors(struct xscd_dma_chan *chan)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	xscd_dma_free_desc_list(chan, &chan->pending_list);
+	xscd_dma_free_desc_list(chan, &chan->done_list);
+	/* kfree(NULL) is a no-op, so an idle channel is handled too. */
+	kfree(chan->active_desc);
+
+	chan->active_desc = NULL;
+	INIT_LIST_HEAD(&chan->pending_list);
+	INIT_LIST_HEAD(&chan->done_list);
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xscd_dma_chan_desc_cleanup - Clean channel descriptors
+ * @chan: Driver specific dma channel
+ *
+ * Run the completion callback of every descriptor on the done list and
+ * free it. The channel lock is dropped around the callback invocation, as
+ * dmaengine callbacks must not be called with the channel lock held.
+ */
+static void xscd_dma_chan_desc_cleanup(struct xscd_dma_chan *chan)
+{
+	struct xscd_dma_tx_descriptor *desc, *next;
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
+		dma_async_tx_callback callback;
+		void *callback_param;
+
+		list_del(&desc->node);
+
+		/* Run the link descriptor callback function */
+		callback = desc->async_tx.callback;
+		callback_param = desc->async_tx.callback_param;
+		if (callback) {
+			spin_unlock_irqrestore(&chan->lock, flags);
+			callback(callback_param);
+			spin_lock_irqsave(&chan->lock, flags);
+		}
+
+		kfree(desc);
+	}
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xscd_dma_prep_interleaved - prepare a descriptor for a
+ * DMA_SLAVE transaction
+ * @dchan: DMA channel
+ * @xt: Interleaved template pointer
+ * @flags: transfer ack flags
+ *
+ * Allocate a transaction descriptor and record the luma frame geometry
+ * (lines from @xt->numf, width and stride from the first chunk) and the
+ * source buffer address.
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+xscd_dma_prep_interleaved(struct dma_chan *dchan,
+			  struct dma_interleaved_template *xt,
+			  unsigned long flags)
+{
+	struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);
+	struct xscd_dma_tx_descriptor *desc;
+	struct xscd_dma_desc *sw;
+
+	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+	if (!desc)
+		return NULL;
+
+	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+	desc->async_tx.tx_submit = xscd_dma_tx_submit;
+	async_tx_ack(&desc->async_tx);
+
+	sw = &desc->sw;
+	sw->vsize = xt->numf;
+	sw->hsize = xt->sgl[0].size;
+	sw->stride = xt->sgl[0].size + xt->sgl[0].icg;
+	sw->luma_plane_addr = xt->src_start;
+
+	return &desc->async_tx;
+}
+
+/* Sample chan->running under the channel lock. */
+static bool xscd_dma_is_running(struct xscd_dma_chan *chan)
+{
+	bool active;
+
+	spin_lock_irq(&chan->lock);
+	active = chan->running;
+	spin_unlock_irq(&chan->lock);
+
+	return active;
+}
+
+/**
+ * xscd_dma_terminate_all - Halt the channel and free descriptors
+ * @dchan: Driver specific dma channel pointer
+ *
+ * Disable the channel, wait (up to 100 ms) for an on-going run of the SCD
+ * core to finish, then free all queued descriptors.
+ *
+ * Return: 0
+ */
+static int xscd_dma_terminate_all(struct dma_chan *dchan)
+{
+	struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);
+	int ret;
+
+	spin_lock_irq(&chan->lock);
+	chan->enabled = false;
+	spin_unlock_irq(&chan->lock);
+
+	/* Wait for any on-going transfer to complete. */
+	ret = wait_event_timeout(chan->wait, !xscd_dma_is_running(chan),
+				 msecs_to_jiffies(100));
+	/* A timeout means the hardware never signalled completion. */
+	WARN_ON(ret == 0);
+
+	xscd_dma_free_descriptors(chan);
+	return 0;
+}
+
+/**
+ * xscd_dma_issue_pending - Issue pending transactions
+ * @dchan: DMA channel
+ *
+ * Kick the SCD core so queued descriptors get programmed if the core is
+ * idle. The kick operates on the whole device, not just this channel.
+ */
+static void xscd_dma_issue_pending(struct dma_chan *dchan)
+{
+	struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);
+	struct xscd_device *xscd = chan->xscd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&xscd->lock, flags);
+	xscd_dma_kick(xscd);
+	spin_unlock_irqrestore(&xscd->lock, flags);
+}
+
+/**
+ * xscd_dma_tx_status - Get DMA transaction status
+ * @dchan: DMA channel
+ * @cookie: Transaction identifier
+ * @txstate: Transaction state
+ *
+ * Return: cookie status as tracked by the dmaengine core
+ */
+static enum dma_status xscd_dma_tx_status(struct dma_chan *dchan,
+					  dma_cookie_t cookie,
+					  struct dma_tx_state *txstate)
+{
+	return dma_cookie_status(dchan, cookie, txstate);
+}
+
+/**
+ * xscd_dma_free_chan_resources - Free channel resources
+ * @dchan: DMA channel
+ *
+ * Free all descriptors still queued on the channel.
+ */
+static void xscd_dma_free_chan_resources(struct dma_chan *dchan)
+{
+	struct xscd_dma_chan *chan = to_xscd_dma_chan(dchan);
+
+	xscd_dma_free_descriptors(chan);
+}
+
+/**
+ * xscd_dma_do_tasklet - Schedule completion tasklet
+ * @data: Pointer to the Xilinx scdma channel structure
+ *
+ * Runs descriptor completion callbacks outside of interrupt context.
+ */
+static void xscd_dma_do_tasklet(unsigned long data)
+{
+	struct xscd_dma_chan *chan = (struct xscd_dma_chan *)data;
+
+	xscd_dma_chan_desc_cleanup(chan);
+}
+
+/**
+ * xscd_dma_alloc_chan_resources - Allocate channel resources
+ * @dchan: DMA channel
+ *
+ * Descriptors are allocated per transaction, so only the cookie counter
+ * needs initialization here.
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xscd_dma_alloc_chan_resources(struct dma_chan *dchan)
+{
+	dma_cookie_init(dchan);
+	return 0;
+}
+
+/**
+ * of_scdma_xilinx_xlate - Translation function
+ * @dma_spec: Pointer to DMA specifier as found in the device tree
+ * @ofdma: Pointer to DMA controller data
+ *
+ * Map a DT DMA specifier to the corresponding SCD channel.
+ *
+ * Return: DMA channel pointer on success and NULL on error
+ */
+static struct dma_chan *of_scdma_xilinx_xlate(struct of_phandle_args *dma_spec,
+					      struct of_dma *ofdma)
+{
+	struct xscd_device *xscd = ofdma->of_dma_data;
+	/* NOTE(review): assumes args_count >= 1 per #dma-cells — confirm binding. */
+	u32 chan_id = dma_spec->args[0];
+
+	if (chan_id >= xscd->num_streams)
+		return NULL;
+
+	if (!xscd->channels[chan_id])
+		return NULL;
+
+	return dma_get_slave_channel(&xscd->channels[chan_id]->common);
+}
+
+/**
+ * xscd_dma_chan_init - Initialize one DMA channel
+ * @xscd: Pointer to the SCD device structure
+ * @chan_id: Channel id
+ *
+ * Set up the per-channel register window, lists, tasklet and wait queue,
+ * and link the channel into the dmaengine device's channel list.
+ */
+static void xscd_dma_chan_init(struct xscd_device *xscd, int chan_id)
+{
+	struct xscd_dma_chan *chan = &xscd->chans[chan_id].dmachan;
+
+	chan->id = chan_id;
+	/* Each channel has its own register window at a fixed stride. */
+	chan->iomem = xscd->iomem + chan->id * XSCD_CHAN_OFFSET;
+	chan->xscd = xscd;
+
+	xscd->channels[chan->id] = chan;
+
+	spin_lock_init(&chan->lock);
+	INIT_LIST_HEAD(&chan->pending_list);
+	INIT_LIST_HEAD(&chan->done_list);
+	tasklet_init(&chan->tasklet, xscd_dma_do_tasklet,
+		     (unsigned long)chan);
+	init_waitqueue_head(&chan->wait);
+
+	chan->common.device = &xscd->dma_device;
+	list_add_tail(&chan->common.device_node, &xscd->dma_device.channels);
+}
+
+/**
+ * xscd_dma_chan_remove - Per Channel remove function
+ * @chan: Driver specific DMA channel
+ *
+ * Unlink the channel from the dmaengine device's channel list.
+ */
+static void xscd_dma_chan_remove(struct xscd_dma_chan *chan)
+{
+	list_del(&chan->common.device_node);
+}
+
+/**
+ * xscd_dma_init - Initialize the SCD DMA engine
+ * @xscd: Pointer to the SCD device structure
+ *
+ * Set up the dmaengine device, create one DMA channel per stream and
+ * register the controller with the DT DMA helpers.
+ *
+ * Return: '0' on success and failure value on error
+ */
+int xscd_dma_init(struct xscd_device *xscd)
+{
+	struct dma_device *ddev = &xscd->dma_device;
+	unsigned int chan_id;
+	int ret;
+
+	/* Initialize the DMA engine */
+	ddev->dev = xscd->dev;
+
+	/* The SCD core uses 32-bit buffer addresses; fail early otherwise. */
+	ret = dma_set_mask(xscd->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+
+	INIT_LIST_HEAD(&ddev->channels);
+	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
+	dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
+	ddev->device_alloc_chan_resources = xscd_dma_alloc_chan_resources;
+	ddev->device_free_chan_resources = xscd_dma_free_chan_resources;
+	ddev->device_tx_status = xscd_dma_tx_status;
+	ddev->device_issue_pending = xscd_dma_issue_pending;
+	ddev->device_terminate_all = xscd_dma_terminate_all;
+	ddev->device_prep_interleaved_dma = xscd_dma_prep_interleaved;
+
+	for (chan_id = 0; chan_id < xscd->num_streams; chan_id++)
+		xscd_dma_chan_init(xscd, chan_id);
+
+	ret = dma_async_device_register(ddev);
+	if (ret) {
+		dev_err(xscd->dev, "failed to register the dma device\n");
+		goto error;
+	}
+
+	ret = of_dma_controller_register(xscd->dev->of_node,
+					 of_scdma_xilinx_xlate, xscd);
+	if (ret) {
+		dev_err(xscd->dev, "failed to register DMA to DT DMA helper\n");
+		goto error_of_dma;
+	}
+
+	dev_info(xscd->dev, "Xilinx Scene Change DMA is initialized!\n");
+	return 0;
+
+error_of_dma:
+	dma_async_device_unregister(ddev);
+
+error:
+	for (chan_id = 0; chan_id < xscd->num_streams; chan_id++)
+		xscd_dma_chan_remove(xscd->channels[chan_id]);
+
+	return ret;
+}
+
+/**
+ * xscd_dma_cleanup - Clean up the SCD DMA engine
+ * @xscd: Pointer to the SCD device structure
+ *
+ * This function is the counterpart of xscd_dma_init() and cleans up the
+ * resources related to the DMA engine.
+ */
+void xscd_dma_cleanup(struct xscd_device *xscd)
+{
+	/*
+	 * Unregister the DT translation first so no new channel can be
+	 * requested while the dmaengine device is being torn down.
+	 */
+	of_dma_controller_free(xscd->dev->of_node);
+	dma_async_device_unregister(&xscd->dma_device);
+}
diff --git a/drivers/media/platform/xilinx/xilinx-scenechange.c b/drivers/media/platform/xilinx/xilinx-scenechange.c
new file mode 100644
index 000000000000..38db9c7d37b2
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-scenechange.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Scene Change Detection driver
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Authors: Anand Ashok Dumbre <anand.ashok.dumbre@xilinx.com>
+ * Satish Kumar Nagireddy <satish.nagireddy.nagireddy@xilinx.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "xilinx-scenechange.h"
+
+/**
+ * xscd_irq_handler - SCD device interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the SCD device structure
+ *
+ * Acknowledge the AP_DONE interrupt and dispatch it: in memory-based mode
+ * to the DMA engine handler, in stream-based mode directly as an event on
+ * the single channel.
+ *
+ * Return: IRQ_HANDLED if the interrupt was ours, IRQ_NONE otherwise
+ */
+static irqreturn_t xscd_irq_handler(int irq, void *data)
+{
+	struct xscd_device *xscd = (struct xscd_device *)data;
+	u32 status;
+
+	status = xscd_read(xscd->iomem, XSCD_ISR_OFFSET);
+	if (!(status & XSCD_IE_AP_DONE))
+		return IRQ_NONE;
+
+	/* Write-one-to-clear the AP_DONE status bit. */
+	xscd_write(xscd->iomem, XSCD_ISR_OFFSET, XSCD_IE_AP_DONE);
+
+	if (xscd->memory_based)
+		xscd_dma_irq_handler(xscd);
+	else
+		xscd_chan_event_notify(&xscd->chans[0]);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * xscd_init_resources - Map registers and acquire IRQ and clock
+ * @xscd: Pointer to the SCD device structure
+ *
+ * Map the register space, look up the device IRQ, and get and enable the
+ * core clock. The clock is left enabled on success; callers must disable
+ * it on teardown.
+ *
+ * Return: 0 on success or a negative error code on failure
+ */
+static int xscd_init_resources(struct xscd_device *xscd)
+{
+	struct platform_device *pdev = to_platform_device(xscd->dev);
+	struct resource *res;
+	int ret;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	xscd->iomem = devm_ioremap_resource(xscd->dev, res);
+	if (IS_ERR(xscd->iomem))
+		return PTR_ERR(xscd->iomem);
+
+	xscd->irq = platform_get_irq(pdev, 0);
+	if (xscd->irq < 0) {
+		dev_err(xscd->dev, "No valid irq found\n");
+		/* Propagate the real error (e.g. -EPROBE_DEFER). */
+		return xscd->irq;
+	}
+
+	xscd->clk = devm_clk_get(xscd->dev, NULL);
+	if (IS_ERR(xscd->clk))
+		return PTR_ERR(xscd->clk);
+
+	ret = clk_prepare_enable(xscd->clk);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/**
+ * xscd_parse_of - Parse device tree properties
+ * @xscd: Pointer to the SCD device structure
+ *
+ * Read the operating mode, reset GPIO and number of streams from the
+ * device node. Stream-based mode supports exactly one stream.
+ *
+ * Return: 0 on success or a negative error code on failure
+ */
+static int xscd_parse_of(struct xscd_device *xscd)
+{
+	struct device *dev = xscd->dev;
+	struct device_node *node = xscd->dev->of_node;
+	int ret;
+
+	xscd->memory_based = of_property_read_bool(node, "xlnx,memorybased");
+	xscd->rst_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+	if (IS_ERR(xscd->rst_gpio)) {
+		/* Stay quiet on probe deferral, it is not an error. */
+		if (PTR_ERR(xscd->rst_gpio) != -EPROBE_DEFER)
+			dev_err(dev, "Reset GPIO not setup in DT\n");
+
+		return PTR_ERR(xscd->rst_gpio);
+	}
+
+	ret = of_property_read_u32(node, "xlnx,numstreams",
+				   &xscd->num_streams);
+	if (ret < 0)
+		return ret;
+
+	if (!xscd->memory_based && xscd->num_streams != 1) {
+		dev_err(dev, "Stream-based mode only supports one stream\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * xscd_probe - Probe the SCD device
+ * @pdev: Platform device
+ *
+ * Parse the DT, map resources, reset the IP, create the per-stream
+ * channels, initialize the DMA engine and request the device IRQ. All
+ * initialization performed so far is undone on any failure.
+ *
+ * Return: 0 on success or a negative error code on failure
+ */
+static int xscd_probe(struct platform_device *pdev)
+{
+	struct xscd_device *xscd;
+	struct device_node *subdev_node;
+	unsigned int id;
+	int ret;
+
+	xscd = devm_kzalloc(&pdev->dev, sizeof(*xscd), GFP_KERNEL);
+	if (!xscd)
+		return -ENOMEM;
+
+	spin_lock_init(&xscd->lock);
+
+	xscd->dev = &pdev->dev;
+	platform_set_drvdata(pdev, xscd);
+
+	ret = xscd_parse_of(xscd);
+	if (ret < 0)
+		return ret;
+
+	ret = xscd_init_resources(xscd);
+	if (ret < 0)
+		return ret;
+
+	/* Reset Scene Change Detection IP */
+	gpiod_set_value_cansleep(xscd->rst_gpio, XSCD_RESET_ASSERT);
+	gpiod_set_value_cansleep(xscd->rst_gpio, XSCD_RESET_DEASSERT);
+
+	/* Initialize the channels. */
+	xscd->chans = devm_kcalloc(xscd->dev, xscd->num_streams,
+				   sizeof(*xscd->chans), GFP_KERNEL);
+	if (!xscd->chans) {
+		ret = -ENOMEM;
+		goto error_clk;
+	}
+
+	id = 0;
+	for_each_child_of_node(xscd->dev->of_node, subdev_node) {
+		if (id >= xscd->num_streams) {
+			dev_warn(&pdev->dev,
+				 "Too many channels, limiting to %u\n",
+				 xscd->num_streams);
+			of_node_put(subdev_node);
+			break;
+		}
+
+		ret = xscd_chan_init(xscd, id, subdev_node);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "Failed to initialize channel %u\n",
+				id);
+			/* Drop the reference held by the iterator. */
+			of_node_put(subdev_node);
+			goto error_chans;
+		}
+
+		id++;
+	}
+
+	/* Initialize the DMA engine. */
+	ret = xscd_dma_init(xscd);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to initialize the DMA\n");
+		goto error_chans;
+	}
+
+	ret = devm_request_irq(xscd->dev, xscd->irq, xscd_irq_handler,
+			       IRQF_SHARED, dev_name(xscd->dev), xscd);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to request IRQ\n");
+		goto error_dma;
+	}
+
+	dev_info(xscd->dev, "scene change detect device found!\n");
+	return 0;
+
+error_dma:
+	xscd_dma_cleanup(xscd);
+error_chans:
+	/* Undo the channels initialized so far, in reverse order. */
+	while (id--)
+		xscd_chan_cleanup(xscd, id, NULL);
+error_clk:
+	clk_disable_unprepare(xscd->clk);
+	return ret;
+}
+
+/**
+ * xscd_remove - Remove the SCD device
+ * @pdev: Platform device
+ *
+ * Clean up the per-stream channels, the DMA engine and the core clock.
+ *
+ * Return: 0
+ */
+static int xscd_remove(struct platform_device *pdev)
+{
+	struct xscd_device *xscd = platform_get_drvdata(pdev);
+	struct device_node *subdev_node;
+	unsigned int id = 0;
+
+	for_each_child_of_node(xscd->dev->of_node, subdev_node) {
+		/*
+		 * Probe only initializes up to num_streams channels; extra
+		 * DT children were skipped and must not be cleaned up here.
+		 */
+		if (id >= xscd->num_streams) {
+			of_node_put(subdev_node);
+			break;
+		}
+
+		xscd_chan_cleanup(xscd, id, subdev_node);
+		id++;
+	}
+
+	xscd_dma_cleanup(xscd);
+	clk_disable_unprepare(xscd->clk);
+
+	return 0;
+}
+
+/* DT match table: the IP core is described as "xlnx,v-scd". */
+static const struct of_device_id xscd_of_id_table[] = {
+	{ .compatible = "xlnx,v-scd" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, xscd_of_id_table);
+
+static struct platform_driver xscd_driver = {
+	.driver = {
+		.name = "xilinx-scd",
+		.of_match_table = xscd_of_id_table,
+	},
+	.probe = xscd_probe,
+	.remove = xscd_remove,
+};
+
+module_platform_driver(xscd_driver);
+
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_DESCRIPTION("Xilinx Scene Change Detection");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-scenechange.h b/drivers/media/platform/xilinx/xilinx-scenechange.h
new file mode 100644
index 000000000000..5cc9ce54bfc4
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-scenechange.h
@@ -0,0 +1,245 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx Scene Change Detection driver
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Authors: Anand Ashok Dumbre <anand.ashok.dumbre@xilinx.com>
+ * Satish Kumar Nagireddy <satish.nagireddy.nagireddy@xilinx.com>
+ */
+
+#ifndef _XILINX_SCENECHANGE_H_
+#define _XILINX_SCENECHANGE_H_
+
+#include <linux/bitops.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/xilinx-v4l2-controls.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+struct clk;
+struct device;
+struct device_node;
+struct gpio_desc;
+
+/* Register/Descriptor Offsets */
+#define XSCD_CTRL_OFFSET 0x000
+#define XSCD_CTRL_AP_START BIT(0)
+#define XSCD_CTRL_AP_DONE BIT(1)
+#define XSCD_CTRL_AP_IDLE BIT(2)
+#define XSCD_CTRL_AP_READY BIT(3)
+#define XSCD_CTRL_AUTO_RESTART BIT(7)
+
+#define XSCD_GIE_OFFSET 0x004
+#define XSCD_GIE_EN BIT(0)
+
+#define XSCD_IE_OFFSET 0x008
+#define XSCD_IE_AP_DONE BIT(0)
+#define XSCD_IE_AP_READY BIT(1)
+
+#define XSCD_ISR_OFFSET 0x00c
+#define XSCD_WIDTH_OFFSET 0x010
+#define XSCD_HEIGHT_OFFSET 0x018
+#define XSCD_STRIDE_OFFSET 0x020
+#define XSCD_VID_FMT_OFFSET 0x028
+#define XSCD_VID_FMT_RGB 0
+#define XSCD_VID_FMT_YUV_444 1
+#define XSCD_VID_FMT_YUV_422 2
+#define XSCD_VID_FMT_YUV_420 3
+#define XSCD_VID_FMT_Y8 24
+#define XSCD_VID_FMT_Y10 25
+
+#define XSCD_SUBSAMPLE_OFFSET 0x030
+#define XSCD_SAD_OFFSET 0x038
+#define XSCD_ADDR_OFFSET 0x040
+#define XSCD_CHAN_OFFSET 0x100
+#define XSCD_CHAN_EN_OFFSET 0x780
+
+#define XSCD_MAX_CHANNELS 8
+
+#define XSCD_RESET_DEASSERT (0)
+#define XSCD_RESET_ASSERT (1)
+
+/****************************** PROTOTYPES ************************************/
+
+struct xscd_device;
+
+/**
+ * struct xscd_dma_desc - Software DMA descriptor for a luma frame
+ * @luma_plane_addr: Luma plane buffer address
+ * @vsize: vertical size (number of lines) of the luma frame
+ * @hsize: horizontal size (width) of the luma frame
+ * @stride: stride of the luma frame
+ */
+struct xscd_dma_desc {
+	dma_addr_t luma_plane_addr;
+	u32 vsize;
+	u32 hsize;
+	u32 stride;
+};
+
+/**
+ * struct xscd_dma_tx_descriptor - Per Transaction structure
+ * @async_tx: Async transaction descriptor
+ * @sw: Software Descriptor
+ * @node: Node in the channel descriptor list
+ */
+struct xscd_dma_tx_descriptor {
+ struct dma_async_tx_descriptor async_tx;
+ struct xscd_dma_desc sw;
+ struct list_head node;
+};
+
+static inline struct xscd_dma_tx_descriptor *
+to_xscd_dma_tx_descriptor(struct dma_async_tx_descriptor *tx)
+{
+ return container_of(tx, struct xscd_dma_tx_descriptor, async_tx);
+}
+
+/**
+ * struct xscd_dma_chan - DMA Channel structure
+ * @xscd: SCD device
+ * @iomem: I/O memory address of the channel registers
+ * @id: scene change channel ID
+ * @common: DMA common channel
+ * @tasklet: Cleanup work after irq
+ * @lock: Protects pending_list, done_list, active_desc, enabled and running
+ * @pending_list: Descriptors waiting
+ * @done_list: Complete descriptors
+ * @active_desc: Currently active buffer being read/written to
+ * @enabled: Channel is enabled
+ * @running: Channel is running
+ * @wait: Wait queue to wait for the channel to stop
+ */
+struct xscd_dma_chan {
+ struct xscd_device *xscd;
+ void __iomem *iomem;
+ unsigned int id;
+
+ struct dma_chan common;
+ struct tasklet_struct tasklet;
+
+ spinlock_t lock;
+ struct list_head pending_list;
+ struct list_head done_list;
+ struct xscd_dma_tx_descriptor *active_desc;
+ unsigned int enabled;
+ unsigned int running;
+ wait_queue_head_t wait;
+};
+
+static inline struct xscd_dma_chan *to_xscd_dma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct xscd_dma_chan, common);
+}
+
+/**
+ * struct xscd_chan - Video Stream structure
+ * @id: scene change channel ID
+ * @threshold: scene change detection threshold
+ * @iomem: I/O memory address of the channel registers
+ * @xscd: SCD device
+ * @subdev: V4L2 subdevice
+ * @ctrl_handler: V4L2 control handler
+ * @pads: media pads
+ * @format: active V4L2 media bus format for the pad
+ * @event: scene change event
+ * @dmachan: dma channel part of the scenechange stream
+ * @lock: lock to protect active stream count variable
+ */
+struct xscd_chan {
+ int id;
+ int threshold;
+ void __iomem *iomem;
+ struct xscd_device *xscd;
+ struct v4l2_subdev subdev;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct media_pad pads[2];
+ struct v4l2_mbus_framefmt format;
+ struct v4l2_event event;
+ struct xscd_dma_chan dmachan;
+
+ /* Lock to protect active stream count */
+ struct mutex lock;
+};
+
+static inline struct xscd_chan *to_xscd_chan(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xscd_chan, subdev);
+}
+
+/**
+ * struct xscd_device - Xilinx Scene Change Detection device structure
+ * @dev: (OF) device
+ * @iomem: device I/O register space remapped to kernel virtual memory
+ * @rst_gpio: reset GPIO
+ * @clk: video core clock
+ * @irq: Device IRQ
+ * @memory_based: Flag to identify memory based mode
+ * @num_streams: Number of streams in the design
+ * @chans: video stream instances
+ * @dma_device: DMA device structure
+ * @channels: DMA channels
+ * @lock: Protects the running field
+ * @running: True when the SCD core is running
+ */
+struct xscd_device {
+ struct device *dev;
+ void __iomem *iomem;
+ struct gpio_desc *rst_gpio;
+ struct clk *clk;
+ int irq;
+
+ u8 memory_based;
+ int num_streams;
+
+ struct xscd_chan *chans;
+
+ struct dma_device dma_device;
+ struct xscd_dma_chan *channels[XSCD_MAX_CHANNELS];
+
+ /* This lock is to protect the running field */
+ spinlock_t lock;
+ u8 running;
+};
+
+/*
+ * Register related operations
+ *
+ * Thin MMIO accessors used by both the channel and DMA code. @addr is a
+ * byte offset from the mapped register base.
+ */
+static inline u32 xscd_read(void __iomem *iomem, u32 addr)
+{
+	return ioread32(iomem + addr);
+}
+
+static inline void xscd_write(void __iomem *iomem, u32 addr, u32 value)
+{
+	iowrite32(value, iomem + addr);
+}
+
+/* Read-modify-write: clear the bits in @clr. */
+static inline void xscd_clr(void __iomem *iomem, u32 addr, u32 clr)
+{
+	xscd_write(iomem, addr, xscd_read(iomem, addr) & ~clr);
+}
+
+/* Read-modify-write: set the bits in @set. */
+static inline void xscd_set(void __iomem *iomem, u32 addr, u32 set)
+{
+	xscd_write(iomem, addr, xscd_read(iomem, addr) | set);
+}
+
+void xscd_dma_enable_channel(struct xscd_dma_chan *chan, bool enable);
+void xscd_dma_irq_handler(struct xscd_device *xscd);
+int xscd_dma_init(struct xscd_device *xscd);
+void xscd_dma_cleanup(struct xscd_device *xscd);
+
+void xscd_chan_event_notify(struct xscd_chan *chan);
+int xscd_chan_init(struct xscd_device *xscd, unsigned int chan_id,
+ struct device_node *node);
+void xscd_chan_cleanup(struct xscd_device *xscd, unsigned int chan_id,
+ struct device_node *node);
+#endif
diff --git a/drivers/media/platform/xilinx/xilinx-sdirxss.c b/drivers/media/platform/xilinx/xilinx-sdirxss.c
new file mode 100644
index 000000000000..29e599a00760
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-sdirxss.c
@@ -0,0 +1,2371 @@
+/*
+ * Xilinx SDI Rx Subsystem
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * Contacts: Vishal Sagar <vsagar@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/media/xilinx-vip.h>
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/v4l2-dv-timings.h>
+#include <linux/v4l2-subdev.h>
+#include <linux/xilinx-sdirxss.h>
+#include <linux/xilinx-v4l2-controls.h>
+#include <media/media-entity.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+#include "xilinx-vip.h"
+
+/*
+ * SDI Rx register map, bitmask and offsets
+ */
+#define XSDIRX_RST_CTRL_REG 0x00
+#define XSDIRX_MDL_CTRL_REG 0x04
+#define XSDIRX_GLBL_IER_REG 0x0C
+#define XSDIRX_ISR_REG 0x10
+#define XSDIRX_IER_REG 0x14
+#define XSDIRX_ST352_VALID_REG 0x18
+#define XSDIRX_ST352_DS1_REG 0x1C
+#define XSDIRX_ST352_DS3_REG 0x20
+#define XSDIRX_ST352_DS5_REG 0x24
+#define XSDIRX_ST352_DS7_REG 0x28
+#define XSDIRX_ST352_DS9_REG 0x2C
+#define XSDIRX_ST352_DS11_REG 0x30
+#define XSDIRX_ST352_DS13_REG 0x34
+#define XSDIRX_ST352_DS15_REG 0x38
+#define XSDIRX_VERSION_REG 0x3C
+#define XSDIRX_SS_CONFIG_REG 0x40
+#define XSDIRX_MODE_DET_STAT_REG 0x44
+#define XSDIRX_TS_DET_STAT_REG 0x48
+#define XSDIRX_EDH_STAT_REG 0x4C
+#define XSDIRX_EDH_ERRCNT_EN_REG 0x50
+#define XSDIRX_EDH_ERRCNT_REG 0x54
+#define XSDIRX_CRC_ERRCNT_REG 0x58
+#define XSDIRX_VID_LOCK_WINDOW_REG 0x5C
+#define XSDIRX_SB_RX_STS_REG 0x60
+
+#define XSDIRX_RST_CTRL_SS_EN_MASK BIT(0)
+#define XSDIRX_RST_CTRL_SRST_MASK BIT(1)
+#define XSDIRX_RST_CTRL_RST_CRC_ERRCNT_MASK BIT(2)
+#define XSDIRX_RST_CTRL_RST_EDH_ERRCNT_MASK BIT(3)
+#define XSDIRX_RST_CTRL_SDIRX_BRIDGE_ENB_MASK BIT(8)
+#define XSDIRX_RST_CTRL_VIDIN_AXI4S_MOD_ENB_MASK BIT(9)
+#define XSDIRX_RST_CTRL_BRIDGE_CH_FMT_OFFSET 10
+#define XSDIRX_RST_CTRL_BRIDGE_CH_FMT_MASK GENMASK(12, 10)
+#define XSDIRX_RST_CTRL_BRIDGE_CH_FMT_YUV444 1
+
+#define XSDIRX_MDL_CTRL_FRM_EN_MASK BIT(4)
+#define XSDIRX_MDL_CTRL_MODE_DET_EN_MASK BIT(5)
+#define XSDIRX_MDL_CTRL_MODE_HD_EN_MASK BIT(8)
+#define XSDIRX_MDL_CTRL_MODE_SD_EN_MASK BIT(9)
+#define XSDIRX_MDL_CTRL_MODE_3G_EN_MASK BIT(10)
+#define XSDIRX_MDL_CTRL_MODE_6G_EN_MASK BIT(11)
+#define XSDIRX_MDL_CTRL_MODE_12GI_EN_MASK BIT(12)
+#define XSDIRX_MDL_CTRL_MODE_12GF_EN_MASK BIT(13)
+#define XSDIRX_MDL_CTRL_MODE_AUTO_DET_MASK GENMASK(13, 8)
+
+#define XSDIRX_MDL_CTRL_FORCED_MODE_OFFSET 16
+#define XSDIRX_MDL_CTRL_FORCED_MODE_MASK GENMASK(18, 16)
+
+#define XSDIRX_GLBL_INTR_EN_MASK BIT(0)
+
+#define XSDIRX_INTR_VIDLOCK_MASK BIT(0)
+#define XSDIRX_INTR_VIDUNLOCK_MASK BIT(1)
+#define XSDIRX_INTR_OVERFLOW_MASK BIT(9)
+#define XSDIRX_INTR_UNDERFLOW_MASK BIT(10)
+
+#define XSDIRX_INTR_ALL_MASK (XSDIRX_INTR_VIDLOCK_MASK |\
+ XSDIRX_INTR_VIDUNLOCK_MASK |\
+ XSDIRX_INTR_OVERFLOW_MASK |\
+ XSDIRX_INTR_UNDERFLOW_MASK)
+
+#define XSDIRX_ST352_VALID_DS1_MASK BIT(0)
+#define XSDIRX_ST352_VALID_DS3_MASK BIT(1)
+#define XSDIRX_ST352_VALID_DS5_MASK BIT(2)
+#define XSDIRX_ST352_VALID_DS7_MASK BIT(3)
+#define XSDIRX_ST352_VALID_DS9_MASK BIT(4)
+#define XSDIRX_ST352_VALID_DS11_MASK BIT(5)
+#define XSDIRX_ST352_VALID_DS13_MASK BIT(6)
+#define XSDIRX_ST352_VALID_DS15_MASK BIT(7)
+
+#define XSDIRX_MODE_DET_STAT_RX_MODE_MASK GENMASK(2, 0)
+#define XSDIRX_MODE_DET_STAT_MODE_LOCK_MASK BIT(3)
+#define XSDIRX_MODE_DET_STAT_ACT_STREAM_MASK GENMASK(6, 4)
+#define XSDIRX_MODE_DET_STAT_ACT_STREAM_OFFSET 4
+#define XSDIRX_MODE_DET_STAT_LVLB_3G_MASK BIT(7)
+
+#define XSDIRX_ACTIVE_STREAMS_1 0x0
+#define XSDIRX_ACTIVE_STREAMS_2 0x1
+#define XSDIRX_ACTIVE_STREAMS_4 0x2
+#define XSDIRX_ACTIVE_STREAMS_8 0x3
+#define XSDIRX_ACTIVE_STREAMS_16 0x4
+
+#define XSDIRX_TS_DET_STAT_LOCKED_MASK BIT(0)
+#define XSDIRX_TS_DET_STAT_SCAN_MASK BIT(1)
+#define XSDIRX_TS_DET_STAT_SCAN_OFFSET (1)
+#define XSDIRX_TS_DET_STAT_FAMILY_MASK GENMASK(7, 4)
+#define XSDIRX_TS_DET_STAT_FAMILY_OFFSET (4)
+#define XSDIRX_TS_DET_STAT_RATE_MASK GENMASK(11, 8)
+#define XSDIRX_TS_DET_STAT_RATE_OFFSET (8)
+
+#define XSDIRX_TS_DET_STAT_RATE_NONE 0x0
+#define XSDIRX_TS_DET_STAT_RATE_23_98HZ 0x2
+#define XSDIRX_TS_DET_STAT_RATE_24HZ 0x3
+#define XSDIRX_TS_DET_STAT_RATE_47_95HZ 0x4
+#define XSDIRX_TS_DET_STAT_RATE_25HZ 0x5
+#define XSDIRX_TS_DET_STAT_RATE_29_97HZ 0x6
+#define XSDIRX_TS_DET_STAT_RATE_30HZ 0x7
+#define XSDIRX_TS_DET_STAT_RATE_48HZ 0x8
+#define XSDIRX_TS_DET_STAT_RATE_50HZ 0x9
+#define XSDIRX_TS_DET_STAT_RATE_59_94HZ 0xA
+#define XSDIRX_TS_DET_STAT_RATE_60HZ 0xB
+
+#define XSDIRX_EDH_STAT_EDH_AP_MASK BIT(0)
+#define XSDIRX_EDH_STAT_EDH_FF_MASK BIT(1)
+#define XSDIRX_EDH_STAT_EDH_ANC_MASK BIT(2)
+#define XSDIRX_EDH_STAT_AP_FLAG_MASK GENMASK(8, 4)
+#define XSDIRX_EDH_STAT_FF_FLAG_MASK GENMASK(13, 9)
+#define XSDIRX_EDH_STAT_ANC_FLAG_MASK GENMASK(18, 14)
+#define XSDIRX_EDH_STAT_PKT_FLAG_MASK GENMASK(22, 19)
+
+#define XSDIRX_EDH_ERRCNT_COUNT_MASK GENMASK(15, 0)
+
+#define XSDIRX_CRC_ERRCNT_COUNT_MASK GENMASK(31, 16)
+#define XSDIRX_CRC_ERRCNT_DS_CRC_MASK GENMASK(15, 0)
+
+#define XSDIRX_VERSION_REV_MASK GENMASK(7, 0)
+#define XSDIRX_VERSION_PATCHID_MASK GENMASK(11, 8)
+#define XSDIRX_VERSION_VER_REV_MASK GENMASK(15, 12)
+#define XSDIRX_VERSION_VER_MIN_MASK GENMASK(23, 16)
+#define XSDIRX_VERSION_VER_MAJ_MASK GENMASK(31, 24)
+
+#define XSDIRX_SS_CONFIG_EDH_INCLUDED_MASK BIT(1)
+
+#define XSDIRX_STAT_SB_RX_TDATA_CHANGE_DONE_MASK BIT(0)
+#define XSDIRX_STAT_SB_RX_TDATA_CHANGE_FAIL_MASK BIT(1)
+#define XSDIRX_STAT_SB_RX_TDATA_GT_RESETDONE_MASK BIT(2)
+#define XSDIRX_STAT_SB_RX_TDATA_GT_BITRATE_MASK BIT(3)
+
+#define XSDIRX_DEFAULT_WIDTH (1920)
+#define XSDIRX_DEFAULT_HEIGHT (1080)
+
+#define XSDIRX_MAX_STR_LENGTH 16
+
+#define XSDIRXSS_SDI_STD_3G 0
+#define XSDIRXSS_SDI_STD_6G 1
+#define XSDIRXSS_SDI_STD_12G_8DS 2
+
+#define XSDIRX_DEFAULT_VIDEO_LOCK_WINDOW 0x3000
+
+#define XSDIRX_MODE_HD_MASK 0x0
+#define XSDIRX_MODE_SD_MASK 0x1
+#define XSDIRX_MODE_3G_MASK 0x2
+#define XSDIRX_MODE_6G_MASK 0x4
+#define XSDIRX_MODE_12GI_MASK 0x5
+#define XSDIRX_MODE_12GF_MASK 0x6
+
+/* Maximum number of events per file handle. */
+#define XSDIRX_MAX_EVENTS (128)
+
+/* ST352 related macros */
+#define XST352_PAYLOAD_BYTE_MASK 0xFF
+#define XST352_PAYLOAD_BYTE1_SHIFT 0
+#define XST352_PAYLOAD_BYTE2_SHIFT 8
+#define XST352_PAYLOAD_BYTE3_SHIFT 16
+#define XST352_PAYLOAD_BYTE4_SHIFT 24
+
+#define XST352_BYTE1_ST292_1x720L_1_5G 0x84
+#define XST352_BYTE1_ST292_1x1080L_1_5G 0x85
+#define XST352_BYTE1_ST425_2008_750L_3GB 0x88
+#define XST352_BYTE1_ST425_2008_1125L_3GA 0x89
+#define XST352_BYTE1_ST372_DL_3GB 0x8A
+#define XST352_BYTE1_ST372_2x720L_3GB 0x8B
+#define XST352_BYTE1_ST372_2x1080L_3GB 0x8C
+#define XST352_BYTE1_ST2081_10_2160L_6G 0xC0
+#define XST352_BYTE1_ST2081_10_2_1080L_6G 0xC1
+#define XST352_BYTE1_ST2081_10_DL_2160L_6G 0xC2
+#define XST352_BYTE1_ST2082_10_2160L_12G 0xCE
+
+#define XST352_BYTE2_TS_TYPE_MASK BIT(15)
+#define XST352_BYTE2_TS_TYPE_OFFSET 15
+#define XST352_BYTE2_PIC_TYPE_MASK BIT(14)
+#define XST352_BYTE2_PIC_TYPE_OFFSET 14
+#define XST352_BYTE2_TS_PIC_TYPE_INTERLACED 0
+#define XST352_BYTE2_TS_PIC_TYPE_PROGRESSIVE 1
+
+#define XST352_BYTE2_FPS_MASK 0xF
+#define XST352_BYTE2_FPS_SHIFT 8
+#define XST352_BYTE2_FPS_24F 0x2
+#define XST352_BYTE2_FPS_24 0x3
+#define XST352_BYTE2_FPS_48F 0x4
+#define XST352_BYTE2_FPS_25 0x5
+#define XST352_BYTE2_FPS_30F 0x6
+#define XST352_BYTE2_FPS_30 0x7
+#define XST352_BYTE2_FPS_48 0x8
+#define XST352_BYTE2_FPS_50 0x9
+#define XST352_BYTE2_FPS_60F 0xA
+#define XST352_BYTE2_FPS_60 0xB
+/* Table 4 ST 2081-10:2015 */
+#define XST352_BYTE2_FPS_96 0xC
+#define XST352_BYTE2_FPS_100 0xD
+#define XST352_BYTE2_FPS_120 0xE
+#define XST352_BYTE2_FPS_120F 0xF
+
+#define XST352_BYTE3_ACT_LUMA_COUNT_MASK BIT(22)
+#define XST352_BYTE3_ACT_LUMA_COUNT_OFFSET 22
+
+#define XST352_BYTE3_COLOR_FORMAT_MASK GENMASK(19, 16)
+#define XST352_BYTE3_COLOR_FORMAT_OFFSET 16
+#define XST352_BYTE3_COLOR_FORMAT_422 0x0
+#define XST352_BYTE3_COLOR_FORMAT_YUV444 0x1
+#define XST352_BYTE3_COLOR_FORMAT_420 0x3
+#define XST352_BYTE3_COLOR_FORMAT_GBR 0x2
+
+#define XST352_BYTE4_BIT_DEPTH_MASK GENMASK(25, 24)
+#define XST352_BYTE4_BIT_DEPTH_OFFSET 24
+#define XST352_BYTE4_BIT_DEPTH_10 0x1
+#define XST352_BYTE4_BIT_DEPTH_12 0x2
+
+#define CLK_INT 148500000UL
+
/**
 * enum sdi_family_enc - SDI Transport Video Format Detected with Active Pixels
 * @XSDIRX_SMPTE_ST_274: SMPTE ST 274 detected with AP 1920x1080
 * @XSDIRX_SMPTE_ST_296: SMPTE ST 296 detected with AP 1280x720
 * @XSDIRX_SMPTE_ST_2048_2: SMPTE ST 2048-2 detected with AP 2048x1080
 * @XSDIRX_SMPTE_ST_295: SMPTE ST 295 detected with AP 1920x1080
 * @XSDIRX_NTSC: NTSC encoding detected with AP 720x486
 * @XSDIRX_PAL: PAL encoding detected with AP 720x576
 * @XSDIRX_TS_UNKNOWN: Unknown SMPTE Transport family type
 *
 * NOTE(review): values appear to match the hardware encoding of the
 * XSDIRX_TS_DET_STAT_FAMILY register field - confirm against the IP spec.
 */
enum sdi_family_enc {
	XSDIRX_SMPTE_ST_274	= 0,
	XSDIRX_SMPTE_ST_296	= 1,
	XSDIRX_SMPTE_ST_2048_2	= 2,
	XSDIRX_SMPTE_ST_295	= 3,
	XSDIRX_NTSC		= 8,
	XSDIRX_PAL		= 9,
	XSDIRX_TS_UNKNOWN	= 15
};
+
/**
 * struct xsdirxss_core - Core configuration SDI Rx Subsystem device structure
 * @dev: Platform structure
 * @iomem: Base address of subsystem
 * @irq: requested irq number
 * @include_edh: EDH processor presence
 * @mode: 3G/6G/12G mode (one of XSDIRXSS_SDI_STD_*)
 * @clks: array of clocks, ordered as in xsdirxss_clks[]
 * @num_clks: number of clocks
 * @rst_gt_gpio: reset gt gpio (fmc init done)
 * @bpc: Bits per component, can be 10 or 12
 */
struct xsdirxss_core {
	struct device *dev;
	void __iomem *iomem;
	int irq;
	bool include_edh;
	int mode;
	struct clk_bulk_data *clks;
	int num_clks;
	struct gpio_desc *rst_gt_gpio;
	u32 bpc;
};
+
/**
 * struct xsdirxss_state - SDI Rx Subsystem device structure
 * @core: Core structure for MIPI SDI Rx Subsystem
 * @subdev: The v4l2 subdev structure
 * @ctrl_handler: control handler
 * @event: Holds the video unlock event
 * @format: Active V4L2 format on source pad
 * @default_format: default V4L2 media bus format
 * @frame_interval: Captures the frame rate
 * @vip_format: format information corresponding to the active format
 * @pad: source media pad
 * @vidlockwin: Video lock window value set by control
 * @edhmask: EDH mask set by control
 * @searchmask: Search mask of modes to detect, set by control
 * @streaming: Flag for storing streaming state
 * @vidlocked: Flag indicating SDI Rx has locked onto video stream
 * @ts_is_interlaced: Flag indicating Transport Stream is interlaced.
 * @framer_enable: Flag for framer enabled or not set by control
 *
 * This structure contains the device driver related parameters
 */
struct xsdirxss_state {
	struct xsdirxss_core core;
	struct v4l2_subdev subdev;
	struct v4l2_ctrl_handler ctrl_handler;
	struct v4l2_event event;
	struct v4l2_mbus_framefmt format;
	struct v4l2_mbus_framefmt default_format;
	struct v4l2_fract frame_interval;
	const struct xvip_video_format *vip_format;
	struct media_pad pad;
	u32 vidlockwin;
	u32 edhmask;
	u16 searchmask;
	bool streaming;
	bool vidlocked;
	bool ts_is_interlaced;
	bool framer_enable;
};
+
+/* List of clocks required by UHD-SDI Rx subsystem */
+static const char * const xsdirxss_clks[] = {
+ "s_axi_aclk", "sdi_rx_clk", "video_out_clk",
+};
+
/* Media bus formats selectable when the IP is configured for 10 bpc */
static const u32 xsdirxss_10bpc_mbus_fmts[] = {
	MEDIA_BUS_FMT_UYVY10_1X20,
	MEDIA_BUS_FMT_VYYUYY10_4X20,
	MEDIA_BUS_FMT_VUY10_1X30,
	MEDIA_BUS_FMT_RBG101010_1X30,
};
+
/* Media bus formats selectable when the IP is configured for 12 bpc */
static const u32 xsdirxss_12bpc_mbus_fmts[] = {
	MEDIA_BUS_FMT_UYVY12_1X24,
	MEDIA_BUS_FMT_UYYVYY12_4X24,
	MEDIA_BUS_FMT_VUY12_1X36,
	MEDIA_BUS_FMT_RBG121212_1X36,
};
+
+#define XLNX_V4L2_DV_BT_2048X1080P24 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(2048, 1080, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 74250000, 510, 44, 148, 4, 5, 36, 0, 0, 0, \
+ V4L2_DV_BT_STD_SDI) \
+}
+
+#define XLNX_V4L2_DV_BT_2048X1080P25 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(2048, 1080, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 74250000, 400, 44, 148, 4, 5, 36, 0, 0, 0, \
+ V4L2_DV_BT_STD_SDI) \
+}
+
+#define XLNX_V4L2_DV_BT_2048X1080P30 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(2048, 1080, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 74250000, 66, 20, 66, 4, 5, 36, 0, 0, 0, \
+ V4L2_DV_BT_STD_SDI) \
+}
+
+#define XLNX_V4L2_DV_BT_2048X1080I48 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(2048, 1080, 1, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 74250000, 329, 44, 329, 2, 5, 15, 3, 5, 15, \
+ V4L2_DV_BT_STD_SDI) \
+}
+
+#define XLNX_V4L2_DV_BT_2048X1080I50 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(2048, 1080, 1, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 74250000, 274, 44, 274, 2, 5, 15, 3, 5, 15, \
+ V4L2_DV_BT_STD_SDI) \
+}
+
+#define XLNX_V4L2_DV_BT_2048X1080I60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(2048, 1080, 1, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 74250000, 66, 20, 66, 2, 5, 15, 3, 5, 15, \
+ V4L2_DV_BT_STD_SDI) \
+}
+
+#define XLNX_V4L2_DV_BT_1920X1080P48 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1080, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 148500000, 638, 44, 148, 4, 5, 36, 0, 0, 0, \
+ V4L2_DV_BT_STD_SDI) \
+}
+
+#define XLNX_V4L2_DV_BT_2048X1080P48 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(2048, 1080, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 148500000, 510, 44, 148, 4, 5, 36, 0, 0, 0, \
+ V4L2_DV_BT_STD_SDI) \
+}
+
+#define XLNX_V4L2_DV_BT_2048X1080P50 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(2048, 1080, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 148500000, 400, 44, 148, 4, 5, 36, 0, 0, 0, \
+ V4L2_DV_BT_STD_SDI) \
+}
+
+#define XLNX_V4L2_DV_BT_2048X1080P60 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(2048, 1080, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 148500000, 88, 44, 20, 4, 5, 36, 0, 0, 0, \
+ V4L2_DV_BT_STD_SDI) \
+}
+
+#define XLNX_V4L2_DV_BT_3840X2160P48 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 594000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
+ V4L2_DV_BT_STD_SDI) \
+}
+
+#define XLNX_V4L2_DV_BT_4096X2160P48 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 594000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
+ V4L2_DV_BT_STD_SDI) \
+}
+
+#define XLNX_V4L2_DV_BT_1920X1080I48 { \
+ .type = V4L2_DV_BT_656_1120, \
+ V4L2_INIT_BT_TIMINGS(1920, 1080, 1, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 148500000, 371, 88, 371, 2, 5, 15, 3, 5, 15, \
+ V4L2_DV_BT_STD_SDI) \
+}
+
/*
 * All DV timings the subsystem can capture: standard CEA/SDI timings plus
 * the Xilinx-specific XLNX_V4L2_DV_BT_* entries defined above.
 * NOTE(review): presumably enumerated through the subdev DV-timings ops -
 * confirm against the ops table further down the file.
 */
static const struct v4l2_dv_timings fmt_cap[] = {
	V4L2_DV_BT_SDI_720X487I60,
	V4L2_DV_BT_CEA_720X576I50,
	V4L2_DV_BT_CEA_1280X720P24,
	V4L2_DV_BT_CEA_1280X720P25,
	V4L2_DV_BT_CEA_1280X720P30,
	V4L2_DV_BT_CEA_1280X720P50,
	V4L2_DV_BT_CEA_1280X720P60,
	V4L2_DV_BT_CEA_1920X1080P24,
	V4L2_DV_BT_CEA_1920X1080P30,
	V4L2_DV_BT_CEA_1920X1080I50,
	V4L2_DV_BT_CEA_1920X1080I60,
	V4L2_DV_BT_CEA_1920X1080P50,
	V4L2_DV_BT_CEA_1920X1080P60,
	V4L2_DV_BT_CEA_3840X2160P24,
	V4L2_DV_BT_CEA_3840X2160P30,
	V4L2_DV_BT_CEA_3840X2160P50,
	V4L2_DV_BT_CEA_3840X2160P60,
	V4L2_DV_BT_CEA_4096X2160P24,
	V4L2_DV_BT_CEA_4096X2160P25,
	V4L2_DV_BT_CEA_4096X2160P30,
	V4L2_DV_BT_CEA_4096X2160P50,
	V4L2_DV_BT_CEA_4096X2160P60,

	XLNX_V4L2_DV_BT_2048X1080P24,
	XLNX_V4L2_DV_BT_2048X1080P25,
	XLNX_V4L2_DV_BT_2048X1080P30,
	XLNX_V4L2_DV_BT_2048X1080I48,
	XLNX_V4L2_DV_BT_2048X1080I50,
	XLNX_V4L2_DV_BT_2048X1080I60,
	XLNX_V4L2_DV_BT_2048X1080P48,
	XLNX_V4L2_DV_BT_2048X1080P50,
	XLNX_V4L2_DV_BT_2048X1080P60,
	XLNX_V4L2_DV_BT_1920X1080P48,
	XLNX_V4L2_DV_BT_1920X1080I48,
	XLNX_V4L2_DV_BT_3840X2160P48,
	XLNX_V4L2_DV_BT_4096X2160P48,
};
+
/*
 * One row of the detected-stream -> DV-timings lookup table.
 * For interlaced entries @height is the per-field height (half the frame
 * height, e.g. 540 for 1080i) and @fps the frame (not field) rate, as the
 * table initializers below show.
 */
struct xsdirxss_dv_map {
	u32 width;
	u32 height;
	u32 fps;
	struct v4l2_dv_timings format;
};
+
+static const struct xsdirxss_dv_map xsdirxss_dv_timings[] = {
+ /* SD - 720x487i60 */
+ { 720, 243, 30, V4L2_DV_BT_SDI_720X487I60 },
+ /* SD - 720x576i50 */
+ { 720, 288, 25, V4L2_DV_BT_CEA_720X576I50 },
+ /* HD - 1280x720p23.98 */
+ /* HD - 1280x720p24 */
+ { 1280, 720, 24, V4L2_DV_BT_CEA_1280X720P24 },
+ /* HD - 1280x720p25 */
+ { 1280, 720, 25, V4L2_DV_BT_CEA_1280X720P25 },
+ /* HD - 1280x720p29.97 */
+ /* HD - 1280x720p30 */
+ { 1280, 720, 30, V4L2_DV_BT_CEA_1280X720P30 },
+ /* HD - 1280x720p50 */
+ { 1280, 720, 50, V4L2_DV_BT_CEA_1280X720P50 },
+ /* HD - 1280x720p59.94 */
+ /* HD - 1280x720p60 */
+ { 1280, 720, 60, V4L2_DV_BT_CEA_1280X720P60 },
+ /* HD - 1920x1080p23.98 */
+ /* HD - 1920x1080p24 */
+ { 1920, 1080, 24, V4L2_DV_BT_CEA_1920X1080P24 },
+ /* HD - 1920x1080p25 */
+ { 1920, 1080, 25, V4L2_DV_BT_CEA_1920X1080P25 },
+ /* HD - 1920x1080p29.97 */
+ /* HD - 1920x1080p30 */
+ { 1920, 1080, 30, V4L2_DV_BT_CEA_1920X1080P30 },
+
+ /* HD - 2048x1080p23.98 */
+ /* HD - 2048x1080p24 */
+ { 2048, 1080, 24, XLNX_V4L2_DV_BT_2048X1080P24 },
+ /* HD - 2048x1080p25 */
+ { 2048, 1080, 24, XLNX_V4L2_DV_BT_2048X1080P25 },
+ /* HD - 2048x1080p29.97 */
+ /* HD - 2048x1080p30 */
+ { 2048, 1080, 24, XLNX_V4L2_DV_BT_2048X1080P30 },
+ /* HD - 1920x1080i47.95 */
+ /* HD - 1920x1080i48 */
+ { 1920, 540, 24, XLNX_V4L2_DV_BT_1920X1080I48 },
+
+ /* HD - 1920x1080i50 */
+ { 1920, 540, 25, V4L2_DV_BT_CEA_1920X1080I50 },
+ /* HD - 1920x1080i59.94 */
+ /* HD - 1920x1080i60 */
+ { 1920, 540, 30, V4L2_DV_BT_CEA_1920X1080I60 },
+
+ /* HD - 2048x1080i47.95 */
+ /* HD - 2048x1080i48 */
+ { 2048, 540, 24, XLNX_V4L2_DV_BT_2048X1080I48 },
+ /* HD - 2048x1080i50 */
+ { 2048, 540, 25, XLNX_V4L2_DV_BT_2048X1080I50 },
+ /* HD - 2048x1080i59.94 */
+ /* HD - 2048x1080i60 */
+ { 2048, 540, 30, XLNX_V4L2_DV_BT_2048X1080I60 },
+ /* 3G - 1920x1080p47.95 */
+ /* 3G - 1920x1080p48 */
+ { 1920, 1080, 48, XLNX_V4L2_DV_BT_1920X1080P48 },
+
+ /* 3G - 1920x1080p50 148.5 */
+ { 1920, 1080, 50, V4L2_DV_BT_CEA_1920X1080P50 },
+ /* 3G - 1920x1080p59.94 148.5/1.001 */
+ /* 3G - 1920x1080p60 148.5 */
+ { 1920, 1080, 60, V4L2_DV_BT_CEA_1920X1080P60 },
+
+ /* 3G - 2048x1080p47.95 */
+ /* 3G - 2048x1080p48 */
+ { 2048, 1080, 48, XLNX_V4L2_DV_BT_2048X1080P48 },
+ /* 3G - 2048x1080p50 */
+ { 2048, 1080, 50, XLNX_V4L2_DV_BT_2048X1080P50 },
+ /* 3G - 2048x1080p59.94 */
+ /* 3G - 2048x1080p60 */
+ { 2048, 1080, 60, XLNX_V4L2_DV_BT_2048X1080P60 },
+
+ /* 6G - 3840X2160p23.98 */
+ /* 6G - 3840X2160p24 */
+ { 3840, 2160, 24, V4L2_DV_BT_CEA_3840X2160P24 },
+ /* 6G - 3840X2160p25 */
+ { 3840, 2160, 25, V4L2_DV_BT_CEA_3840X2160P25 },
+ /* 6G - 3840X2160p29.97 */
+ /* 6G - 3840X2160p30 */
+ { 3840, 2160, 30, V4L2_DV_BT_CEA_3840X2160P30 },
+ /* 6G - 4096X2160p23.98 */
+ /* 6G - 4096X2160p24 */
+ { 4096, 2160, 24, V4L2_DV_BT_CEA_4096X2160P24 },
+ /* 6G - 4096X2160p25 */
+ { 4096, 2160, 25, V4L2_DV_BT_CEA_4096X2160P25 },
+ /* 6G - 4096X2160p29.97 */
+ /* 6G - 4096X2160p30 */
+ { 4096, 2160, 30, V4L2_DV_BT_CEA_4096X2160P30 },
+ /* 12G - 3840X2160p47.95 */
+ /* 12G - 3840X2160p48 */
+ { 3840, 2160, 48, XLNX_V4L2_DV_BT_3840X2160P48 },
+
+ /* 12G - 3840X2160p50 */
+ { 3840, 2160, 50, V4L2_DV_BT_CEA_3840X2160P50 },
+ /* 12G - 3840X2160p59.94 */
+ /* 12G - 3840X2160p60 */
+ { 3840, 2160, 60, V4L2_DV_BT_CEA_3840X2160P60 },
+
+ /* 12G - 4096X2160p47.95 */
+ /* 12G - 4096X2160p48 */
+ { 3840, 2160, 48, XLNX_V4L2_DV_BT_4096X2160P48 },
+
+ /* 12G - 4096X2160p50 */
+ { 4096, 2160, 50, V4L2_DV_BT_CEA_4096X2160P50 },
+ /* 12G - 4096X2160p59.94 */
+ /* 12G - 4096X2160p60 */
+ { 4096, 2160, 60, V4L2_DV_BT_CEA_4096X2160P60 },
+};
+
/* Walk back from the embedded v4l2_subdev to the enclosing driver state. */
static inline struct xsdirxss_state *
to_xsdirxssstate(struct v4l2_subdev *subdev)
{
	return container_of(subdev, struct xsdirxss_state, subdev);
}
+
+/*
+ * Register related operations
+ */
+static inline u32 xsdirxss_read(struct xsdirxss_core *xsdirxss, u32 addr)
+{
+ return ioread32(xsdirxss->iomem + addr);
+}
+
+static inline void xsdirxss_write(struct xsdirxss_core *xsdirxss, u32 addr,
+ u32 value)
+{
+ iowrite32(value, xsdirxss->iomem + addr);
+}
+
+static inline void xsdirxss_clr(struct xsdirxss_core *xsdirxss, u32 addr,
+ u32 clr)
+{
+ xsdirxss_write(xsdirxss, addr, xsdirxss_read(xsdirxss, addr) & ~clr);
+}
+
+static inline void xsdirxss_set(struct xsdirxss_core *xsdirxss, u32 addr,
+ u32 set)
+{
+ xsdirxss_write(xsdirxss, addr, xsdirxss_read(xsdirxss, addr) | set);
+}
+
/* Gate the whole subsystem off via the SS enable bit in the reset register. */
static inline void xsdirx_core_disable(struct xsdirxss_core *core)
{
	xsdirxss_clr(core, XSDIRX_RST_CTRL_REG, XSDIRX_RST_CTRL_SS_EN_MASK);
}
+
/* Enable the subsystem via the SS enable bit in the reset register. */
static inline void xsdirx_core_enable(struct xsdirxss_core *core)
{
	xsdirxss_set(core, XSDIRX_RST_CTRL_REG, XSDIRX_RST_CTRL_SS_EN_MASK);
}
+
/*
 * Pulse the GT (gigabit transceiver) reset GPIO: assert for ~1us, then
 * release. NOTE(review): 1us pulse width assumed sufficient for the
 * transceiver - confirm against the board/GT requirements.
 */
static void xsdirxss_gt_reset(struct xsdirxss_core *core)
{
	gpiod_set_value(core->rst_gt_gpio, 0x1);
	udelay(1);
	gpiod_set_value(core->rst_gt_gpio, 0x0);
}
+
+static int xsdirx_set_modedetect(struct xsdirxss_core *core, u16 mask)
+{
+ u32 i, val;
+
+ mask &= XSDIRX_DETECT_ALL_MODES;
+ if (!mask) {
+ dev_err(core->dev, "Invalid bit mask = 0x%08x\n", mask);
+ return -EINVAL;
+ }
+
+ dev_dbg(core->dev, "mask = 0x%x\n", mask);
+
+ val = xsdirxss_read(core, XSDIRX_MDL_CTRL_REG);
+ val &= ~XSDIRX_MDL_CTRL_MODE_DET_EN_MASK;
+ val &= ~XSDIRX_MDL_CTRL_MODE_AUTO_DET_MASK;
+ val &= ~XSDIRX_MDL_CTRL_FORCED_MODE_MASK;
+
+ if (hweight16(mask) > 1) {
+ /* Multi mode detection as more than 1 bit set in mask */
+ dev_dbg(core->dev, "Detect multiple modes\n");
+ for (i = 0; i < XSDIRX_MODE_NUM_SUPPORTED; i++) {
+ switch (mask & (1 << i)) {
+ case BIT(XSDIRX_MODE_SD_OFFSET):
+ val |= XSDIRX_MDL_CTRL_MODE_SD_EN_MASK;
+ break;
+ case BIT(XSDIRX_MODE_HD_OFFSET):
+ val |= XSDIRX_MDL_CTRL_MODE_HD_EN_MASK;
+ break;
+ case BIT(XSDIRX_MODE_3G_OFFSET):
+ val |= XSDIRX_MDL_CTRL_MODE_3G_EN_MASK;
+ break;
+ case BIT(XSDIRX_MODE_6G_OFFSET):
+ val |= XSDIRX_MDL_CTRL_MODE_6G_EN_MASK;
+ break;
+ case BIT(XSDIRX_MODE_12GI_OFFSET):
+ val |= XSDIRX_MDL_CTRL_MODE_12GI_EN_MASK;
+ break;
+ case BIT(XSDIRX_MODE_12GF_OFFSET):
+ val |= XSDIRX_MDL_CTRL_MODE_12GF_EN_MASK;
+ break;
+ }
+ }
+ val |= XSDIRX_MDL_CTRL_MODE_DET_EN_MASK;
+ } else {
+ /* Fixed Mode */
+ u32 forced_mode_mask;
+
+ dev_dbg(core->dev, "Detect fixed mode\n");
+
+ /* Find offset of first bit set */
+ switch (__ffs(mask)) {
+ case XSDIRX_MODE_SD_OFFSET:
+ forced_mode_mask = XSDIRX_MODE_SD_MASK;
+ break;
+ case XSDIRX_MODE_HD_OFFSET:
+ forced_mode_mask = XSDIRX_MODE_HD_MASK;
+ break;
+ case XSDIRX_MODE_3G_OFFSET:
+ forced_mode_mask = XSDIRX_MODE_3G_MASK;
+ break;
+ case XSDIRX_MODE_6G_OFFSET:
+ forced_mode_mask = XSDIRX_MODE_6G_MASK;
+ break;
+ case XSDIRX_MODE_12GI_OFFSET:
+ forced_mode_mask = XSDIRX_MODE_12GI_MASK;
+ break;
+ case XSDIRX_MODE_12GF_OFFSET:
+ forced_mode_mask = XSDIRX_MODE_12GF_MASK;
+ break;
+ default:
+ forced_mode_mask = 0;
+ }
+ dev_dbg(core->dev, "Forced Mode Mask : 0x%x\n",
+ forced_mode_mask);
+ val |= forced_mode_mask << XSDIRX_MDL_CTRL_FORCED_MODE_OFFSET;
+ }
+
+ dev_dbg(core->dev, "Modes to be detected : sdi ctrl reg = 0x%08x\n",
+ val);
+ xsdirxss_write(core, XSDIRX_MDL_CTRL_REG, val);
+
+ return 0;
+}
+
+static void xsdirx_framer(struct xsdirxss_core *core, bool flag)
+{
+ if (flag)
+ xsdirxss_set(core, XSDIRX_MDL_CTRL_REG,
+ XSDIRX_MDL_CTRL_FRM_EN_MASK);
+ else
+ xsdirxss_clr(core, XSDIRX_MDL_CTRL_REG,
+ XSDIRX_MDL_CTRL_FRM_EN_MASK);
+}
+
+static void xsdirx_setedherrcnttrigger(struct xsdirxss_core *core, u32 enable)
+{
+ u32 val = xsdirxss_read(core, XSDIRX_EDH_ERRCNT_EN_REG);
+
+ val = enable & XSDIRX_EDH_ALLERR_MASK;
+
+ xsdirxss_write(core, XSDIRX_EDH_ERRCNT_EN_REG, val);
+}
+
static inline void xsdirx_setvidlockwindow(struct xsdirxss_core *core, u32 val)
{
	/*
	 * The video lock window is the amount of time for which the
	 * mode and transport stream should be locked to get the
	 * video lock interrupt.
	 */
	xsdirxss_write(core, XSDIRX_VID_LOCK_WINDOW_REG, val);
}
+
/* Mask the interrupt sources in @mask in the interrupt enable register. */
static inline void xsdirx_disableintr(struct xsdirxss_core *core, u32 mask)
{
	xsdirxss_clr(core, XSDIRX_IER_REG, mask);
}
+
/* Unmask the interrupt sources in @mask in the interrupt enable register. */
static inline void xsdirx_enableintr(struct xsdirxss_core *core, u32 mask)
{
	xsdirxss_set(core, XSDIRX_IER_REG, mask);
}
+
+static void xsdirx_globalintr(struct xsdirxss_core *core, bool flag)
+{
+ if (flag)
+ xsdirxss_set(core, XSDIRX_GLBL_IER_REG,
+ XSDIRX_GLBL_INTR_EN_MASK);
+ else
+ xsdirxss_clr(core, XSDIRX_GLBL_IER_REG,
+ XSDIRX_GLBL_INTR_EN_MASK);
+}
+
/*
 * Acknowledge the interrupts in @mask by writing 1s to the status register.
 * NOTE(review): this assumes the ISR bits are write-1-to-clear - confirm
 * against the UHD-SDI Rx register specification.
 */
static inline void xsdirx_clearintr(struct xsdirxss_core *core, u32 mask)
{
	xsdirxss_set(core, XSDIRX_ISR_REG, mask);
}
+
+static void xsdirx_vid_bridge_control(struct xsdirxss_core *core,
+ bool enable)
+{
+ struct xsdirxss_state *state =
+ container_of(core, struct xsdirxss_state, core);
+ u32 mask = XSDIRX_RST_CTRL_SDIRX_BRIDGE_ENB_MASK;
+
+ if (state->format.code == MEDIA_BUS_FMT_VUY10_1X30 ||
+ state->format.code == MEDIA_BUS_FMT_RBG101010_1X30 ||
+ state->format.code == MEDIA_BUS_FMT_RBG121212_1X36 ||
+ state->format.code == MEDIA_BUS_FMT_VUY12_1X36)
+ mask |= (XSDIRX_RST_CTRL_BRIDGE_CH_FMT_YUV444 <<
+ XSDIRX_RST_CTRL_BRIDGE_CH_FMT_OFFSET);
+
+ if (enable)
+ xsdirxss_set(core, XSDIRX_RST_CTRL_REG, mask);
+ else
+ xsdirxss_clr(core, XSDIRX_RST_CTRL_REG, mask);
+}
+
+static void xsdirx_axis4_bridge_control(struct xsdirxss_core *core,
+ bool enable)
+{
+ if (enable)
+ xsdirxss_set(core, XSDIRX_RST_CTRL_REG,
+ XSDIRX_RST_CTRL_VIDIN_AXI4S_MOD_ENB_MASK);
+ else
+ xsdirxss_clr(core, XSDIRX_RST_CTRL_REG,
+ XSDIRX_RST_CTRL_VIDIN_AXI4S_MOD_ENB_MASK);
+}
+
/*
 * Start/stop the video datapath through both bridges in dependency order:
 * the sdi to native bridge is followed by the native to axis4 bridge, so on
 * enable the downstream (AXI4S) bridge is brought up before the upstream
 * (SDI) bridge starts feeding it, and on disable the order is reversed.
 */
static void xsdirx_streamflow_control(struct xsdirxss_core *core, bool enable)
{
	if (enable) {
		xsdirx_axis4_bridge_control(core, enable);
		xsdirx_vid_bridge_control(core, enable);
	} else {
		xsdirx_vid_bridge_control(core, enable);
		xsdirx_axis4_bridge_control(core, enable);
	}
}
+
+static void xsdirxss_get_framerate(struct v4l2_fract *frame_interval,
+ u32 framerate)
+{
+ switch (framerate) {
+ case XSDIRX_TS_DET_STAT_RATE_23_98HZ:
+ frame_interval->numerator = 1001;
+ frame_interval->denominator = 24000;
+ break;
+ case XSDIRX_TS_DET_STAT_RATE_24HZ:
+ frame_interval->numerator = 1000;
+ frame_interval->denominator = 24000;
+ break;
+ case XSDIRX_TS_DET_STAT_RATE_25HZ:
+ frame_interval->numerator = 1000;
+ frame_interval->denominator = 25000;
+ break;
+ case XSDIRX_TS_DET_STAT_RATE_29_97HZ:
+ frame_interval->numerator = 1001;
+ frame_interval->denominator = 30000;
+ break;
+ case XSDIRX_TS_DET_STAT_RATE_30HZ:
+ frame_interval->numerator = 1000;
+ frame_interval->denominator = 30000;
+ break;
+ case XSDIRX_TS_DET_STAT_RATE_47_95HZ:
+ frame_interval->numerator = 1001;
+ frame_interval->denominator = 48000;
+ break;
+ case XSDIRX_TS_DET_STAT_RATE_48HZ:
+ frame_interval->numerator = 1000;
+ frame_interval->denominator = 48000;
+ break;
+ case XSDIRX_TS_DET_STAT_RATE_50HZ:
+ frame_interval->numerator = 1000;
+ frame_interval->denominator = 50000;
+ break;
+ case XSDIRX_TS_DET_STAT_RATE_59_94HZ:
+ frame_interval->numerator = 1001;
+ frame_interval->denominator = 60000;
+ break;
+ case XSDIRX_TS_DET_STAT_RATE_60HZ:
+ frame_interval->numerator = 1000;
+ frame_interval->denominator = 60000;
+ break;
+ default:
+ frame_interval->numerator = 1;
+ frame_interval->denominator = 1;
+ }
+}
+
/**
 * xsdirxss_set_gtclk - Reprogram the GT clock for the detected SDI mode
 * @state: pointer to driver state
 *
 * For 12G modes only, set "sdi_rx_clk" to the integer (148.5 MHz) or
 * fractional (148.5/1.001 MHz) rate depending on whether the current frame
 * interval is fractional (numerator 1001). The core is quiesced first
 * (disabled, interrupts masked) and fully reconfigured and re-enabled after
 * the rate change.
 */
static void xsdirxss_set_gtclk(struct xsdirxss_state *state)
{
	struct clk *gtclk;
	unsigned long clkrate;
	int ret, is_frac;
	struct xsdirxss_core *core = &state->core;
	u32 mode;

	mode = xsdirxss_read(core, XSDIRX_MODE_DET_STAT_REG);
	mode &= XSDIRX_MODE_DET_STAT_RX_MODE_MASK;

	/*
	 * TODO: For now, don't change the clock rate for any mode except 12G.
	 * In future, configure gt clock for all modes and enable clock only
	 * when needed (stream on/off).
	 */
	if (mode != XSDIRX_MODE_12GI_MASK && mode != XSDIRX_MODE_12GF_MASK)
		return;

	/* Quiesce the core before touching the clock */
	xsdirx_core_disable(core);
	xsdirx_globalintr(core, false);
	xsdirx_disableintr(core, XSDIRX_INTR_ALL_MASK);

	/* get sdi_rx_clk (index 1 of xsdirxss_clks[]) */
	gtclk = core->clks[1].clk;
	clkrate = clk_get_rate(gtclk);
	is_frac = state->frame_interval.numerator == 1001 ? 1 : 0;

	/* calculate clkrate */
	if (!is_frac)
		clkrate = CLK_INT;
	else
		clkrate = (CLK_INT * 1000) / 1001;

	ret = clk_set_rate(gtclk, clkrate);
	if (ret)
		dev_err(core->dev, "failed to set clk rate = %d\n", ret);

	clkrate = clk_get_rate(gtclk);

	dev_dbg(core->dev, "clkrate = %lu is_frac = %d\n",
		clkrate, is_frac);

	/* Restore configuration and bring the core back up */
	xsdirx_framer(core, state->framer_enable);
	xsdirx_setedherrcnttrigger(core, state->edhmask);
	xsdirx_setvidlockwindow(core, state->vidlockwin);
	xsdirx_set_modedetect(core, state->searchmask);
	xsdirx_enableintr(core, XSDIRX_INTR_ALL_MASK);
	xsdirx_globalintr(core, true);
	xsdirx_core_enable(core);
}
+/**
+ * xsdirx_get_stream_properties - Get SDI Rx stream properties
+ * @state: pointer to driver state
+ *
+ * This function decodes the stream's ST352 payload (if available) to get
+ * stream properties like width, height, picture type (interlaced/progressive),
+ * etc.
+ *
+ * Return: 0 for success else errors
+ */
+static int xsdirx_get_stream_properties(struct xsdirxss_state *state)
+{
+ struct xsdirxss_core *core = &state->core;
+ u32 mode, payload = 0, val, family, valid, tscan;
+ u8 byte1 = 0, active_luma = 0, pic_type = 0, framerate = 0;
+ u8 sampling = XST352_BYTE3_COLOR_FORMAT_422;
+ struct v4l2_mbus_framefmt *format = &state->format;
+ u32 bpc = XST352_BYTE4_BIT_DEPTH_10;
+
+ mode = xsdirxss_read(core, XSDIRX_MODE_DET_STAT_REG);
+ mode &= XSDIRX_MODE_DET_STAT_RX_MODE_MASK;
+
+ valid = xsdirxss_read(core, XSDIRX_ST352_VALID_REG);
+
+ if (mode >= XSDIRX_MODE_3G_MASK && !valid) {
+ dev_err(core->dev, "No valid ST352 payload present even for 3G mode and above\n");
+ return -EINVAL;
+ }
+
+ val = xsdirxss_read(core, XSDIRX_TS_DET_STAT_REG);
+ if (valid & XSDIRX_ST352_VALID_DS1_MASK) {
+ payload = xsdirxss_read(core, XSDIRX_ST352_DS1_REG);
+ byte1 = (payload >> XST352_PAYLOAD_BYTE1_SHIFT) &
+ XST352_PAYLOAD_BYTE_MASK;
+ active_luma = (payload & XST352_BYTE3_ACT_LUMA_COUNT_MASK) >>
+ XST352_BYTE3_ACT_LUMA_COUNT_OFFSET;
+ pic_type = (payload & XST352_BYTE2_PIC_TYPE_MASK) >>
+ XST352_BYTE2_PIC_TYPE_OFFSET;
+ framerate = (payload >> XST352_BYTE2_FPS_SHIFT) &
+ XST352_BYTE2_FPS_MASK;
+ tscan = (payload & XST352_BYTE2_TS_TYPE_MASK) >>
+ XST352_BYTE2_TS_TYPE_OFFSET;
+ sampling = (payload & XST352_BYTE3_COLOR_FORMAT_MASK) >>
+ XST352_BYTE3_COLOR_FORMAT_OFFSET;
+ bpc = (payload & XST352_BYTE4_BIT_DEPTH_MASK) >>
+ XST352_BYTE4_BIT_DEPTH_OFFSET;
+ } else {
+ dev_dbg(core->dev, "No ST352 payload available : Mode = %d\n",
+ mode);
+ framerate = (val & XSDIRX_TS_DET_STAT_RATE_MASK) >>
+ XSDIRX_TS_DET_STAT_RATE_OFFSET;
+ tscan = (val & XSDIRX_TS_DET_STAT_SCAN_MASK) >>
+ XSDIRX_TS_DET_STAT_SCAN_OFFSET;
+ }
+
+ if ((bpc == XST352_BYTE4_BIT_DEPTH_10 && core->bpc != 10) ||
+ (bpc == XST352_BYTE4_BIT_DEPTH_12 && core->bpc != 12)) {
+ dev_dbg(core->dev, "Bit depth not supported. bpc = %d core->bpc = %d\n",
+ bpc, core->bpc);
+ return -EINVAL;
+ }
+
+ family = (val & XSDIRX_TS_DET_STAT_FAMILY_MASK) >>
+ XSDIRX_TS_DET_STAT_FAMILY_OFFSET;
+ state->ts_is_interlaced = tscan ? false : true;
+
+ dev_dbg(core->dev, "ts_is_interlaced = %d, family = %d\n",
+ state->ts_is_interlaced, family);
+
+ switch (mode) {
+ case XSDIRX_MODE_HD_MASK:
+ if (!valid) {
+ /* No payload obtained */
+ dev_dbg(core->dev, "frame rate : %d, tscan = %d\n",
+ framerate, tscan);
+ /*
+ * NOTE : A progressive segmented frame pSF will be
+ * reported incorrectly as Interlaced as we rely on IP's
+ * transport scan locked bit.
+ */
+ dev_warn(core->dev, "pSF will be incorrectly reported as Interlaced\n");
+
+ switch (framerate) {
+ case XSDIRX_TS_DET_STAT_RATE_23_98HZ:
+ case XSDIRX_TS_DET_STAT_RATE_24HZ:
+ case XSDIRX_TS_DET_STAT_RATE_25HZ:
+ case XSDIRX_TS_DET_STAT_RATE_29_97HZ:
+ case XSDIRX_TS_DET_STAT_RATE_30HZ:
+ if (family == XSDIRX_SMPTE_ST_296) {
+ format->width = 1280;
+ format->height = 720;
+ format->field = V4L2_FIELD_NONE;
+ } else if (family == XSDIRX_SMPTE_ST_2048_2) {
+ format->width = 2048;
+ format->height = 1080;
+ if (tscan)
+ format->field = V4L2_FIELD_NONE;
+ else
+ format->field =
+ V4L2_FIELD_ALTERNATE;
+ } else {
+ format->width = 1920;
+ format->height = 1080;
+ if (tscan)
+ format->field = V4L2_FIELD_NONE;
+ else
+ format->field =
+ V4L2_FIELD_ALTERNATE;
+ }
+ break;
+ case XSDIRX_TS_DET_STAT_RATE_50HZ:
+ case XSDIRX_TS_DET_STAT_RATE_59_94HZ:
+ case XSDIRX_TS_DET_STAT_RATE_60HZ:
+ if (family == XSDIRX_SMPTE_ST_274) {
+ format->width = 1920;
+ format->height = 1080;
+ } else {
+ format->width = 1280;
+ format->height = 720;
+ }
+ format->field = V4L2_FIELD_NONE;
+ break;
+ default:
+ format->width = 1920;
+ format->height = 1080;
+ format->field = V4L2_FIELD_NONE;
+ }
+ } else {
+ dev_dbg(core->dev, "Got the payload\n");
+ switch (byte1) {
+ case XST352_BYTE1_ST292_1x720L_1_5G:
+ /* SMPTE ST 292-1 for 720 line payloads */
+ format->width = 1280;
+ format->height = 720;
+ break;
+ case XST352_BYTE1_ST292_1x1080L_1_5G:
+ /* SMPTE ST 292-1 for 1080 line payloads */
+ format->height = 1080;
+ if (active_luma)
+ format->width = 2048;
+ else
+ format->width = 1920;
+ break;
+ default:
+ dev_dbg(core->dev, "Unknown HD Mode SMPTE standard\n");
+ return -EINVAL;
+ }
+ }
+ break;
+ case XSDIRX_MODE_SD_MASK:
+ format->field = V4L2_FIELD_ALTERNATE;
+
+ switch (family) {
+ case XSDIRX_NTSC:
+ format->width = 720;
+ format->height = 486;
+ break;
+ case XSDIRX_PAL:
+ format->width = 720;
+ format->height = 576;
+ break;
+ default:
+ dev_dbg(core->dev, "Unknown SD Mode SMPTE standard\n");
+ return -EINVAL;
+ }
+ break;
+ case XSDIRX_MODE_3G_MASK:
+ switch (byte1) {
+ case XST352_BYTE1_ST425_2008_750L_3GB:
+ /* Sec 4.1.6.1 SMPTE 425-2008 */
+ case XST352_BYTE1_ST372_2x720L_3GB:
+ /* Table 13 SMPTE 425-2008 */
+ format->width = 1280;
+ format->height = 720;
+ break;
+ case XST352_BYTE1_ST425_2008_1125L_3GA:
+ /* ST352 Table SMPTE 425-1 */
+ case XST352_BYTE1_ST372_DL_3GB:
+ /* Table 13 SMPTE 425-2008 */
+ case XST352_BYTE1_ST372_2x1080L_3GB:
+ /* Table 13 SMPTE 425-2008 */
+ format->height = 1080;
+ if (active_luma)
+ format->width = 2048;
+ else
+ format->width = 1920;
+ break;
+ default:
+ dev_dbg(core->dev, "Unknown 3G Mode SMPTE standard\n");
+ return -EINVAL;
+ }
+ break;
+ case XSDIRX_MODE_6G_MASK:
+ switch (byte1) {
+ case XST352_BYTE1_ST2081_10_DL_2160L_6G:
+ /* Dual link 6G */
+ case XST352_BYTE1_ST2081_10_2160L_6G:
+ /* Table 3 SMPTE ST 2081-10 */
+ format->height = 2160;
+ if (active_luma)
+ format->width = 4096;
+ else
+ format->width = 3840;
+ break;
+ case XST352_BYTE1_ST2081_10_2_1080L_6G:
+ format->height = 1080;
+ if (active_luma)
+ format->width = 2048;
+ else
+ format->width = 1920;
+ break;
+ default:
+ dev_dbg(core->dev, "Unknown 6G Mode SMPTE standard\n");
+ return -EINVAL;
+ }
+ break;
+ case XSDIRX_MODE_12GI_MASK:
+ case XSDIRX_MODE_12GF_MASK:
+ switch (byte1) {
+ case XST352_BYTE1_ST2082_10_2160L_12G:
+ /* Section 4.3.1 SMPTE ST 2082-10 */
+ format->height = 2160;
+ if (active_luma)
+ format->width = 4096;
+ else
+ format->width = 3840;
+ break;
+ default:
+ dev_dbg(core->dev, "Unknown 12G Mode SMPTE standard\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_err(core->dev, "Invalid Mode\n");
+ return -EINVAL;
+ }
+
+ if (valid) {
+ if (pic_type)
+ format->field = V4L2_FIELD_NONE;
+ else
+ format->field = V4L2_FIELD_ALTERNATE;
+
+ if (format->height == 1080 && pic_type && !tscan)
+ format->field = V4L2_FIELD_ALTERNATE;
+
+ /*
+ * In 3GB DL pSF mode the video is similar to interlaced.
+ * So though it is a progressive video, its transport is
+ * interlaced and is sent as two width x (height/2) buffers.
+ */
+ if (byte1 == XST352_BYTE1_ST372_DL_3GB) {
+ if (state->ts_is_interlaced)
+ format->field = V4L2_FIELD_ALTERNATE;
+ else
+ format->field = V4L2_FIELD_NONE;
+ }
+ }
+
+ if (format->field == V4L2_FIELD_ALTERNATE)
+ format->height = format->height / 2;
+
+ switch (sampling) {
+ case XST352_BYTE3_COLOR_FORMAT_420:
+ if (core->bpc == 10)
+ format->code = MEDIA_BUS_FMT_VYYUYY10_4X20;
+ else
+ format->code = MEDIA_BUS_FMT_UYYVYY12_4X24;
+ break;
+ case XST352_BYTE3_COLOR_FORMAT_422:
+ if (core->bpc == 10)
+ format->code = MEDIA_BUS_FMT_UYVY10_1X20;
+ else
+ format->code = MEDIA_BUS_FMT_UYVY12_1X24;
+ break;
+ case XST352_BYTE3_COLOR_FORMAT_YUV444:
+ if (core->bpc == 10)
+ format->code = MEDIA_BUS_FMT_VUY10_1X30;
+ else
+ format->code = MEDIA_BUS_FMT_VUY12_1X36;
+ break;
+ case XST352_BYTE3_COLOR_FORMAT_GBR:
+ if (core->bpc == 10)
+ format->code = MEDIA_BUS_FMT_RBG101010_1X30;
+ else
+ format->code = MEDIA_BUS_FMT_RBG121212_1X36;
+ break;
+ default:
+ dev_err(core->dev, "Unsupported color format : %d\n", sampling);
+ return -EINVAL;
+ }
+
+ xsdirxss_get_framerate(&state->frame_interval, framerate);
+
+ dev_dbg(core->dev, "Stream width = %d height = %d Field = %d payload = 0x%08x ts = 0x%08x\n",
+ format->width, format->height, format->field, payload, val);
+ dev_dbg(core->dev, "frame rate numerator = %d denominator = %d\n",
+ state->frame_interval.numerator,
+ state->frame_interval.denominator);
+ dev_dbg(core->dev, "Stream code = 0x%x\n", format->code);
+ return 0;
+}
+
+/**
+ * xsdirxss_irq_handler - Interrupt handler for SDI Rx
+ * @irq: IRQ number
+ * @dev_id: Pointer to device state
+ *
+ * The SDI Rx interrupts are cleared by writing 1 to corresponding bit.
+ *
+ * Return: IRQ_HANDLED after handling interrupts, IRQ_NONE when no
+ * interrupt bit was pending.
+ */
+static irqreturn_t xsdirxss_irq_handler(int irq, void *dev_id)
+{
+	struct xsdirxss_state *state = (struct xsdirxss_state *)dev_id;
+	struct xsdirxss_core *core = &state->core;
+	u32 status;
+
+	status = xsdirxss_read(core, XSDIRX_ISR_REG);
+	dev_dbg(core->dev, "interrupt status = 0x%08x\n", status);
+
+	if (!status)
+		return IRQ_NONE;
+
+	/* Write-1-to-clear: ack all pending bits before servicing them */
+	xsdirxss_write(core, XSDIRX_ISR_REG, status);
+
+	if (status & XSDIRX_INTR_VIDLOCK_MASK ||
+	    status & XSDIRX_INTR_VIDUNLOCK_MASK) {
+		u32 val1, val2;
+
+		dev_dbg(core->dev, "video lock/unlock interrupt\n");
+
+		/* Stop the stream while the lock state is re-evaluated */
+		xsdirx_streamflow_control(core, false);
+		state->streaming = false;
+
+		val1 = xsdirxss_read(core, XSDIRX_MODE_DET_STAT_REG);
+		val2 = xsdirxss_read(core, XSDIRX_TS_DET_STAT_REG);
+
+		/* Locked only when both mode and transport detection agree */
+		if ((val1 & XSDIRX_MODE_DET_STAT_MODE_LOCK_MASK) &&
+		    (val2 & XSDIRX_TS_DET_STAT_LOCKED_MASK)) {
+			u32 mask = XSDIRX_RST_CTRL_RST_CRC_ERRCNT_MASK |
+				   XSDIRX_RST_CTRL_RST_EDH_ERRCNT_MASK;
+
+			dev_dbg(core->dev, "video lock interrupt\n");
+
+			/* Pulse the CRC/EDH error-counter reset bits */
+			xsdirxss_set(core, XSDIRX_RST_CTRL_REG, mask);
+			xsdirxss_clr(core, XSDIRX_RST_CTRL_REG, mask);
+
+			val1 = xsdirxss_read(core, XSDIRX_ST352_VALID_REG);
+			val2 = xsdirxss_read(core, XSDIRX_ST352_DS1_REG);
+
+			dev_dbg(core->dev, "valid st352 mask = 0x%08x\n", val1);
+			dev_dbg(core->dev, "st352 payload = 0x%08x\n", val2);
+
+			/* Zero return means the stream format was decoded */
+			if (!xsdirx_get_stream_properties(state)) {
+				state->vidlocked = true;
+				xsdirxss_set_gtclk(state);
+			} else {
+				dev_err(core->dev, "Unable to get stream properties!\n");
+				state->vidlocked = false;
+			}
+		} else {
+			dev_dbg(core->dev, "video unlock interrupt\n");
+			state->vidlocked = false;
+		}
+
+		/* Notify userspace that the source format may have changed */
+		memset(&state->event, 0, sizeof(state->event));
+		state->event.type = V4L2_EVENT_SOURCE_CHANGE;
+		state->event.u.src_change.changes =
+			V4L2_EVENT_SRC_CH_RESOLUTION;
+		v4l2_subdev_notify_event(&state->subdev, &state->event);
+	}
+
+	if (status & XSDIRX_INTR_UNDERFLOW_MASK) {
+		dev_dbg(core->dev, "Video in to AXI4 Stream core underflow interrupt\n");
+
+		memset(&state->event, 0, sizeof(state->event));
+		state->event.type = V4L2_EVENT_XLNXSDIRX_UNDERFLOW;
+		v4l2_subdev_notify_event(&state->subdev, &state->event);
+	}
+
+	if (status & XSDIRX_INTR_OVERFLOW_MASK) {
+		dev_dbg(core->dev, "Video in to AXI4 Stream core overflow interrupt\n");
+
+		memset(&state->event, 0, sizeof(state->event));
+		state->event.type = V4L2_EVENT_XLNXSDIRX_OVERFLOW;
+		v4l2_subdev_notify_event(&state->subdev, &state->event);
+	}
+	return IRQ_HANDLED;
+}
+
+/**
+ * xsdirxss_subscribe_event - Subscribe to video lock and unlock event
+ * @sd: V4L2 Sub device
+ * @fh: V4L2 File Handle
+ * @sub: Subscribe event structure
+ *
+ * Only the driver-private underflow/overflow events and the standard
+ * source-change event are supported; any other type is rejected.
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int xsdirxss_subscribe_event(struct v4l2_subdev *sd,
+				    struct v4l2_fh *fh,
+				    struct v4l2_event_subscription *sub)
+{
+	int ret;
+	struct xsdirxss_state *xsdirxss = to_xsdirxssstate(sd);
+	struct xsdirxss_core *core = &xsdirxss->core;
+
+	switch (sub->type) {
+	case V4L2_EVENT_XLNXSDIRX_UNDERFLOW:
+	case V4L2_EVENT_XLNXSDIRX_OVERFLOW:
+		/* Driver private events use a bounded event queue */
+		ret = v4l2_event_subscribe(fh, sub, XSDIRX_MAX_EVENTS, NULL);
+		break;
+	case V4L2_EVENT_SOURCE_CHANGE:
+		ret = v4l2_src_change_event_subscribe(fh, sub);
+		break;
+	default:
+		return -EINVAL;
+	}
+	dev_dbg(core->dev, "Event subscribed : 0x%08x\n", sub->type);
+	return ret;
+}
+
+/**
+ * xsdirxss_unsubscribe_event - Unsubscribe from all events registered
+ * @sd: V4L2 Sub device
+ * @fh: V4L2 file handle
+ * @sub: pointer to Event unsubscription structure
+ *
+ * Return: zero on success, else a negative error code.
+ */
+static int xsdirxss_unsubscribe_event(struct v4l2_subdev *sd,
+				      struct v4l2_fh *fh,
+				      struct v4l2_event_subscription *sub)
+{
+	struct xsdirxss_state *state = to_xsdirxssstate(sd);
+
+	dev_dbg(state->core.dev, "Event unsubscribe : 0x%08x\n", sub->type);
+	return v4l2_event_unsubscribe(fh, sub);
+}
+
+/**
+ * xsdirxss_s_ctrl - This is used to set the Xilinx SDI Rx V4L2 controls
+ * @ctrl: V4L2 control to be set
+ *
+ * This function is used to set the V4L2 controls for the Xilinx SDI Rx
+ * Subsystem. The core is disabled while a control is applied and must be
+ * re-enabled on every exit path that disabled it.
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int xsdirxss_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	int ret = 0;
+	struct xsdirxss_state *xsdirxss =
+		container_of(ctrl->handler,
+			     struct xsdirxss_state, ctrl_handler);
+	struct xsdirxss_core *core = &xsdirxss->core;
+
+	dev_dbg(core->dev, "set ctrl id = 0x%08x val = 0x%08x\n",
+		ctrl->id, ctrl->val);
+
+	/* Controls may not be changed while the stream is running */
+	if (xsdirxss->streaming) {
+		dev_err(core->dev, "Cannot set controls while streaming\n");
+		return -EINVAL;
+	}
+
+	xsdirx_core_disable(core);
+	switch (ctrl->id) {
+	case V4L2_CID_XILINX_SDIRX_FRAMER:
+		xsdirx_framer(core, ctrl->val);
+		xsdirxss->framer_enable = ctrl->val;
+		break;
+	case V4L2_CID_XILINX_SDIRX_VIDLOCK_WINDOW:
+		xsdirx_setvidlockwindow(core, ctrl->val);
+		xsdirxss->vidlockwin = ctrl->val;
+		break;
+	case V4L2_CID_XILINX_SDIRX_EDH_ERRCNT_ENABLE:
+		xsdirx_setedherrcnttrigger(core, ctrl->val);
+		xsdirxss->edhmask = ctrl->val;
+		break;
+	case V4L2_CID_XILINX_SDIRX_SEARCH_MODES:
+		if (ctrl->val) {
+			/* Mask off modes the configured line rate can't do */
+			if (core->mode == XSDIRXSS_SDI_STD_3G) {
+				dev_dbg(core->dev, "Upto 3G supported\n");
+				ctrl->val &= ~(BIT(XSDIRX_MODE_6G_OFFSET) |
+					       BIT(XSDIRX_MODE_12GI_OFFSET) |
+					       BIT(XSDIRX_MODE_12GF_OFFSET));
+			}
+
+			if (core->mode == XSDIRXSS_SDI_STD_6G) {
+				dev_dbg(core->dev, "Upto 6G supported\n");
+				ctrl->val &= ~(BIT(XSDIRX_MODE_12GI_OFFSET) |
+					       BIT(XSDIRX_MODE_12GF_OFFSET));
+			}
+
+			ret = xsdirx_set_modedetect(core, ctrl->val);
+			if (!ret)
+				xsdirxss->searchmask = ctrl->val;
+		} else {
+			dev_err(core->dev, "Select at least one mode!\n");
+			/*
+			 * Bug fix: this error path previously returned with
+			 * the core still disabled; re-enable it first.
+			 */
+			xsdirx_core_enable(core);
+			return -EINVAL;
+		}
+		break;
+	default:
+		xsdirxss_set(core, XSDIRX_RST_CTRL_REG,
+			     XSDIRX_RST_CTRL_SS_EN_MASK);
+		return -EINVAL;
+	}
+	xsdirx_core_enable(core);
+	return ret;
+}
+
+/**
+ * xsdirxss_g_volatile_ctrl - get the Xilinx SDI Rx controls
+ * @ctrl: Pointer to V4L2 control
+ *
+ * Reads the requested status value from the core registers. Most controls
+ * are only meaningful once video is locked; the EDH counters additionally
+ * require the core to be in SD mode.
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int xsdirxss_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+	u32 val;
+	struct xsdirxss_state *xsdirxss =
+		container_of(ctrl->handler,
+			     struct xsdirxss_state, ctrl_handler);
+	struct xsdirxss_core *core = &xsdirxss->core;
+
+	switch (ctrl->id) {
+	case V4L2_CID_XILINX_SDIRX_MODE_DETECT:
+		if (!xsdirxss->vidlocked) {
+			dev_err(core->dev, "Can't get values when video not locked!\n");
+			return -EINVAL;
+		}
+		val = xsdirxss_read(core, XSDIRX_MODE_DET_STAT_REG);
+		val &= XSDIRX_MODE_DET_STAT_RX_MODE_MASK;
+
+		/* Translate the detected mode mask to its control offset */
+		switch (val) {
+		case XSDIRX_MODE_SD_MASK:
+			ctrl->val = XSDIRX_MODE_SD_OFFSET;
+			break;
+		case XSDIRX_MODE_HD_MASK:
+			ctrl->val = XSDIRX_MODE_HD_OFFSET;
+			break;
+		case XSDIRX_MODE_3G_MASK:
+			ctrl->val = XSDIRX_MODE_3G_OFFSET;
+			break;
+		case XSDIRX_MODE_6G_MASK:
+			ctrl->val = XSDIRX_MODE_6G_OFFSET;
+			break;
+		case XSDIRX_MODE_12GI_MASK:
+			ctrl->val = XSDIRX_MODE_12GI_OFFSET;
+			break;
+		case XSDIRX_MODE_12GF_MASK:
+			ctrl->val = XSDIRX_MODE_12GF_OFFSET;
+			break;
+		}
+		break;
+	case V4L2_CID_XILINX_SDIRX_CRC:
+		ctrl->val = xsdirxss_read(core, XSDIRX_CRC_ERRCNT_REG);
+		/* NOTE(review): the 0xFFFF write appears to clear the error
+		 * counter after reading (read-then-reset) — confirm against
+		 * the IP register spec.
+		 */
+		xsdirxss_write(core, XSDIRX_CRC_ERRCNT_REG, 0xFFFF);
+		break;
+	case V4L2_CID_XILINX_SDIRX_EDH_ERRCNT:
+		/* EDH is only defined for SD-SDI streams */
+		val = xsdirxss_read(core, XSDIRX_MODE_DET_STAT_REG);
+		val &= XSDIRX_MODE_DET_STAT_RX_MODE_MASK;
+		if (val == XSDIRX_MODE_SD_MASK) {
+			ctrl->val = xsdirxss_read(core, XSDIRX_EDH_ERRCNT_REG);
+		} else {
+			dev_dbg(core->dev, "%d - not in SD mode\n", ctrl->id);
+			return -EINVAL;
+		}
+		break;
+	case V4L2_CID_XILINX_SDIRX_EDH_STATUS:
+		val = xsdirxss_read(core, XSDIRX_MODE_DET_STAT_REG);
+		val &= XSDIRX_MODE_DET_STAT_RX_MODE_MASK;
+		if (val == XSDIRX_MODE_SD_MASK) {
+			ctrl->val = xsdirxss_read(core, XSDIRX_EDH_STAT_REG);
+		} else {
+			dev_dbg(core->dev, "%d - not in SD mode\n", ctrl->id);
+			return -EINVAL;
+		}
+		break;
+	case V4L2_CID_XILINX_SDIRX_TS_IS_INTERLACED:
+		if (!xsdirxss->vidlocked) {
+			dev_err(core->dev, "Can't get values when video not locked!\n");
+			return -EINVAL;
+		}
+		/* Cached by the stream-properties decoder on video lock */
+		ctrl->val = xsdirxss->ts_is_interlaced;
+		break;
+	case V4L2_CID_XILINX_SDIRX_ACTIVE_STREAMS:
+		if (!xsdirxss->vidlocked) {
+			dev_err(core->dev, "Can't get values when video not locked!\n");
+			return -EINVAL;
+		}
+		val = xsdirxss_read(core, XSDIRX_MODE_DET_STAT_REG);
+		val &= XSDIRX_MODE_DET_STAT_ACT_STREAM_MASK;
+		val >>= XSDIRX_MODE_DET_STAT_ACT_STREAM_OFFSET;
+		/* Register encodes log2 of the stream count */
+		ctrl->val = 1 << val;
+		break;
+	case V4L2_CID_XILINX_SDIRX_IS_3GB:
+		if (!xsdirxss->vidlocked) {
+			dev_err(core->dev, "Can't get values when video not locked!\n");
+			return -EINVAL;
+		}
+		val = xsdirxss_read(core, XSDIRX_MODE_DET_STAT_REG);
+		val &= XSDIRX_MODE_DET_STAT_LVLB_3G_MASK;
+		ctrl->val = val ? true : false;
+		break;
+	default:
+		dev_err(core->dev, "Get Invalid control id 0x%0x\n", ctrl->id);
+		return -EINVAL;
+	}
+	dev_dbg(core->dev, "Get ctrl id = 0x%08x val = 0x%08x\n",
+		ctrl->id, ctrl->val);
+	return 0;
+}
+
+/**
+ * xsdirxss_log_status - Logs the status of the SDI Rx Subsystem
+ * @sd: Pointer to V4L2 subdevice structure
+ *
+ * This function prints the current status of Xilinx SDI Rx Subsystem
+ *
+ * Return: 0 on success
+ */
+static int xsdirxss_log_status(struct v4l2_subdev *sd)
+{
+	struct xsdirxss_state *xsdirxss = to_xsdirxssstate(sd);
+	struct xsdirxss_core *core = &xsdirxss->core;
+	u32 offset;
+
+	v4l2_info(sd, "***** SDI Rx subsystem reg dump start *****\n");
+	/* Dump the first 0x28 words of register space */
+	for (offset = 0; offset < 0x28 * 4; offset += 4)
+		v4l2_info(sd, "offset 0x%08x data 0x%08x\n",
+			  offset, xsdirxss_read(core, offset));
+	v4l2_info(sd, "***** SDI Rx subsystem reg dump end *****\n");
+	return 0;
+}
+
+/**
+ * xsdirxss_g_frame_interval - Get the frame interval
+ * @sd: V4L2 Sub device
+ * @fi: Pointer to V4l2 Sub device frame interval structure
+ *
+ * This function is used to get the frame interval.
+ * The frame rate can be integral or fractional.
+ * Integral frame rate e.g. numerator = 1000, denominator = 24000 => 24 fps
+ * Fractional frame rate e.g. numerator = 1001, denominator = 24000 => 23.97 fps
+ *
+ * Return: 0 on success
+ */
+static int xsdirxss_g_frame_interval(struct v4l2_subdev *sd,
+				     struct v4l2_subdev_frame_interval *fi)
+{
+	struct xsdirxss_state *state = to_xsdirxssstate(sd);
+
+	/* The interval is only known once the stream has been decoded */
+	if (!state->vidlocked) {
+		dev_err(state->core.dev, "Video not locked!\n");
+		return -EINVAL;
+	}
+
+	fi->interval = state->frame_interval;
+	dev_dbg(state->core.dev, "frame rate numerator = %d denominator = %d\n",
+		state->frame_interval.numerator,
+		state->frame_interval.denominator);
+	return 0;
+}
+
+/**
+ * xsdirxss_s_stream - It is used to start/stop the streaming.
+ * @sd: V4L2 Sub device
+ * @enable: Flag (True / False)
+ *
+ * This function controls the start or stop of streaming for the
+ * Xilinx SDI Rx Subsystem.
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int xsdirxss_s_stream(struct v4l2_subdev *sd, int enable)
+{
+	struct xsdirxss_state *state = to_xsdirxssstate(sd);
+	struct xsdirxss_core *core = &state->core;
+
+	if (!enable) {
+		/* Stopping an already-stopped stream is not an error */
+		if (!state->streaming) {
+			dev_dbg(core->dev, "Stopped streaming already\n");
+			return 0;
+		}
+
+		xsdirx_streamflow_control(core, false);
+		state->streaming = false;
+		dev_dbg(core->dev, "Streaming stopped\n");
+		return 0;
+	}
+
+	/* Starting requires a locked video signal and no active stream */
+	if (!state->vidlocked) {
+		dev_dbg(core->dev, "Video is not locked\n");
+		return -EINVAL;
+	}
+	if (state->streaming) {
+		dev_dbg(core->dev, "Already streaming\n");
+		return -EINVAL;
+	}
+
+	xsdirx_streamflow_control(core, true);
+	state->streaming = true;
+	dev_dbg(core->dev, "Streaming started\n");
+
+	return 0;
+}
+
+/**
+ * xsdirxss_g_input_status - It is used to determine if the video signal
+ * is present / locked onto or not.
+ *
+ * @sd: V4L2 Sub device
+ * @status: status of signal locked
+ *
+ * This is used to determine if the video signal is present and locked onto
+ * by the SDI Rx core or not based on vidlocked flag.
+ *
+ * Return: zero on success
+ */
+static int xsdirxss_g_input_status(struct v4l2_subdev *sd, u32 *status)
+{
+	struct xsdirxss_state *state = to_xsdirxssstate(sd);
+
+	/* Report no-signal/no-sync until the core locks onto the input */
+	*status = state->vidlocked ?
+		  0 : (V4L2_IN_ST_NO_SYNC | V4L2_IN_ST_NO_SIGNAL);
+
+	return 0;
+}
+
+/*
+ * Return the TRY format stored in the pad config or the ACTIVE format
+ * stored in the driver state; NULL for any other 'which' value.
+ */
+static struct v4l2_mbus_framefmt *
+__xsdirxss_get_pad_format(struct xsdirxss_state *xsdirxss,
+			  struct v4l2_subdev_pad_config *cfg,
+			  unsigned int pad, u32 which)
+{
+	if (which == V4L2_SUBDEV_FORMAT_TRY)
+		return v4l2_subdev_get_try_format(&xsdirxss->subdev, cfg, pad);
+	if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
+		return &xsdirxss->format;
+
+	return NULL;
+}
+
+/**
+ * xsdirxss_get_format - Get the pad format
+ * @sd: Pointer to V4L2 Sub device structure
+ * @cfg: Pointer to sub device pad information structure
+ * @fmt: Pointer to pad level media bus format
+ *
+ * This function is used to get the pad format information.
+ *
+ * Return: 0 on success
+ */
+static int xsdirxss_get_format(struct v4l2_subdev *sd,
+			       struct v4l2_subdev_pad_config *cfg,
+			       struct v4l2_subdev_format *fmt)
+{
+	struct xsdirxss_state *state = to_xsdirxssstate(sd);
+	struct v4l2_mbus_framefmt *cur;
+
+	/* The format is only meaningful once video has locked */
+	if (!state->vidlocked) {
+		dev_err(state->core.dev, "Video not locked!\n");
+		return -EINVAL;
+	}
+
+	cur = __xsdirxss_get_pad_format(state, cfg, fmt->pad, fmt->which);
+	fmt->format = *cur;
+
+	dev_dbg(state->core.dev, "Stream width = %d height = %d Field = %d\n",
+		fmt->format.width, fmt->format.height, fmt->format.field);
+
+	return 0;
+}
+
+/**
+ * xsdirxss_set_format - This is used to set the pad format
+ * @sd: Pointer to V4L2 Sub device structure
+ * @cfg: Pointer to sub device pad information structure
+ * @fmt: Pointer to pad level media bus format
+ *
+ * This function is used to set the pad format.
+ * Since the pad format is fixed in hardware, it can't be
+ * modified on run time.
+ *
+ * Return: 0 on success
+ */
+static int xsdirxss_set_format(struct v4l2_subdev *sd,
+			       struct v4l2_subdev_pad_config *cfg,
+			       struct v4l2_subdev_format *fmt)
+{
+	struct xsdirxss_state *state = to_xsdirxssstate(sd);
+	const struct v4l2_mbus_framefmt *current_fmt;
+
+	dev_dbg(state->core.dev,
+		"set width %d height %d code %d field %d colorspace %d\n",
+		fmt->format.width, fmt->format.height,
+		fmt->format.code, fmt->format.field,
+		fmt->format.colorspace);
+
+	current_fmt = __xsdirxss_get_pad_format(state, cfg, fmt->pad,
+						fmt->which);
+
+	/* The media bus code is fixed in hardware; force it back */
+	/* TODO : Add checks for width height */
+	fmt->format.code = current_fmt->code;
+
+	return 0;
+}
+
+/**
+ * xsdirxss_enum_mbus_code - Handle pixel format enumeration
+ * @sd: pointer to v4l2 subdev structure
+ * @cfg: V4L2 subdev pad configuration
+ * @code: pointer to v4l2_subdev_mbus_code_enum structure
+ *
+ * Enumerates the media bus codes of the 10 bpc or 12 bpc table depending
+ * on the configured bits per component.
+ *
+ * Return: -EINVAL or zero on success
+ */
+static int xsdirxss_enum_mbus_code(struct v4l2_subdev *sd,
+				   struct v4l2_subdev_pad_config *cfg,
+				   struct v4l2_subdev_mbus_code_enum *code)
+{
+	struct xsdirxss_state *xsdirxss = to_xsdirxssstate(sd);
+	u32 index = code->index;
+
+	/* Only pad 0 exists */
+	if (code->pad)
+		return -EINVAL;
+
+	/*
+	 * Bound the index by the actual table length instead of a magic
+	 * constant so the check tracks any table change.
+	 */
+	if (xsdirxss->core.bpc == 12) {
+		if (index >= ARRAY_SIZE(xsdirxss_12bpc_mbus_fmts))
+			return -EINVAL;
+		code->code = xsdirxss_12bpc_mbus_fmts[index];
+	} else {
+		if (index >= ARRAY_SIZE(xsdirxss_10bpc_mbus_fmts))
+			return -EINVAL;
+		code->code = xsdirxss_10bpc_mbus_fmts[index];
+	}
+
+	return 0;
+}
+
+/**
+ * xsdirxss_enum_dv_timings: Enumerate all the supported DV timings
+ * @sd: pointer to v4l2 subdev structure
+ * @timings: DV timings structure to be returned.
+ *
+ * Return: -EINVAL incase of invalid index and pad or zero on success
+ */
+static int xsdirxss_enum_dv_timings(struct v4l2_subdev *sd,
+				    struct v4l2_enum_dv_timings *timings)
+{
+	/* Single pad, index bounded by the capability table */
+	if (timings->pad != 0 || timings->index >= ARRAY_SIZE(fmt_cap))
+		return -EINVAL;
+
+	timings->timings = fmt_cap[timings->index];
+	return 0;
+}
+
+/**
+ * xsdirxss_query_dv_timings: Query for the current DV timings
+ * @sd: pointer to v4l2 subdev structure
+ * @timings: DV timings structure to be returned.
+ *
+ * Return: -ENOLCK when video is not locked, -ERANGE when corresponding timing
+ * entry is not found or zero on success.
+ */
+static int xsdirxss_query_dv_timings(struct v4l2_subdev *sd,
+				     struct v4l2_dv_timings *timings)
+{
+	struct xsdirxss_state *state = to_xsdirxssstate(sd);
+	const struct v4l2_mbus_framefmt *fmt = &state->format;
+	unsigned int i;
+
+	if (!state->vidlocked)
+		return -ENOLCK;
+
+	/* Match detected width/height/rate against the timings table */
+	for (i = 0; i < ARRAY_SIZE(xsdirxss_dv_timings); i++) {
+		if (fmt->width != xsdirxss_dv_timings[i].width)
+			continue;
+		if (fmt->height != xsdirxss_dv_timings[i].height)
+			continue;
+		if (state->frame_interval.denominator !=
+		    xsdirxss_dv_timings[i].fps * 1000)
+			continue;
+
+		*timings = xsdirxss_dv_timings[i].format;
+		return 0;
+	}
+
+	return -ERANGE;
+}
+
+/**
+ * xsdirxss_open - Called on v4l2_open()
+ * @sd: Pointer to V4L2 sub device structure
+ * @fh: Pointer to V4L2 File handle
+ *
+ * This function is called on v4l2_open(). It sets the default format for pad.
+ *
+ * Return: 0 on success
+ */
+static int xsdirxss_open(struct v4l2_subdev *sd,
+			 struct v4l2_subdev_fh *fh)
+{
+	struct xsdirxss_state *state = to_xsdirxssstate(sd);
+
+	/* Seed this file handle's TRY format with the driver default */
+	*v4l2_subdev_get_try_format(sd, fh->pad, 0) = state->default_format;
+
+	return 0;
+}
+
+/**
+ * xsdirxss_close - Called on v4l2_close()
+ * @sd: Pointer to V4L2 sub device structure
+ * @fh: Pointer to V4L2 File handle
+ *
+ * This function is called on v4l2_close(). There is no per-file state to
+ * release, so it is intentionally a no-op.
+ *
+ * Return: 0 on success
+ */
+static int xsdirxss_close(struct v4l2_subdev *sd,
+			  struct v4l2_subdev_fh *fh)
+{
+	return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Media Operations
+ */
+
+static const struct media_entity_operations xsdirxss_media_ops = {
+	.link_validate = v4l2_subdev_link_validate
+};
+
+static const struct v4l2_ctrl_ops xsdirxss_ctrl_ops = {
+	.g_volatile_ctrl = xsdirxss_g_volatile_ctrl,
+	.s_ctrl = xsdirxss_s_ctrl
+};
+
+/*
+ * EDH controls, registered only when the core was synthesized with the EDH
+ * processor (xlnx,include-edh). EDH is meaningful for SD-SDI streams only.
+ */
+static const struct v4l2_ctrl_config xsdirxss_edh_ctrls[] = {
+	{
+		.ops	= &xsdirxss_ctrl_ops,
+		.id	= V4L2_CID_XILINX_SDIRX_EDH_ERRCNT_ENABLE,
+		.name	= "SDI Rx : EDH Error Count Enable",
+		.type	= V4L2_CTRL_TYPE_BITMASK,
+		.min	= 0,
+		.max	= XSDIRX_EDH_ALLERR_MASK,
+		.def	= 0,
+	}, {
+		.ops	= &xsdirxss_ctrl_ops,
+		.id	= V4L2_CID_XILINX_SDIRX_EDH_ERRCNT,
+		.name	= "SDI Rx : EDH Error Count",
+		.type	= V4L2_CTRL_TYPE_INTEGER,
+		.min	= 0,
+		.max	= 0xFFFF,
+		.step	= 1,
+		.def	= 0,
+		.flags  = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+	}, {
+		.ops	= &xsdirxss_ctrl_ops,
+		.id	= V4L2_CID_XILINX_SDIRX_EDH_STATUS,
+		.name	= "SDI Rx : EDH Status",
+		.type	= V4L2_CTRL_TYPE_INTEGER,
+		.min	= 0,
+		.max	= 0xFFFFFFFF,
+		.step	= 1,
+		.def	= 0,
+		.flags  = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+	}
+};
+
+/* Controls always registered, independent of EDH support */
+static const struct v4l2_ctrl_config xsdirxss_ctrls[] = {
+	{
+		.ops	= &xsdirxss_ctrl_ops,
+		.id	= V4L2_CID_XILINX_SDIRX_FRAMER,
+		.name	= "SDI Rx : Enable Framer",
+		.type	= V4L2_CTRL_TYPE_BOOLEAN,
+		.min	= false,
+		.max	= true,
+		.step	= 1,
+		.def	= true,
+	}, {
+		.ops	= &xsdirxss_ctrl_ops,
+		.id	= V4L2_CID_XILINX_SDIRX_VIDLOCK_WINDOW,
+		.name	= "SDI Rx : Video Lock Window",
+		.type	= V4L2_CTRL_TYPE_INTEGER,
+		.min	= 0,
+		.max	= 0xFFFFFFFF,
+		.step	= 1,
+		.def	= XSDIRX_DEFAULT_VIDEO_LOCK_WINDOW,
+	}, {
+		.ops	= &xsdirxss_ctrl_ops,
+		.id	= V4L2_CID_XILINX_SDIRX_SEARCH_MODES,
+		.name	= "SDI Rx : Modes search Mask",
+		.type	= V4L2_CTRL_TYPE_BITMASK,
+		.min	= 0,
+		.max	= XSDIRX_DETECT_ALL_MODES,
+		.def	= XSDIRX_DETECT_ALL_MODES,
+	}, {
+		.ops	= &xsdirxss_ctrl_ops,
+		.id	= V4L2_CID_XILINX_SDIRX_MODE_DETECT,
+		.name	= "SDI Rx : Mode Detect Status",
+		.type	= V4L2_CTRL_TYPE_INTEGER,
+		.min	= XSDIRX_MODE_SD_OFFSET,
+		.max	= XSDIRX_MODE_12GF_OFFSET,
+		.step	= 1,
+		.flags  = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+	}, {
+		.ops	= &xsdirxss_ctrl_ops,
+		.id	= V4L2_CID_XILINX_SDIRX_CRC,
+		.name	= "SDI Rx : CRC Error status",
+		.type	= V4L2_CTRL_TYPE_INTEGER,
+		.min	= 0,
+		.max	= 0xFFFFFFFF,
+		.step	= 1,
+		.def	= 0,
+		.flags  = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+	}, {
+		.ops	= &xsdirxss_ctrl_ops,
+		.id	= V4L2_CID_XILINX_SDIRX_TS_IS_INTERLACED,
+		.name	= "SDI Rx : TS is Interlaced",
+		.type	= V4L2_CTRL_TYPE_BOOLEAN,
+		.min	= false,
+		.max	= true,
+		.def	= false,
+		.step	= 1,
+		.flags  = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+	}, {
+		.ops	= &xsdirxss_ctrl_ops,
+		.id	= V4L2_CID_XILINX_SDIRX_ACTIVE_STREAMS,
+		.name	= "SDI Rx : Active Streams",
+		.type	= V4L2_CTRL_TYPE_INTEGER,
+		.min	= 1,
+		.max	= 16,
+		.def	= 1,
+		.step	= 1,
+		.flags  = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+	}, {
+		.ops	= &xsdirxss_ctrl_ops,
+		.id	= V4L2_CID_XILINX_SDIRX_IS_3GB,
+		.name	= "SDI Rx : Is 3GB",
+		.type	= V4L2_CTRL_TYPE_BOOLEAN,
+		.min	= false,
+		.max	= true,
+		.def	= false,
+		.step	= 1,
+		.flags  = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+	}
+};
+
+static const struct v4l2_subdev_core_ops xsdirxss_core_ops = {
+	.log_status = xsdirxss_log_status,
+	.subscribe_event = xsdirxss_subscribe_event,
+	.unsubscribe_event = xsdirxss_unsubscribe_event
+};
+
+static const struct v4l2_subdev_video_ops xsdirxss_video_ops = {
+	.g_frame_interval = xsdirxss_g_frame_interval,
+	.s_stream = xsdirxss_s_stream,
+	.g_input_status = xsdirxss_g_input_status,
+	.query_dv_timings = xsdirxss_query_dv_timings,
+};
+
+static const struct v4l2_subdev_pad_ops xsdirxss_pad_ops = {
+	.get_fmt = xsdirxss_get_format,
+	.set_fmt = xsdirxss_set_format,
+	.enum_mbus_code = xsdirxss_enum_mbus_code,
+	.enum_dv_timings = xsdirxss_enum_dv_timings,
+};
+
+static const struct v4l2_subdev_ops xsdirxss_ops = {
+	.core = &xsdirxss_core_ops,
+	.video = &xsdirxss_video_ops,
+	.pad = &xsdirxss_pad_ops
+};
+
+static const struct v4l2_subdev_internal_ops xsdirxss_internal_ops = {
+	.open = xsdirxss_open,
+	.close = xsdirxss_close
+};
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+/**
+ * xsdirxss_parse_of - Parse the device tree node for the SDI Rx subsystem
+ * @xsdirxss: Pointer to driver state
+ *
+ * Reads the EDH, line-rate and bpp properties, validates the single source
+ * port's video format against the configured bits per component, and
+ * registers the interrupt handler.
+ *
+ * Return: 0 on success, a negative error code otherwise
+ */
+static int xsdirxss_parse_of(struct xsdirxss_state *xsdirxss)
+{
+	struct device_node *node = xsdirxss->core.dev->of_node;
+	struct device_node *ports = NULL;
+	struct device_node *port = NULL;
+	unsigned int nports = 0;
+	struct xsdirxss_core *core = &xsdirxss->core;
+	int ret;
+	const char *sdi_std;
+
+	core->include_edh = of_property_read_bool(node, "xlnx,include-edh");
+	dev_dbg(core->dev, "EDH property = %s\n",
+		core->include_edh ? "Present" : "Absent");
+
+	ret = of_property_read_string(node, "xlnx,line-rate", &sdi_std);
+	if (ret < 0) {
+		dev_err(core->dev, "xlnx,line-rate property not found\n");
+		return ret;
+	}
+
+	if (!strncmp(sdi_std, "12G_SDI_8DS", XSDIRX_MAX_STR_LENGTH)) {
+		core->mode = XSDIRXSS_SDI_STD_12G_8DS;
+	} else if (!strncmp(sdi_std, "6G_SDI", XSDIRX_MAX_STR_LENGTH)) {
+		core->mode = XSDIRXSS_SDI_STD_6G;
+	} else if (!strncmp(sdi_std, "3G_SDI", XSDIRX_MAX_STR_LENGTH)) {
+		core->mode = XSDIRXSS_SDI_STD_3G;
+	} else {
+		dev_err(core->dev, "Invalid Line Rate\n");
+		return -EINVAL;
+	}
+	dev_dbg(core->dev, "SDI Rx Line Rate = %s, mode = %d\n", sdi_std,
+		core->mode);
+
+	ret = of_property_read_u32(node, "xlnx,bpp", &core->bpc);
+	if (ret < 0) {
+		if (ret != -EINVAL) {
+			dev_err(core->dev, "failed to get xlnx,bpp\n");
+			return ret;
+		}
+
+		/*
+		 * For backward compatibility, set default bpc as 10
+		 * in case xlnx,bpp is not present.
+		 */
+		core->bpc = 10;
+	}
+
+	if (core->bpc != 10 && core->bpc != 12) {
+		dev_err(core->dev, "bits per component=%u. Can be 10 or 12 only\n",
+			core->bpc);
+		return -EINVAL;
+	}
+
+	ports = of_get_child_by_name(node, "ports");
+	if (!ports)
+		ports = node;
+	/* TODO(review): when ports != node its reference is never dropped */
+
+	for_each_child_of_node(ports, port) {
+		const struct xvip_video_format *format;
+		struct device_node *endpoint;
+
+		if (!port->name || of_node_cmp(port->name, "port"))
+			continue;
+
+		format = xvip_of_get_format(port);
+		if (IS_ERR(format)) {
+			dev_err(core->dev, "invalid format in DT");
+			of_node_put(port);
+			return PTR_ERR(format);
+		}
+
+		dev_dbg(core->dev, "vf_code = %d bpc = %d bpp = %d\n",
+			format->vf_code, format->width, format->bpp);
+
+		/*
+		 * Bug fix: reject when the vf_code is unsupported OR when the
+		 * format width disagrees with the configured bpc. The old
+		 * &&-chained condition could never be true (bpc cannot be 10
+		 * and 12 at once), so invalid formats were accepted silently.
+		 */
+		if ((format->vf_code != XVIP_VF_YUV_422 &&
+		     format->vf_code != XVIP_VF_YUV_420 &&
+		     format->vf_code != XVIP_VF_YUV_444 &&
+		     format->vf_code != XVIP_VF_RBG) ||
+		    (core->bpc == 10 && format->width != 10) ||
+		    (core->bpc == 12 && format->width != 12)) {
+			dev_err(core->dev,
+				"Incorrect UG934 video format set.\n");
+			of_node_put(port);
+			return -EINVAL;
+		}
+		xsdirxss->vip_format = format;
+
+		endpoint = of_get_next_child(port, NULL);
+		if (!endpoint) {
+			dev_err(core->dev, "No port at\n");
+			of_node_put(port);
+			return -EINVAL;
+		}
+		/* Only the endpoint's presence matters; drop its reference */
+		of_node_put(endpoint);
+
+		/* Count the number of ports. */
+		nports++;
+	}
+
+	if (nports != 1) {
+		dev_err(core->dev, "invalid number of ports %u\n", nports);
+		return -EINVAL;
+	}
+
+	/* Register interrupt handler; irq_of_parse_and_map returns 0 on error */
+	core->irq = irq_of_parse_and_map(node, 0);
+	if (!core->irq) {
+		dev_err(core->dev, "No valid interrupt found in DT\n");
+		return -EINVAL;
+	}
+
+	ret = devm_request_threaded_irq(core->dev, core->irq, NULL,
+					xsdirxss_irq_handler, IRQF_ONESHOT,
+					"xilinx-sdirxss", xsdirxss);
+	if (ret) {
+		dev_err(core->dev, "Err = %d Interrupt handler reg failed!\n",
+			ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * xsdirxss_probe - Probe the SDI Rx Subsystem device
+ * @pdev: platform device
+ *
+ * Allocate driver state, acquire the GT reset GPIO and clocks, parse the
+ * device tree, map the registers, reset and configure the core, then
+ * register the V4L2 subdevice with its control handler.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int xsdirxss_probe(struct platform_device *pdev)
+{
+	struct v4l2_subdev *subdev;
+	struct xsdirxss_state *xsdirxss;
+	struct xsdirxss_core *core;
+	struct resource *res;
+	int ret;
+	unsigned int num_ctrls, num_edh_ctrls = 0, i;
+
+	xsdirxss = devm_kzalloc(&pdev->dev, sizeof(*xsdirxss), GFP_KERNEL);
+	if (!xsdirxss)
+		return -ENOMEM;
+
+	xsdirxss->core.dev = &pdev->dev;
+	core = &xsdirxss->core;
+
+	core->rst_gt_gpio = devm_gpiod_get_optional(&pdev->dev, "reset_gt",
+						    GPIOD_OUT_HIGH);
+	if (IS_ERR(core->rst_gt_gpio)) {
+		ret = PTR_ERR(core->rst_gt_gpio);
+		if (ret != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Reset GT GPIO not setup in DT\n");
+		return ret;
+	}
+
+	core->num_clks = ARRAY_SIZE(xsdirxss_clks);
+	core->clks = devm_kcalloc(&pdev->dev, core->num_clks,
+				  sizeof(*core->clks), GFP_KERNEL);
+	if (!core->clks)
+		return -ENOMEM;
+
+	for (i = 0; i < core->num_clks; i++)
+		core->clks[i].id = xsdirxss_clks[i];
+
+	ret = devm_clk_bulk_get(&pdev->dev, core->num_clks, core->clks);
+	if (ret)
+		return ret;
+
+	ret = clk_bulk_prepare_enable(core->num_clks, core->clks);
+	if (ret)
+		return ret;
+
+	ret = xsdirxss_parse_of(xsdirxss);
+	if (ret < 0)
+		goto clk_err;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	xsdirxss->core.iomem = devm_ioremap_resource(xsdirxss->core.dev, res);
+	if (IS_ERR(xsdirxss->core.iomem)) {
+		ret = PTR_ERR(xsdirxss->core.iomem);
+		goto clk_err;
+	}
+
+	/* Reset the core and arm all interrupts */
+	xsdirx_streamflow_control(core, false);
+	xsdirx_core_disable(core);
+	xsdirx_clearintr(core, XSDIRX_INTR_ALL_MASK);
+	xsdirx_disableintr(core, XSDIRX_INTR_ALL_MASK);
+	xsdirx_enableintr(core, XSDIRX_INTR_ALL_MASK);
+	xsdirx_globalintr(core, true);
+	xsdirxss_write(core, XSDIRX_CRC_ERRCNT_REG, 0xFFFF);
+
+	/* Initialize V4L2 subdevice and media entity */
+	xsdirxss->pad.flags = MEDIA_PAD_FL_SOURCE;
+
+	/* Initialize the default format */
+	xsdirxss->default_format.code = xsdirxss->vip_format->code;
+	xsdirxss->default_format.field = V4L2_FIELD_NONE;
+	xsdirxss->default_format.colorspace = V4L2_COLORSPACE_DEFAULT;
+	xsdirxss->default_format.width = XSDIRX_DEFAULT_WIDTH;
+	xsdirxss->default_format.height = XSDIRX_DEFAULT_HEIGHT;
+
+	xsdirxss->format = xsdirxss->default_format;
+
+	subdev = &xsdirxss->subdev;
+	v4l2_subdev_init(subdev, &xsdirxss_ops);
+
+	subdev->dev = &pdev->dev;
+	subdev->internal_ops = &xsdirxss_internal_ops;
+	strscpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+
+	subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+	subdev->entity.ops = &xsdirxss_media_ops;
+
+	v4l2_set_subdevdata(subdev, xsdirxss);
+
+	ret = media_entity_pads_init(&subdev->entity, 1, &xsdirxss->pad);
+	if (ret < 0)
+		goto error_media;
+
+	/* Initialise and register the controls */
+	num_ctrls = ARRAY_SIZE(xsdirxss_ctrls);
+
+	if (xsdirxss->core.include_edh)
+		num_edh_ctrls = ARRAY_SIZE(xsdirxss_edh_ctrls);
+
+	v4l2_ctrl_handler_init(&xsdirxss->ctrl_handler,
+			       (num_ctrls + num_edh_ctrls));
+
+	for (i = 0; i < num_ctrls; i++) {
+		struct v4l2_ctrl *ctrl;
+
+		dev_dbg(xsdirxss->core.dev, "%d %s ctrl = 0x%x\n",
+			i, xsdirxss_ctrls[i].name, xsdirxss_ctrls[i].id);
+
+		ctrl = v4l2_ctrl_new_custom(&xsdirxss->ctrl_handler,
+					    &xsdirxss_ctrls[i], NULL);
+		if (!ctrl) {
+			dev_dbg(xsdirxss->core.dev, "Failed to add %s ctrl\n",
+				xsdirxss_ctrls[i].name);
+			/*
+			 * 'ret' was previously left at 0 here, so probe
+			 * reported success on control creation failure.
+			 * Propagate the handler error instead.
+			 */
+			ret = xsdirxss->ctrl_handler.error ?
+				xsdirxss->ctrl_handler.error : -EINVAL;
+			goto error;
+		}
+	}
+
+	if (xsdirxss->core.include_edh) {
+		for (i = 0; i < num_edh_ctrls; i++) {
+			struct v4l2_ctrl *ctrl;
+
+			dev_dbg(xsdirxss->core.dev, "%d %s ctrl = 0x%x\n",
+				i, xsdirxss_edh_ctrls[i].name,
+				xsdirxss_edh_ctrls[i].id);
+
+			ctrl = v4l2_ctrl_new_custom(&xsdirxss->ctrl_handler,
+						    &xsdirxss_edh_ctrls[i],
+						    NULL);
+			if (!ctrl) {
+				dev_dbg(xsdirxss->core.dev, "Failed to add %s ctrl\n",
+					xsdirxss_edh_ctrls[i].name);
+				/* See non-EDH control loop above */
+				ret = xsdirxss->ctrl_handler.error ?
+					xsdirxss->ctrl_handler.error : -EINVAL;
+				goto error;
+			}
+		}
+	}
+
+	if (xsdirxss->ctrl_handler.error) {
+		dev_err(&pdev->dev, "failed to add controls\n");
+		ret = xsdirxss->ctrl_handler.error;
+		goto error;
+	}
+
+	subdev->ctrl_handler = &xsdirxss->ctrl_handler;
+
+	ret = v4l2_ctrl_handler_setup(&xsdirxss->ctrl_handler);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to set controls\n");
+		goto error;
+	}
+
+	platform_set_drvdata(pdev, xsdirxss);
+
+	ret = v4l2_async_register_subdev(subdev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to register subdev\n");
+		goto error;
+	}
+
+	xsdirxss->streaming = false;
+
+	xsdirx_core_enable(core);
+	xsdirxss_gt_reset(core);
+
+	dev_info(xsdirxss->core.dev, "Xilinx SDI Rx Subsystem device found!\n");
+
+	return 0;
+error:
+	v4l2_ctrl_handler_free(&xsdirxss->ctrl_handler);
+error_media:
+	/*
+	 * Entered directly when media_entity_pads_init() fails, before the
+	 * control handler is initialized; freeing the zeroed handler there
+	 * would touch an uninitialized mutex.
+	 */
+	media_entity_cleanup(&subdev->entity);
+	xsdirx_globalintr(core, false);
+	xsdirx_disableintr(core, XSDIRX_INTR_ALL_MASK);
+clk_err:
+	clk_bulk_disable_unprepare(core->num_clks, core->clks);
+	return ret;
+}
+
+/**
+ * xsdirxss_remove - Remove the SDI Rx Subsystem device
+ * @pdev: platform device
+ *
+ * Unregister the V4L2 subdevice, free the controls and media entity, then
+ * quiesce the hardware (global and per-event interrupts off, core and
+ * stream flow disabled) before releasing the clocks. The teardown order
+ * mirrors the reverse of probe.
+ *
+ * Return: 0 always.
+ */
+static int xsdirxss_remove(struct platform_device *pdev)
+{
+	struct xsdirxss_state *xsdirxss = platform_get_drvdata(pdev);
+	struct xsdirxss_core *core = &xsdirxss->core;
+	struct v4l2_subdev *subdev = &xsdirxss->subdev;
+
+	v4l2_async_unregister_subdev(subdev);
+	v4l2_ctrl_handler_free(&xsdirxss->ctrl_handler);
+	media_entity_cleanup(&subdev->entity);
+
+	/* Silence the core before the clocks go away */
+	xsdirx_globalintr(core, false);
+	xsdirx_disableintr(core, XSDIRX_INTR_ALL_MASK);
+	xsdirx_core_disable(core);
+	xsdirx_streamflow_control(core, false);
+
+	clk_bulk_disable_unprepare(core->num_clks, core->clks);
+
+	return 0;
+}
+
+/* Device tree match table for the UHD-SDI Rx Subsystem */
+static const struct of_device_id xsdirxss_of_id_table[] = {
+	{ .compatible = "xlnx,v-smpte-uhdsdi-rx-ss" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, xsdirxss_of_id_table);
+
+static struct platform_driver xsdirxss_driver = {
+	.driver = {
+		.name		= "xilinx-sdirxss",
+		.of_match_table	= xsdirxss_of_id_table,
+	},
+	.probe			= xsdirxss_probe,
+	.remove			= xsdirxss_remove,
+};
+
+module_platform_driver(xsdirxss_driver);
+
+MODULE_AUTHOR("Vishal Sagar <vsagar@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx SDI Rx Subsystem Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-switch.c b/drivers/media/platform/xilinx/xilinx-switch.c
new file mode 100644
index 000000000000..b0052a76c65d
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-switch.c
@@ -0,0 +1,460 @@
+/*
+ * Xilinx Video Switch
+ *
+ * Copyright (C) 2013-2015 Ideas on Board
+ * Copyright (C) 2013-2015 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ * Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-vip.h"
+
+#define XSW_CORE_CH_CTRL 0x0100
+#define XSW_CORE_CH_CTRL_FORCE (1 << 3)
+
+#define XSW_SWITCH_STATUS 0x0104
+
+/**
+ * struct xswitch_device - Xilinx Video Switch device structure
+ * @xvip: Xilinx Video IP device
+ * @pads: media pads
+ * @nsinks: number of sink pads (2 to 8)
+ * @nsources: number of source pads (1 to 8)
+ * @routing: sink pad connected to each source pad (-1 if none)
+ * @formats: active V4L2 media bus formats on sink pads
+ */
+struct xswitch_device {
+ struct xvip_device xvip;
+
+ struct media_pad *pads;
+ unsigned int nsinks;
+ unsigned int nsources;
+
+ int routing[8];
+
+ struct v4l2_mbus_framefmt *formats;
+};
+
+/* Map a generic V4L2 subdev pointer back to its xswitch_device container. */
+static inline struct xswitch_device *to_xsw(struct v4l2_subdev *subdev)
+{
+	return container_of(subdev, struct xswitch_device, xvip.subdev);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Video Operations
+ */
+
+/*
+ * xsw_s_stream - Start or stop the video switch
+ * @subdev: V4L2 subdevice
+ * @enable: non-zero to start streaming, zero to stop
+ *
+ * On stop the IP core is simply halted. On start the per-output routing
+ * register is programmed (4 bits per output: the FORCE bit plus the input
+ * index) and the core is enabled with one channel-enable bit per output.
+ *
+ * Return: 0 always.
+ */
+static int xsw_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+	struct xswitch_device *xsw = to_xsw(subdev);
+	unsigned int unused_input;
+	unsigned int i;
+	u32 routing;
+
+	if (!enable) {
+		xvip_stop(&xsw->xvip);
+		return 0;
+	}
+
+	/*
+	 * All outputs must be routed to an input. When less than 8 inputs are
+	 * synthesized we can use input 7 for that purpose. Otherwise find an
+	 * unused input to connect to unused outputs.
+	 */
+	if (xsw->nsinks == 8) {
+		u32 mask;
+
+		/* Clear the bit of every input referenced by a route */
+		for (i = 0, mask = 0xff; i < xsw->nsources; ++i) {
+			if (xsw->routing[i] != -1)
+				mask &= ~BIT(xsw->routing[i]);
+		}
+
+		/*
+		 * If all inputs are used all outputs are also used. We don't
+		 * need an unused input in that case, use a zero value.
+		 */
+		unused_input = mask ? ffs(mask) - 1 : 0;
+	} else {
+		unused_input = 7;
+	}
+
+	/* Configure routing: one 4-bit field per output. */
+	for (i = 0, routing = 0; i < xsw->nsources; ++i) {
+		unsigned int route;
+
+		route = xsw->routing[i] == -1 ? unused_input : xsw->routing[i];
+		routing |= (XSW_CORE_CH_CTRL_FORCE | route)
+			<< (i * 4);
+	}
+
+	xvip_write(&xsw->xvip, XSW_CORE_CH_CTRL, routing);
+
+	/* Enable the core with one channel-enable bit per output (bits 4+) */
+	xvip_write(&xsw->xvip, XVIP_CTRL_CONTROL,
+		   (((1 << xsw->nsources) - 1) << 4) |
+		   XVIP_CTRL_CONTROL_SW_ENABLE);
+
+	return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+/*
+ * Return the format container for @pad: the per-file-handle try format for
+ * TRY access, the driver's active format for ACTIVE access, or NULL for an
+ * invalid selector.
+ */
+static struct v4l2_mbus_framefmt *
+xsw_get_pad_format(struct xswitch_device *xsw,
+		   struct v4l2_subdev_pad_config *cfg,
+		   unsigned int pad, u32 which)
+{
+	if (which == V4L2_SUBDEV_FORMAT_TRY)
+		return v4l2_subdev_get_try_format(&xsw->xvip.subdev, cfg, pad);
+
+	if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
+		return &xsw->formats[pad];
+
+	return NULL;
+}
+
+/*
+ * xsw_get_format - Get the format on a pad
+ *
+ * A source pad reports the format of the sink pad it is routed from. An
+ * unrouted source pad (routing entry of -1) reports a zeroed format.
+ * Note that @pad is a signed int so the -1 routing sentinel survives the
+ * assignment and can be tested for.
+ */
+static int xsw_get_format(struct v4l2_subdev *subdev,
+			  struct v4l2_subdev_pad_config *cfg,
+			  struct v4l2_subdev_format *fmt)
+{
+	struct xswitch_device *xsw = to_xsw(subdev);
+	int pad = fmt->pad;
+
+	if (pad >= xsw->nsinks) {
+		/* Redirect the source pad to its routed sink pad */
+		pad = xsw->routing[pad - xsw->nsinks];
+		if (pad < 0) {
+			memset(&fmt->format, 0, sizeof(fmt->format));
+			return 0;
+		}
+	}
+
+	fmt->format = *xsw_get_pad_format(xsw, cfg, pad, fmt->which);
+
+	return 0;
+}
+
+/*
+ * xsw_set_format - Set the format on a sink pad
+ *
+ * Source pads mirror the routed sink pad format and are read-only; a set
+ * request on a source pad simply returns the current format. On a sink pad
+ * the requested code is accepted as-is and the size is clamped to the
+ * supported range; field and colorspace are fixed.
+ */
+static int xsw_set_format(struct v4l2_subdev *subdev,
+			  struct v4l2_subdev_pad_config *cfg,
+			  struct v4l2_subdev_format *fmt)
+{
+	struct xswitch_device *xsw = to_xsw(subdev);
+	struct v4l2_mbus_framefmt *sink_fmt;
+	unsigned int width;
+	unsigned int height;
+
+	if (fmt->pad >= xsw->nsinks)
+		return xsw_get_format(subdev, cfg, fmt);
+
+	width = clamp_t(unsigned int, fmt->format.width,
+			XVIP_MIN_WIDTH, XVIP_MAX_WIDTH);
+	height = clamp_t(unsigned int, fmt->format.height,
+			 XVIP_MIN_HEIGHT, XVIP_MAX_HEIGHT);
+
+	sink_fmt = xsw_get_pad_format(xsw, cfg, fmt->pad, fmt->which);
+	sink_fmt->code = fmt->format.code;
+	sink_fmt->width = width;
+	sink_fmt->height = height;
+	sink_fmt->field = V4L2_FIELD_NONE;
+	sink_fmt->colorspace = V4L2_COLORSPACE_SRGB;
+
+	fmt->format = *sink_fmt;
+
+	return 0;
+}
+
+/*
+ * xsw_get_routing - Report the current source-to-sink routing
+ *
+ * Fills at most @route->num_routes entries, then writes the total number
+ * of sources back into num_routes so a caller with a short buffer learns
+ * the required size. Runs under the media graph mutex to get a consistent
+ * snapshot.
+ */
+static int xsw_get_routing(struct v4l2_subdev *subdev,
+			   struct v4l2_subdev_routing *route)
+{
+	struct xswitch_device *xsw = to_xsw(subdev);
+	unsigned int i;
+
+	mutex_lock(&subdev->entity.graph_obj.mdev->graph_mutex);
+
+	for (i = 0; i < min(xsw->nsources, route->num_routes); ++i) {
+		route->routes[i].sink = xsw->routing[i];
+		route->routes[i].source = i;
+	}
+
+	route->num_routes = xsw->nsources;
+
+	mutex_unlock(&subdev->entity.graph_obj.mdev->graph_mutex);
+
+	return 0;
+}
+
+/*
+ * xsw_set_routing - Replace the source-to-sink routing table
+ *
+ * Rejects changes while streaming and validates every requested route
+ * before touching driver state: an out-of-range source index would
+ * previously index past routing[] (out-of-bounds write from userspace
+ * supplied data).
+ *
+ * Return: 0 on success, -EBUSY while streaming, -EINVAL on a bad route.
+ */
+static int xsw_set_routing(struct v4l2_subdev *subdev,
+			   struct v4l2_subdev_routing *route)
+{
+	struct xswitch_device *xsw = to_xsw(subdev);
+	unsigned int i;
+	int ret = 0;
+
+	mutex_lock(&subdev->entity.graph_obj.mdev->graph_mutex);
+
+	if (subdev->entity.stream_count) {
+		ret = -EBUSY;
+		goto done;
+	}
+
+	/* Validate all routes before modifying the routing table */
+	for (i = 0; i < route->num_routes; ++i) {
+		if (route->routes[i].source < xsw->nsinks ||
+		    route->routes[i].source >= xsw->nsinks + xsw->nsources ||
+		    route->routes[i].sink >= xsw->nsinks) {
+			ret = -EINVAL;
+			goto done;
+		}
+	}
+
+	for (i = 0; i < xsw->nsources; ++i)
+		xsw->routing[i] = -1;
+
+	for (i = 0; i < route->num_routes; ++i)
+		xsw->routing[route->routes[i].source - xsw->nsinks] =
+			route->routes[i].sink;
+
+done:
+	mutex_unlock(&subdev->entity.graph_obj.mdev->graph_mutex);
+	return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+/**
+ * xsw_init_formats - Initialize formats on all sink pads
+ * @subdev: switch V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize every sink pad format with default values. If fh is not NULL,
+ * try formats are initialized on the file handle. Otherwise active formats
+ * are initialized on the device. Source pad formats mirror the routed sink
+ * pad and need no explicit initialization.
+ */
+static void xsw_init_formats(struct v4l2_subdev *subdev,
+			     struct v4l2_subdev_fh *fh)
+{
+	struct xswitch_device *xsw = to_xsw(subdev);
+	struct v4l2_subdev_format format;
+	unsigned int i;
+
+	for (i = 0; i < xsw->nsinks; ++i) {
+		memset(&format, 0, sizeof(format));
+
+		/*
+		 * Initialize pad i on each iteration. The loop previously
+		 * set pad 0 every time, leaving the formats of sink pads
+		 * 1..nsinks-1 zeroed.
+		 */
+		format.pad = i;
+		format.which = fh ? V4L2_SUBDEV_FORMAT_TRY
+			    : V4L2_SUBDEV_FORMAT_ACTIVE;
+		format.format.width = 1920;
+		format.format.height = 1080;
+
+		xsw_set_format(subdev, fh ? fh->pad : NULL, &format);
+	}
+}
+
+/* Initialize the try formats when a userspace file handle is opened. */
+static int xsw_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+	xsw_init_formats(subdev, fh);
+
+	return 0;
+}
+
+/* Nothing to release on close. */
+static int xsw_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+	return 0;
+}
+
+/* Ops tables are never modified at runtime; constify them (kernel style). */
+static const struct v4l2_subdev_video_ops xsw_video_ops = {
+	.s_stream = xsw_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops xsw_pad_ops = {
+	.enum_mbus_code = xvip_enum_mbus_code,
+	.enum_frame_size = xvip_enum_frame_size,
+	.get_fmt = xsw_get_format,
+	.set_fmt = xsw_set_format,
+	.get_routing = xsw_get_routing,
+	.set_routing = xsw_set_routing,
+};
+
+static const struct v4l2_subdev_ops xsw_ops = {
+	.video = &xsw_video_ops,
+	.pad = &xsw_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xsw_internal_ops = {
+	.open = xsw_open,
+	.close = xsw_close,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media Operations
+ */
+
+/*
+ * xsw_has_route - Check whether two pads are internally connected
+ *
+ * Two pads are connected when they resolve to the same sink. An unrouted
+ * source pad resolves to -1; previously two unrouted sources both mapped
+ * to (unsigned)-1 and incorrectly compared as connected, so the sentinel
+ * is now rejected explicitly (using signed locals to preserve it).
+ */
+static bool xsw_has_route(struct media_entity *entity, unsigned int pad0,
+			  unsigned int pad1)
+{
+	struct xswitch_device *xsw = container_of(entity, struct xswitch_device,
+						  xvip.subdev.entity);
+	int sink0, sink1;
+
+	/* Two sinks are never connected together. */
+	if (pad0 < xsw->nsinks && pad1 < xsw->nsinks)
+		return false;
+
+	sink0 = pad0 < xsw->nsinks ?
+		(int)pad0 : xsw->routing[pad0 - xsw->nsinks];
+	sink1 = pad1 < xsw->nsinks ?
+		(int)pad1 : xsw->routing[pad1 - xsw->nsinks];
+
+	/* An unrouted source pad has no active route. */
+	if (sink0 < 0 || sink1 < 0)
+		return false;
+
+	return sink0 == sink1;
+}
+
+/* Media operations: routing-aware connectivity for link validation. */
+static const struct media_entity_operations xsw_media_ops = {
+	.link_validate = v4l2_subdev_link_validate,
+	.has_route = xsw_has_route,
+};
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+/*
+ * xsw_parse_of - Parse the input/output counts from the device tree
+ *
+ * Reads the #xlnx,inputs and #xlnx,outputs properties and validates them
+ * against the hardware limits (2-8 inputs, 1-8 outputs, matching the
+ * fixed-size routing[8] table). Unvalidated DT values would previously
+ * drive out-of-bounds routing accesses and undersized pad allocations.
+ *
+ * Return: 0 on success, the of_property_read_u32() error or -EINVAL on
+ * out-of-range values.
+ */
+static int xsw_parse_of(struct xswitch_device *xsw)
+{
+	struct device_node *node = xsw->xvip.dev->of_node;
+	int ret;
+
+	ret = of_property_read_u32(node, "#xlnx,inputs", &xsw->nsinks);
+	if (ret < 0) {
+		dev_err(xsw->xvip.dev, "missing or invalid #xlnx,%s property\n",
+			"inputs");
+		return ret;
+	}
+
+	ret = of_property_read_u32(node, "#xlnx,outputs", &xsw->nsources);
+	if (ret < 0) {
+		dev_err(xsw->xvip.dev, "missing or invalid #xlnx,%s property\n",
+			"outputs");
+		return ret;
+	}
+
+	if (xsw->nsinks < 2 || xsw->nsinks > 8) {
+		dev_err(xsw->xvip.dev, "invalid number of inputs %u\n",
+			xsw->nsinks);
+		return -EINVAL;
+	}
+
+	if (xsw->nsources < 1 || xsw->nsources > 8) {
+		dev_err(xsw->xvip.dev, "invalid number of outputs %u\n",
+			xsw->nsources);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * xsw_probe - Probe the video switch device
+ *
+ * Parse the DT, map resources, set up pads, default formats and default
+ * 1:1 routing, then register the V4L2 subdevice.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int xsw_probe(struct platform_device *pdev)
+{
+	struct v4l2_subdev *subdev;
+	struct xswitch_device *xsw;
+	unsigned int npads;
+	unsigned int i;
+	int ret;
+
+	xsw = devm_kzalloc(&pdev->dev, sizeof(*xsw), GFP_KERNEL);
+	if (!xsw)
+		return -ENOMEM;
+
+	xsw->xvip.dev = &pdev->dev;
+
+	ret = xsw_parse_of(xsw);
+	if (ret < 0)
+		return ret;
+
+	ret = xvip_init_resources(&xsw->xvip);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Initialize V4L2 subdevice and media entity. Pad numbers depend on
+	 * the number of pads.
+	 */
+	npads = xsw->nsinks + xsw->nsources;
+	xsw->pads = devm_kcalloc(&pdev->dev, npads, sizeof(*xsw->pads),
+				 GFP_KERNEL);
+	if (!xsw->pads) {
+		/* 'ret' held 0 here before, silently reporting success */
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	for (i = 0; i < xsw->nsinks; ++i)
+		xsw->pads[i].flags = MEDIA_PAD_FL_SINK;
+	for (; i < npads; ++i)
+		xsw->pads[i].flags = MEDIA_PAD_FL_SOURCE;
+
+	xsw->formats = devm_kcalloc(&pdev->dev, xsw->nsinks,
+				    sizeof(*xsw->formats), GFP_KERNEL);
+	if (!xsw->formats) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	/* Default routing: output i takes input i, extra outputs unrouted */
+	for (i = 0; i < xsw->nsources; ++i)
+		xsw->routing[i] = i < xsw->nsinks ? i : -1;
+
+	subdev = &xsw->xvip.subdev;
+	v4l2_subdev_init(subdev, &xsw_ops);
+	subdev->dev = &pdev->dev;
+	subdev->internal_ops = &xsw_internal_ops;
+	strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+	v4l2_set_subdevdata(subdev, xsw);
+	subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+	subdev->entity.ops = &xsw_media_ops;
+
+	xsw_init_formats(subdev, NULL);
+
+	ret = media_entity_pads_init(&subdev->entity, npads, xsw->pads);
+	if (ret < 0)
+		goto error;
+
+	platform_set_drvdata(pdev, xsw);
+
+	xvip_print_version(&xsw->xvip);
+
+	ret = v4l2_async_register_subdev(subdev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to register subdev\n");
+		goto error;
+	}
+
+	return 0;
+
+error:
+	media_entity_cleanup(&subdev->entity);
+	xvip_cleanup_resources(&xsw->xvip);
+	return ret;
+}
+
+/* Unregister the subdevice and release the IP core resources. */
+static int xsw_remove(struct platform_device *pdev)
+{
+	struct xswitch_device *xsw = platform_get_drvdata(pdev);
+	struct v4l2_subdev *subdev = &xsw->xvip.subdev;
+
+	v4l2_async_unregister_subdev(subdev);
+	media_entity_cleanup(&subdev->entity);
+
+	xvip_cleanup_resources(&xsw->xvip);
+
+	return 0;
+}
+
+/* Device tree match table for the video switch IP core */
+static const struct of_device_id xsw_of_id_table[] = {
+	{ .compatible = "xlnx,v-switch-1.0" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, xsw_of_id_table);
+
+static struct platform_driver xsw_driver = {
+	.driver = {
+		.name		= "xilinx-switch",
+		.of_match_table	= xsw_of_id_table,
+	},
+	.probe			= xsw_probe,
+	.remove			= xsw_remove,
+};
+
+module_platform_driver(xsw_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Xilinx Video Switch Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-tpg.c b/drivers/media/platform/xilinx/xilinx-tpg.c
index ed01bedb5db6..f840bc098d9e 100644
--- a/drivers/media/platform/xilinx/xilinx-tpg.c
+++ b/drivers/media/platform/xilinx/xilinx-tpg.c
@@ -20,6 +20,7 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-subdev.h>
+#include "xilinx-hls-common.h"
#include "xilinx-vip.h"
#include "xilinx-vtc.h"
@@ -58,6 +59,36 @@
#define XTPG_BAYER_PHASE_BGGR 3
#define XTPG_BAYER_PHASE_OFF 4
+/* TPG v7 is a completely redesigned IP using Vivado HLS
+ * having a different AXI4-Lite interface
+ */
+#define XTPG_HLS_BG_PATTERN 0x0020
+#define XTPG_HLS_FG_PATTERN 0x0028
+#define XTPG_HLS_FG_PATTERN_CROSS_HAIR (1 << 1)
+#define XTPG_HLS_MASK_ID 0x0030
+#define XTPG_HLS_MOTION_SPEED 0x0038
+#define XTPG_HLS_COLOR_FORMAT 0x0040
+#define XTPG_HLS_COLOR_FORMAT_RGB 0
+#define XTPG_HLS_COLOR_FORMAT_YUV_444 1
+#define XTPG_HLS_COLOR_FORMAT_YUV_422 2
+#define XTPG_HLS_COLOR_FORMAT_YUV_420 3
+#define XTPG_HLS_CROSS_HAIR_HOR 0x0048
+#define XTPG_HLS_CROSS_HAIR_VER 0x0050
+#define XTPG_HLS_ZPLATE_HOR_CNTL_START 0x0058
+#define XTPG_HLS_ZPLATE_HOR_CNTL_DELTA 0x0060
+#define XTPG_HLS_ZPLATE_VER_CNTL_START 0x0068
+#define XTPG_HLS_ZPLATE_VER_CNTL_DELTA 0x0070
+#define XTPG_HLS_BOX_SIZE 0x0078
+#define XTPG_HLS_BOX_COLOR_RED_CB 0x0080
+#define XTPG_HLS_BOX_COLOR_GREEN_CR 0x0088
+#define XTPG_HLS_BOX_COLOR_BLUE_Y 0x0090
+#define XTPG_HLS_ENABLE_INPUT 0x0098
+#define XTPG_HLS_USE_INPUT_VID_STREAM (1 << 0)
+#define XTPG_HLS_PASS_THRU_START_X 0x00a0
+#define XTPG_HLS_PASS_THRU_START_Y 0x00a8
+#define XTPG_HLS_PASS_THRU_END_X 0x00b0
+#define XTPG_HLS_PASS_THRU_END_Y 0x00b8
+
/*
* The minimum blanking value is one clock cycle for the front porch, one clock
* cycle for the sync pulse and one clock cycle for the back porch.
@@ -67,6 +98,15 @@
#define XTPG_MIN_VBLANK 3
#define XTPG_MAX_VBLANK (XVTC_MAX_VSIZE - XVIP_MIN_HEIGHT)
+#define XTPG_MIN_WIDTH (64)
+#define XTPG_MIN_HEIGHT (64)
+#define XTPG_MAX_WIDTH (10328)
+#define XTPG_MAX_HEIGHT (7760)
+
+#define XTPG_MIN_PPC 1
+
+#define XTPG_MIN_FRM_INT 1
+
/**
* struct xtpg_device - Xilinx Test Pattern Generator device structure
* @xvip: Xilinx Video IP device
@@ -82,8 +122,15 @@
* @vblank: vertical blanking control
* @pattern: test pattern control
* @streaming: is the video stream active
+ * @is_hls: whether the IP core is HLS based
* @vtc: video timing controller
* @vtmux_gpio: video timing mux GPIO
+ * @rst_gpio: reset IP core GPIO
+ * @max_width: Maximum width supported by this instance
+ * @max_height: Maximum height supported by this instance
+ * @fi_d: frame interval denominator
+ * @fi_n: frame interval numerator
+ * @ppc: Pixels per clock control
*/
struct xtpg_device {
struct xvip_device xvip;
@@ -102,9 +149,17 @@ struct xtpg_device {
struct v4l2_ctrl *vblank;
struct v4l2_ctrl *pattern;
bool streaming;
+ bool is_hls;
struct xvtc_device *vtc;
struct gpio_desc *vtmux_gpio;
+ struct gpio_desc *rst_gpio;
+
+ u32 max_width;
+ u32 max_height;
+ u32 fi_d;
+ u32 fi_n;
+ u32 ppc;
};
static inline struct xtpg_device *to_tpg(struct v4l2_subdev *subdev)
@@ -128,6 +183,32 @@ static u32 xtpg_get_bayer_phase(unsigned int code)
}
}
+/*
+ * xtpg_config_vtc - Program the video timing controller for a frame size
+ *
+ * Horizontal timing values are divided by the pixels-per-clock setting.
+ * fps is the integer quotient of the frame interval denominator by its
+ * numerator (fi_n >= 1 is enforced by xtpg_s_frame_interval, so no
+ * division by zero). Totals are capped at the VTC hardware maximums.
+ */
+static void xtpg_config_vtc(struct xtpg_device *xtpg, int width, int height)
+{
+
+	struct xvtc_config config = {
+		.hblank_start = width / xtpg->ppc,
+		.hsync_start = width / xtpg->ppc + 1,
+		.vblank_start = height,
+		.vsync_start = height + 1,
+		.fps = xtpg->fi_d / xtpg->fi_n,
+	};
+	unsigned int htotal;
+	unsigned int vtotal;
+
+	htotal = min_t(unsigned int, XVTC_MAX_HSIZE,
+		       (v4l2_ctrl_g_ctrl(xtpg->hblank) + width) / xtpg->ppc);
+	vtotal = min_t(unsigned int, XVTC_MAX_VSIZE,
+		       v4l2_ctrl_g_ctrl(xtpg->vblank) + height);
+
+	config.hsync_end = htotal - 1;
+	config.hsize = htotal;
+	config.vsync_end = vtotal - 1;
+	config.vsize = vtotal;
+
+	xvtc_generator_start(xtpg->vtc, &config);
+}
+
static void __xtpg_update_pattern_control(struct xtpg_device *xtpg,
bool passthrough, bool pattern)
{
@@ -164,6 +245,33 @@ static void xtpg_update_pattern_control(struct xtpg_device *xtpg,
* V4L2 Subdevice Video Operations
*/
+/* Report the current frame interval (numerator/denominator seconds). */
+static int xtpg_g_frame_interval(struct v4l2_subdev *subdev,
+				 struct v4l2_subdev_frame_interval *fi)
+{
+	struct xtpg_device *xtpg = to_tpg(subdev);
+
+	fi->interval.numerator = xtpg->fi_n;
+	fi->interval.denominator = xtpg->fi_d;
+
+	return 0;
+}
+
+/*
+ * Set the frame interval. A zero numerator or denominator is replaced by
+ * the 1/1 minimum, which also guarantees xtpg_config_vtc() never divides
+ * by zero.
+ */
+static int xtpg_s_frame_interval(struct v4l2_subdev *subdev,
+				 struct v4l2_subdev_frame_interval *fi)
+{
+	struct xtpg_device *xtpg = to_tpg(subdev);
+
+	if (!fi->interval.numerator || !fi->interval.denominator) {
+		xtpg->fi_n = XTPG_MIN_FRM_INT;
+		xtpg->fi_d = XTPG_MIN_FRM_INT;
+	} else {
+		xtpg->fi_n = fi->interval.numerator;
+		xtpg->fi_d = fi->interval.denominator;
+	}
+
+	return 0;
+}
+
static int xtpg_s_stream(struct v4l2_subdev *subdev, int enable)
{
struct xtpg_device *xtpg = to_tpg(subdev);
@@ -173,7 +281,20 @@ static int xtpg_s_stream(struct v4l2_subdev *subdev, int enable)
u32 bayer_phase;
if (!enable) {
- xvip_stop(&xtpg->xvip);
+ if (!xtpg->is_hls) {
+ xvip_stop(&xtpg->xvip);
+ } else {
+ /*
+ * There is an known issue in TPG v7.0 that on
+ * resolution change it doesn't generates pattern
+ * correctly i.e some hor/ver offset is added.
+ * As a workaround issue reset on stop.
+ */
+ gpiod_set_value_cansleep(xtpg->rst_gpio, 0x1);
+ gpiod_set_value_cansleep(xtpg->rst_gpio, 0x0);
+ v4l2_ctrl_handler_setup(&xtpg->ctrl_handler);
+ }
+
if (xtpg->vtc)
xvtc_generator_stop(xtpg->vtc);
@@ -182,31 +303,36 @@ static int xtpg_s_stream(struct v4l2_subdev *subdev, int enable)
return 0;
}
- xvip_set_frame_size(&xtpg->xvip, &xtpg->formats[0]);
-
- if (xtpg->vtc) {
- struct xvtc_config config = {
- .hblank_start = width,
- .hsync_start = width + 1,
- .vblank_start = height,
- .vsync_start = height + 1,
- };
- unsigned int htotal;
- unsigned int vtotal;
-
- htotal = min_t(unsigned int, XVTC_MAX_HSIZE,
- v4l2_ctrl_g_ctrl(xtpg->hblank) + width);
- vtotal = min_t(unsigned int, XVTC_MAX_VSIZE,
- v4l2_ctrl_g_ctrl(xtpg->vblank) + height);
-
- config.hsync_end = htotal - 1;
- config.hsize = htotal;
- config.vsync_end = vtotal - 1;
- config.vsize = vtotal;
-
- xvtc_generator_start(xtpg->vtc, &config);
+ if (xtpg->is_hls) {
+ u32 fmt = 0;
+
+ switch (xtpg->formats[0].code) {
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ case MEDIA_BUS_FMT_VYYUYY10_4X20:
+ fmt = XTPG_HLS_COLOR_FORMAT_YUV_420;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ fmt = XTPG_HLS_COLOR_FORMAT_YUV_422;
+ break;
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ case MEDIA_BUS_FMT_VUY10_1X30:
+ fmt = XTPG_HLS_COLOR_FORMAT_YUV_444;
+ break;
+ case MEDIA_BUS_FMT_RBG888_1X24:
+ case MEDIA_BUS_FMT_RBG101010_1X30:
+ fmt = XTPG_HLS_COLOR_FORMAT_RGB;
+ break;
+ }
+ xvip_write(&xtpg->xvip, XTPG_HLS_COLOR_FORMAT, fmt);
+ xvip_write(&xtpg->xvip, XHLS_REG_COLS, width);
+ xvip_write(&xtpg->xvip, XHLS_REG_ROWS, height);
+ } else {
+ xvip_set_frame_size(&xtpg->xvip, &xtpg->formats[0]);
}
+ if (xtpg->vtc)
+ xtpg_config_vtc(xtpg, width, height);
/*
* Configure the bayer phase and video timing mux based on the
* operation mode (passthrough or test pattern generation). The test
@@ -215,7 +341,11 @@ static int xtpg_s_stream(struct v4l2_subdev *subdev, int enable)
*/
mutex_lock(xtpg->ctrl_handler.lock);
- xvip_clr_and_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_BG_PATTERN,
+ xtpg->pattern->cur.val);
+ else
+ xvip_clr_and_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
XTPG_PATTERN_MASK, xtpg->pattern->cur.val);
/*
@@ -229,18 +359,26 @@ static int xtpg_s_stream(struct v4l2_subdev *subdev, int enable)
mutex_unlock(xtpg->ctrl_handler.lock);
- /*
- * For TPG v5.0, the bayer phase needs to be off for the pass through
- * mode, otherwise the external input would be subsampled.
- */
- bayer_phase = passthrough ? XTPG_BAYER_PHASE_OFF
- : xtpg_get_bayer_phase(xtpg->formats[0].code);
- xvip_write(&xtpg->xvip, XTPG_BAYER_PHASE, bayer_phase);
-
if (xtpg->vtmux_gpio)
gpiod_set_value_cansleep(xtpg->vtmux_gpio, !passthrough);
- xvip_start(&xtpg->xvip);
+ if (xtpg->is_hls) {
+ xvip_set(&xtpg->xvip, XTPG_HLS_ENABLE_INPUT,
+ XTPG_HLS_USE_INPUT_VID_STREAM);
+ xvip_set(&xtpg->xvip, XVIP_CTRL_CONTROL,
+ XHLS_REG_CTRL_AUTO_RESTART |
+ XVIP_CTRL_CONTROL_SW_ENABLE);
+ } else {
+ /*
+ * For TPG v5.0, the bayer phase needs to be off for the pass
+ * through mode, otherwise the external input would
+ * be subsampled.
+ */
+ bayer_phase = passthrough ? XTPG_BAYER_PHASE_OFF
+ : xtpg_get_bayer_phase(xtpg->formats[0].code);
+ xvip_write(&xtpg->xvip, XTPG_BAYER_PHASE, bayer_phase);
+ xvip_start(&xtpg->xvip);
+ }
return 0;
}
@@ -300,7 +438,27 @@ static int xtpg_set_format(struct v4l2_subdev *subdev,
__format->code = fmt->format.code;
}
- xvip_set_format_size(__format, fmt);
+ if (xtpg->is_hls) {
+ switch (fmt->format.code) {
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ case MEDIA_BUS_FMT_VYYUYY10_4X20:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ case MEDIA_BUS_FMT_VUY10_1X30:
+ case MEDIA_BUS_FMT_RBG888_1X24:
+ case MEDIA_BUS_FMT_RBG101010_1X30:
+ __format->code = fmt->format.code;
+ break;
+ default:
+ __format->code = xtpg->default_format.code;
+ }
+ }
+
+ __format->width = clamp_t(unsigned int, fmt->format.width,
+ XTPG_MIN_WIDTH, xtpg->max_width);
+ __format->height = clamp_t(unsigned int, fmt->format.height,
+ XTPG_MIN_HEIGHT, xtpg->max_height);
fmt->format = *__format;
@@ -322,6 +480,7 @@ static int xtpg_enum_frame_size(struct v4l2_subdev *subdev,
struct v4l2_subdev_frame_size_enum *fse)
{
struct v4l2_mbus_framefmt *format;
+ struct xtpg_device *xtpg = to_tpg(subdev);
format = v4l2_subdev_get_try_format(subdev, cfg, fse->pad);
@@ -330,12 +489,13 @@ static int xtpg_enum_frame_size(struct v4l2_subdev *subdev,
/* Min / max values for pad 0 is always fixed in both one and two pads
* modes. In two pads mode, the source pad(= 1) size is identical to
- * the sink pad size */
+ * the sink pad size.
+ */
if (fse->pad == 0) {
- fse->min_width = XVIP_MIN_WIDTH;
- fse->max_width = XVIP_MAX_WIDTH;
- fse->min_height = XVIP_MIN_HEIGHT;
- fse->max_height = XVIP_MAX_HEIGHT;
+ fse->min_width = XTPG_MIN_WIDTH;
+ fse->max_width = xtpg->max_width;
+ fse->min_height = XTPG_MIN_HEIGHT;
+ fse->max_height = xtpg->max_height;
} else {
fse->min_width = format->width;
fse->max_width = format->width;
@@ -374,8 +534,12 @@ static int xtpg_s_ctrl(struct v4l2_ctrl *ctrl)
ctrl_handler);
switch (ctrl->id) {
case V4L2_CID_TEST_PATTERN:
- xvip_clr_and_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
- XTPG_PATTERN_MASK, ctrl->val);
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_BG_PATTERN,
+ ctrl->val);
+ else
+ xvip_clr_and_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
+ XTPG_PATTERN_MASK, ctrl->val);
return 0;
case V4L2_CID_XILINX_TPG_CROSS_HAIRS:
xvip_clr_or_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
@@ -386,10 +550,13 @@ static int xtpg_s_ctrl(struct v4l2_ctrl *ctrl)
XTPG_PATTERN_CONTROL_MOVING_BOX, ctrl->val);
return 0;
case V4L2_CID_XILINX_TPG_COLOR_MASK:
- xvip_clr_and_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
- XTPG_PATTERN_CONTROL_COLOR_MASK_MASK,
- ctrl->val <<
- XTPG_PATTERN_CONTROL_COLOR_MASK_SHIFT);
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_MASK_ID, ctrl->val);
+ else
+ xvip_clr_and_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
+ XTPG_PATTERN_CONTROL_COLOR_MASK_MASK,
+ ctrl->val <<
+ XTPG_PATTERN_CONTROL_COLOR_MASK_SHIFT);
return 0;
case V4L2_CID_XILINX_TPG_STUCK_PIXEL:
xvip_clr_or_set(&xtpg->xvip, XTPG_PATTERN_CONTROL,
@@ -404,43 +571,85 @@ static int xtpg_s_ctrl(struct v4l2_ctrl *ctrl)
XTPG_PATTERN_CONTROL_MOTION, ctrl->val);
return 0;
case V4L2_CID_XILINX_TPG_MOTION_SPEED:
- xvip_write(&xtpg->xvip, XTPG_MOTION_SPEED, ctrl->val);
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_MOTION_SPEED,
+ ctrl->val);
+ else
+ xvip_write(&xtpg->xvip, XTPG_MOTION_SPEED, ctrl->val);
return 0;
case V4L2_CID_XILINX_TPG_CROSS_HAIR_ROW:
- xvip_clr_and_set(&xtpg->xvip, XTPG_CROSS_HAIRS,
- XTPG_CROSS_HAIRS_ROW_MASK,
- ctrl->val << XTPG_CROSS_HAIRS_ROW_SHIFT);
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_CROSS_HAIR_HOR,
+ ctrl->val);
+ else
+ xvip_clr_and_set(&xtpg->xvip, XTPG_CROSS_HAIRS,
+ XTPG_CROSS_HAIRS_ROW_MASK,
+ ctrl->val <<
+ XTPG_CROSS_HAIRS_ROW_SHIFT);
return 0;
case V4L2_CID_XILINX_TPG_CROSS_HAIR_COLUMN:
- xvip_clr_and_set(&xtpg->xvip, XTPG_CROSS_HAIRS,
- XTPG_CROSS_HAIRS_COLUMN_MASK,
- ctrl->val << XTPG_CROSS_HAIRS_COLUMN_SHIFT);
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_CROSS_HAIR_VER,
+ ctrl->val);
+ else
+ xvip_clr_and_set(&xtpg->xvip, XTPG_CROSS_HAIRS,
+ XTPG_CROSS_HAIRS_COLUMN_MASK,
+ ctrl->val <<
+ XTPG_CROSS_HAIRS_COLUMN_SHIFT);
return 0;
case V4L2_CID_XILINX_TPG_ZPLATE_HOR_START:
- xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_HOR_CONTROL,
- XTPG_ZPLATE_START_MASK,
- ctrl->val << XTPG_ZPLATE_START_SHIFT);
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_ZPLATE_HOR_CNTL_START,
+ ctrl->val);
+ else
+ xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_HOR_CONTROL,
+ XTPG_ZPLATE_START_MASK,
+ ctrl->val << XTPG_ZPLATE_START_SHIFT);
return 0;
case V4L2_CID_XILINX_TPG_ZPLATE_HOR_SPEED:
- xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_HOR_CONTROL,
- XTPG_ZPLATE_SPEED_MASK,
- ctrl->val << XTPG_ZPLATE_SPEED_SHIFT);
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_ZPLATE_HOR_CNTL_DELTA,
+ ctrl->val);
+ else
+ xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_HOR_CONTROL,
+ XTPG_ZPLATE_SPEED_MASK,
+ ctrl->val << XTPG_ZPLATE_SPEED_SHIFT);
return 0;
case V4L2_CID_XILINX_TPG_ZPLATE_VER_START:
- xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_VER_CONTROL,
- XTPG_ZPLATE_START_MASK,
- ctrl->val << XTPG_ZPLATE_START_SHIFT);
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_ZPLATE_VER_CNTL_START,
+ ctrl->val);
+ else
+ xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_VER_CONTROL,
+ XTPG_ZPLATE_START_MASK,
+ ctrl->val << XTPG_ZPLATE_START_SHIFT);
return 0;
case V4L2_CID_XILINX_TPG_ZPLATE_VER_SPEED:
- xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_VER_CONTROL,
- XTPG_ZPLATE_SPEED_MASK,
- ctrl->val << XTPG_ZPLATE_SPEED_SHIFT);
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_ZPLATE_VER_CNTL_DELTA,
+ ctrl->val);
+ else
+ xvip_clr_and_set(&xtpg->xvip, XTPG_ZPLATE_VER_CONTROL,
+ XTPG_ZPLATE_SPEED_MASK,
+ ctrl->val << XTPG_ZPLATE_SPEED_SHIFT);
return 0;
case V4L2_CID_XILINX_TPG_BOX_SIZE:
- xvip_write(&xtpg->xvip, XTPG_BOX_SIZE, ctrl->val);
+ if (xtpg->is_hls)
+ xvip_write(&xtpg->xvip, XTPG_HLS_BOX_SIZE, ctrl->val);
+ else
+ xvip_write(&xtpg->xvip, XTPG_BOX_SIZE, ctrl->val);
return 0;
case V4L2_CID_XILINX_TPG_BOX_COLOR:
- xvip_write(&xtpg->xvip, XTPG_BOX_COLOR, ctrl->val);
+ if (xtpg->is_hls) {
+ xvip_write(&xtpg->xvip, XTPG_HLS_BOX_COLOR_RED_CB,
+ ctrl->val >> 16);
+ xvip_write(&xtpg->xvip, XTPG_HLS_BOX_COLOR_GREEN_CR,
+ ctrl->val >> 8);
+ xvip_write(&xtpg->xvip, XTPG_HLS_BOX_COLOR_BLUE_Y,
+ ctrl->val);
+ } else {
+ xvip_write(&xtpg->xvip, XTPG_BOX_COLOR, ctrl->val);
+ }
return 0;
case V4L2_CID_XILINX_TPG_STUCK_PIXEL_THRESH:
xvip_write(&xtpg->xvip, XTPG_STUCK_PIXEL_THRESH, ctrl->val);
@@ -448,6 +657,9 @@ static int xtpg_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_XILINX_TPG_NOISE_GAIN:
xvip_write(&xtpg->xvip, XTPG_NOISE_GAIN, ctrl->val);
return 0;
+ case V4L2_CID_XILINX_TPG_HLS_FG_PATTERN:
+ xvip_write(&xtpg->xvip, XTPG_HLS_FG_PATTERN, ctrl->val);
+ return 0;
}
return 0;
@@ -461,6 +673,8 @@ static const struct v4l2_subdev_core_ops xtpg_core_ops = {
};
static const struct v4l2_subdev_video_ops xtpg_video_ops = {
+ .g_frame_interval = xtpg_g_frame_interval,
+ .s_frame_interval = xtpg_s_frame_interval,
.s_stream = xtpg_s_stream,
};
@@ -505,60 +719,51 @@ static const char *const xtpg_pattern_strings[] = {
"Black/White Checker Board",
};
-static struct v4l2_ctrl_config xtpg_ctrls[] = {
+/* Menu strings for the V4L2_CID_TEST_PATTERN control of the HLS TPG. */
+static const char *const xtpg_hls_pattern_strings[] = {
+ "Passthrough",
+ "Horizontal Ramp",
+ "Vertical Ramp",
+ "Temporal Ramp",
+ "Solid Red",
+ "Solid Green",
+ "Solid Blue",
+ "Solid Black",
+ "Solid White",
+ "Color Bars",
+ "Zone Plate",
+ "Tartan Color Bars",
+ "Cross Hatch",
+ "Color Sweep",
+ "Vertical/Horizontal Ramps",
+ "Black/White Checker Board",
+ "PseudoRandom",
+};
+
+/* Menu strings for the HLS TPG foreground overlay pattern control. */
+static const char *const xtpg_hls_fg_strings[] = {
+ "No Overlay",
+ "Moving Box",
+ "Cross Hairs",
+};
+
+/* Foreground pattern menu control; registered only when the core is HLS. */
+static const struct v4l2_ctrl_config xtpg_hls_fg_ctrl = {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_HLS_FG_PATTERN,
+ .name = "Test Pattern: Foreground Pattern",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 0,
+ .max = ARRAY_SIZE(xtpg_hls_fg_strings) - 1,
+ .qmenu = xtpg_hls_fg_strings,
+};
+
+static struct v4l2_ctrl_config xtpg_common_ctrls[] = {
{
- .ops = &xtpg_ctrl_ops,
- .id = V4L2_CID_XILINX_TPG_CROSS_HAIRS,
- .name = "Test Pattern: Cross Hairs",
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .min = false,
- .max = true,
- .step = 1,
- .def = 0,
- }, {
- .ops = &xtpg_ctrl_ops,
- .id = V4L2_CID_XILINX_TPG_MOVING_BOX,
- .name = "Test Pattern: Moving Box",
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .min = false,
- .max = true,
- .step = 1,
- .def = 0,
- }, {
- .ops = &xtpg_ctrl_ops,
- .id = V4L2_CID_XILINX_TPG_COLOR_MASK,
- .name = "Test Pattern: Color Mask",
- .type = V4L2_CTRL_TYPE_BITMASK,
- .min = 0,
- .max = 0xf,
- .def = 0,
- }, {
- .ops = &xtpg_ctrl_ops,
- .id = V4L2_CID_XILINX_TPG_STUCK_PIXEL,
- .name = "Test Pattern: Stuck Pixel",
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .min = false,
- .max = true,
- .step = 1,
- .def = 0,
- }, {
- .ops = &xtpg_ctrl_ops,
- .id = V4L2_CID_XILINX_TPG_NOISE,
- .name = "Test Pattern: Noise",
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .min = false,
- .max = true,
- .step = 1,
- .def = 0,
- }, {
- .ops = &xtpg_ctrl_ops,
- .id = V4L2_CID_XILINX_TPG_MOTION,
- .name = "Test Pattern: Motion",
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .min = false,
- .max = true,
- .step = 1,
- .def = 0,
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_COLOR_MASK,
+ .name = "Test Pattern: Color Mask",
+ .type = V4L2_CTRL_TYPE_BITMASK,
+ .min = 0,
+ .max = 0x7,
+ .def = 0,
}, {
.ops = &xtpg_ctrl_ops,
.id = V4L2_CID_XILINX_TPG_MOTION_SPEED,
@@ -642,12 +847,61 @@ static struct v4l2_ctrl_config xtpg_ctrls[] = {
}, {
.ops = &xtpg_ctrl_ops,
.id = V4L2_CID_XILINX_TPG_BOX_COLOR,
- .name = "Test Pattern: Box Color(RGB)",
+ .name = "Test Pattern: Box Color(RGB/YCbCr)",
.type = V4L2_CTRL_TYPE_INTEGER,
.min = 0,
.max = (1 << 24) - 1,
.step = 1,
.def = 0,
+ },
+};
+
+static struct v4l2_ctrl_config xtpg_ctrls[] = {
+ {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_CROSS_HAIRS,
+ .name = "Test Pattern: Cross Hairs",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ .def = 0,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_MOVING_BOX,
+ .name = "Test Pattern: Moving Box",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ .def = 0,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_STUCK_PIXEL,
+ .name = "Test Pattern: Stuck Pixel",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ .def = 0,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_NOISE,
+ .name = "Test Pattern: Noise",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ .def = 0,
+ }, {
+ .ops = &xtpg_ctrl_ops,
+ .id = V4L2_CID_XILINX_TPG_MOTION,
+ .name = "Test Pattern: Motion",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ .def = 0,
}, {
.ops = &xtpg_ctrl_ops,
.id = V4L2_CID_XILINX_TPG_STUCK_PIXEL_THRESH,
@@ -713,6 +967,49 @@ static int xtpg_parse_of(struct xtpg_device *xtpg)
struct device_node *port;
unsigned int nports = 0;
bool has_endpoint = false;
+ int ret;
+
+ if (!of_device_is_compatible(dev->of_node, "xlnx,v-tpg-5.0"))
+ xtpg->is_hls = true;
+
+ ret = of_property_read_u32(node, "xlnx,max-height",
+ &xtpg->max_height);
+ if (ret < 0) {
+ if (of_device_is_compatible(dev->of_node, "xlnx,v-tpg-8.0")) {
+ dev_err(dev, "xlnx,max-height dt property is missing!");
+ return -EINVAL;
+ }
+ xtpg->max_height = XTPG_MAX_HEIGHT;
+ } else if (xtpg->max_height > XTPG_MAX_HEIGHT ||
+ xtpg->max_height < XTPG_MIN_HEIGHT) {
+ dev_err(dev, "Invalid height in dt");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-width",
+ &xtpg->max_width);
+ if (ret < 0) {
+ if (of_device_is_compatible(dev->of_node, "xlnx,v-tpg-8.0")) {
+ dev_err(dev, "xlnx,max-width dt property is missing!");
+ return -EINVAL;
+ }
+ xtpg->max_width = XTPG_MAX_WIDTH;
+ } else if (xtpg->max_width > XTPG_MAX_WIDTH ||
+ xtpg->max_width < XTPG_MIN_WIDTH) {
+ dev_err(dev, "Invalid width in dt");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,ppc",
+ &xtpg->ppc);
+ if (ret < 0) {
+ xtpg->ppc = XTPG_MIN_PPC;
+ dev_dbg(dev, "failed to read ppc in dt\n");
+ } else if ((xtpg->ppc != 1) && (xtpg->ppc != 2) &&
+ (xtpg->ppc != 4) && (xtpg->ppc != 8)) {
+ dev_err(dev, "Invalid ppc config in dt\n");
+ return -EINVAL;
+ }
ports = of_get_child_by_name(node, "ports");
if (ports == NULL)
@@ -769,6 +1066,7 @@ static int xtpg_probe(struct platform_device *pdev)
struct v4l2_subdev *subdev;
struct xtpg_device *xtpg;
u32 i, bayer_phase;
+ u32 npatterns;
int ret;
xtpg = devm_kzalloc(&pdev->dev, sizeof(*xtpg), GFP_KERNEL);
@@ -792,14 +1090,29 @@ static int xtpg_probe(struct platform_device *pdev)
goto error_resource;
}
+ if (xtpg->is_hls) {
+ xtpg->rst_gpio = devm_gpiod_get(&pdev->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(xtpg->rst_gpio)) {
+ ret = PTR_ERR(xtpg->rst_gpio);
+ goto error_resource;
+ }
+ }
+
xtpg->vtc = xvtc_of_get(pdev->dev.of_node);
if (IS_ERR(xtpg->vtc)) {
ret = PTR_ERR(xtpg->vtc);
goto error_resource;
}
- /* Reset and initialize the core */
- xvip_reset(&xtpg->xvip);
+ /*
+ * Reset and initialize the core. The HLS version of the TPG
+ * has no SW_RESET bit, so a GPIO based reset is used instead.
+ */
+ if (xtpg->is_hls)
+ gpiod_set_value_cansleep(xtpg->rst_gpio, 0x0);
+ else
+ xvip_reset(&xtpg->xvip);
/* Initialize V4L2 subdevice and media entity. Pad numbers depend on the
* number of pads.
@@ -815,11 +1128,23 @@ static int xtpg_probe(struct platform_device *pdev)
xtpg->default_format.code = xtpg->vip_format->code;
xtpg->default_format.field = V4L2_FIELD_NONE;
xtpg->default_format.colorspace = V4L2_COLORSPACE_SRGB;
- xvip_get_frame_size(&xtpg->xvip, &xtpg->default_format);
- bayer_phase = xtpg_get_bayer_phase(xtpg->vip_format->code);
- if (bayer_phase != XTPG_BAYER_PHASE_OFF)
- xtpg->bayer = true;
+ if (xtpg->is_hls) {
+ npatterns = ARRAY_SIZE(xtpg_hls_pattern_strings);
+ xtpg->default_format.width = xvip_read(&xtpg->xvip,
+ XHLS_REG_COLS);
+ xtpg->default_format.height = xvip_read(&xtpg->xvip,
+ XHLS_REG_ROWS);
+ } else {
+ npatterns = ARRAY_SIZE(xtpg_pattern_strings);
+ xvip_get_frame_size(&xtpg->xvip, &xtpg->default_format);
+ }
+
+ if (!xtpg->is_hls) {
+ bayer_phase = xtpg_get_bayer_phase(xtpg->vip_format->code);
+ if (bayer_phase != XTPG_BAYER_PHASE_OFF)
+ xtpg->bayer = true;
+ }
xtpg->formats[0] = xtpg->default_format;
if (xtpg->npads == 2)
@@ -839,7 +1164,13 @@ static int xtpg_probe(struct platform_device *pdev)
if (ret < 0)
goto error;
- v4l2_ctrl_handler_init(&xtpg->ctrl_handler, 3 + ARRAY_SIZE(xtpg_ctrls));
+ if (xtpg->is_hls)
+ v4l2_ctrl_handler_init(&xtpg->ctrl_handler, 4 +
+ ARRAY_SIZE(xtpg_common_ctrls));
+ else
+ v4l2_ctrl_handler_init(&xtpg->ctrl_handler, 3 +
+ ARRAY_SIZE(xtpg_common_ctrls) +
+ ARRAY_SIZE(xtpg_ctrls));
xtpg->vblank = v4l2_ctrl_new_std(&xtpg->ctrl_handler, &xtpg_ctrl_ops,
V4L2_CID_VBLANK, XTPG_MIN_VBLANK,
@@ -847,19 +1178,41 @@ static int xtpg_probe(struct platform_device *pdev)
xtpg->hblank = v4l2_ctrl_new_std(&xtpg->ctrl_handler, &xtpg_ctrl_ops,
V4L2_CID_HBLANK, XTPG_MIN_HBLANK,
XTPG_MAX_HBLANK, 1, 100);
- xtpg->pattern = v4l2_ctrl_new_std_menu_items(&xtpg->ctrl_handler,
- &xtpg_ctrl_ops, V4L2_CID_TEST_PATTERN,
- ARRAY_SIZE(xtpg_pattern_strings) - 1,
- 1, 9, xtpg_pattern_strings);
- for (i = 0; i < ARRAY_SIZE(xtpg_ctrls); i++)
- v4l2_ctrl_new_custom(&xtpg->ctrl_handler, &xtpg_ctrls[i], NULL);
+ if (xtpg->is_hls) {
+ xtpg->pattern =
+ v4l2_ctrl_new_std_menu_items(&xtpg->ctrl_handler,
+ &xtpg_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ npatterns - 1,
+ 1, 9,
+ xtpg_hls_pattern_strings);
+ v4l2_ctrl_new_custom(&xtpg->ctrl_handler,
+ &xtpg_hls_fg_ctrl, NULL);
+ } else {
+ xtpg->pattern =
+ v4l2_ctrl_new_std_menu_items(&xtpg->ctrl_handler,
+ &xtpg_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ npatterns - 1,
+ 1, 9,
+ xtpg_pattern_strings);
+
+ for (i = 0; i < ARRAY_SIZE(xtpg_ctrls); i++)
+ v4l2_ctrl_new_custom(&xtpg->ctrl_handler,
+ &xtpg_ctrls[i], NULL);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(xtpg_common_ctrls); i++)
+ v4l2_ctrl_new_custom(&xtpg->ctrl_handler,
+ &xtpg_common_ctrls[i], NULL);
if (xtpg->ctrl_handler.error) {
dev_err(&pdev->dev, "failed to add controls\n");
ret = xtpg->ctrl_handler.error;
goto error;
}
+
subdev->ctrl_handler = &xtpg->ctrl_handler;
xtpg_update_pattern_control(xtpg, true, true);
@@ -874,6 +1227,10 @@ static int xtpg_probe(struct platform_device *pdev)
xvip_print_version(&xtpg->xvip);
+ /* Initialize default frame interval */
+ xtpg->fi_n = 1;
+ xtpg->fi_d = 30;
+
ret = v4l2_async_register_subdev(subdev);
if (ret < 0) {
dev_err(&pdev->dev, "failed to register subdev\n");
@@ -909,6 +1266,8 @@ static SIMPLE_DEV_PM_OPS(xtpg_pm_ops, xtpg_pm_suspend, xtpg_pm_resume);
static const struct of_device_id xtpg_of_id_table[] = {
{ .compatible = "xlnx,v-tpg-5.0" },
+ { .compatible = "xlnx,v-tpg-7.0" },
+ { .compatible = "xlnx,v-tpg-8.0" },
{ }
};
MODULE_DEVICE_TABLE(of, xtpg_of_id_table);
diff --git a/drivers/media/platform/xilinx/xilinx-vip.c b/drivers/media/platform/xilinx/xilinx-vip.c
index 6ad61b08a31a..a9559a518938 100644
--- a/drivers/media/platform/xilinx/xilinx-vip.c
+++ b/drivers/media/platform/xilinx/xilinx-vip.c
@@ -24,22 +24,102 @@
*/
static const struct xvip_video_format xvip_video_formats[] = {
+ { XVIP_VF_YUV_420, 8, NULL, MEDIA_BUS_FMT_VYYUYY8_1X24,
+ 1, 12, V4L2_PIX_FMT_NV12, 2, 1, 1, 2 },
+ { XVIP_VF_YUV_420, 8, NULL, MEDIA_BUS_FMT_VYYUYY8_1X24,
+ 1, 12, V4L2_PIX_FMT_NV12M, 2, 2, 1, 2 },
+ { XVIP_VF_YUV_420, 10, NULL, MEDIA_BUS_FMT_VYYUYY10_4X20,
+ 1, 12, V4L2_PIX_FMT_XV15, 2, 1, 2, 2 },
+ { XVIP_VF_YUV_420, 10, NULL, MEDIA_BUS_FMT_VYYUYY10_4X20,
+ 1, 12, V4L2_PIX_FMT_XV15M, 2, 2, 1, 2 },
+ { XVIP_VF_YUV_420, 12, NULL, MEDIA_BUS_FMT_UYYVYY12_4X24,
+ 1, 12, V4L2_PIX_FMT_X012, 2, 1, 2, 2 },
+ { XVIP_VF_YUV_420, 12, NULL, MEDIA_BUS_FMT_UYYVYY12_4X24,
+ 1, 12, V4L2_PIX_FMT_X012M, 2, 2, 1, 2 },
+ { XVIP_VF_YUV_420, 16, NULL, MEDIA_BUS_FMT_UYYVYY16_4X32,
+ 2, 12, V4L2_PIX_FMT_X016, 2, 1, 2, 2 },
+ { XVIP_VF_YUV_420, 16, NULL, MEDIA_BUS_FMT_UYYVYY16_4X32,
+ 2, 12, V4L2_PIX_FMT_X016M, 2, 2, 1, 2 },
{ XVIP_VF_YUV_422, 8, NULL, MEDIA_BUS_FMT_UYVY8_1X16,
- 2, V4L2_PIX_FMT_YUYV },
+ 1, 16, V4L2_PIX_FMT_NV16, 2, 1, 1, 1 },
+ { XVIP_VF_YUV_422, 8, NULL, MEDIA_BUS_FMT_UYVY8_1X16,
+ 1, 16, V4L2_PIX_FMT_NV16M, 2, 2, 1, 1 },
+ { XVIP_VF_YUV_422, 8, NULL, MEDIA_BUS_FMT_UYVY8_1X16,
+ 2, 16, V4L2_PIX_FMT_YUYV, 1, 1, 2, 1 },
+ { XVIP_VF_VUY_422, 8, NULL, MEDIA_BUS_FMT_UYVY8_1X16,
+ 2, 16, V4L2_PIX_FMT_UYVY, 1, 1, 2, 1 },
+ { XVIP_VF_YUV_422, 10, NULL, MEDIA_BUS_FMT_UYVY10_1X20,
+ 1, 16, V4L2_PIX_FMT_XV20, 2, 1, 2, 1 },
+ { XVIP_VF_YUV_422, 10, NULL, MEDIA_BUS_FMT_UYVY10_1X20,
+ 1, 16, V4L2_PIX_FMT_XV20M, 2, 2, 1, 1 },
+ { XVIP_VF_YUV_422, 12, NULL, MEDIA_BUS_FMT_UYVY12_1X24,
+ 1, 16, V4L2_PIX_FMT_X212, 2, 1, 2, 1 },
+ { XVIP_VF_YUV_422, 12, NULL, MEDIA_BUS_FMT_UYVY12_1X24,
+ 1, 16, V4L2_PIX_FMT_X212M, 2, 2, 1, 1 },
+ { XVIP_VF_YUV_422, 16, NULL, MEDIA_BUS_FMT_UYVY16_2X32,
+ 2, 16, V4L2_PIX_FMT_X216, 2, 1, 2, 1 },
+ { XVIP_VF_YUV_422, 16, NULL, MEDIA_BUS_FMT_UYVY16_2X32,
+ 2, 16, V4L2_PIX_FMT_X216M, 2, 2, 1, 1 },
{ XVIP_VF_YUV_444, 8, NULL, MEDIA_BUS_FMT_VUY8_1X24,
- 3, V4L2_PIX_FMT_YUV444 },
+ 3, 24, V4L2_PIX_FMT_VUY24, 1, 1, 1, 1 },
+ { XVIP_VF_YUVX, 8, NULL, MEDIA_BUS_FMT_VUY8_1X24,
+ 4, 32, V4L2_PIX_FMT_XVUY32, 1, 1, 1, 1 },
+ { XVIP_VF_YUVX, 10, NULL, MEDIA_BUS_FMT_VUY10_1X30,
+ 3, 32, V4L2_PIX_FMT_XVUY10, 1, 1, 1, 1 },
+ { XVIP_VF_YUV_444, 12, NULL, MEDIA_BUS_FMT_VUY12_1X36,
+ 1, 24, V4L2_PIX_FMT_X412, 1, 1, 1, 1 },
+ { XVIP_VF_YUV_444, 12, NULL, MEDIA_BUS_FMT_VUY12_1X36,
+ 1, 24, V4L2_PIX_FMT_X412M, 1, 1, 1, 1 },
+ { XVIP_VF_YUV_444, 16, NULL, MEDIA_BUS_FMT_VUY16_1X48,
+ 2, 24, V4L2_PIX_FMT_X416, 1, 1, 1, 1 },
+ { XVIP_VF_YUV_444, 16, NULL, MEDIA_BUS_FMT_VUY16_1X48,
+ 2, 24, V4L2_PIX_FMT_X416M, 1, 1, 1, 1 },
+ { XVIP_VF_RBG, 8, NULL, MEDIA_BUS_FMT_RBG888_1X24,
+ 3, 24, V4L2_PIX_FMT_BGR24, 1, 1, 1, 1 },
{ XVIP_VF_RBG, 8, NULL, MEDIA_BUS_FMT_RBG888_1X24,
- 3, 0 },
+ 3, 24, V4L2_PIX_FMT_RGB24, 1, 1, 1, 1 },
+ { XVIP_VF_BGRX, 8, NULL, MEDIA_BUS_FMT_RBG888_1X24,
+ 4, 32, V4L2_PIX_FMT_BGRX32, 1, 1, 1, 1 },
+ { XVIP_VF_XRGB, 8, NULL, MEDIA_BUS_FMT_RBG888_1X24,
+ 4, 32, V4L2_PIX_FMT_XBGR32, 1, 1, 1, 1 },
+ { XVIP_VF_XBGR, 10, NULL, MEDIA_BUS_FMT_RBG101010_1X30,
+ 3, 32, V4L2_PIX_FMT_XBGR30, 1, 1, 1, 1 },
+ { XVIP_VF_XBGR, 12, NULL, MEDIA_BUS_FMT_RBG121212_1X36,
+ 3, 40, V4L2_PIX_FMT_XBGR40, 1, 1, 1, 1 },
+ { XVIP_VF_RBG, 16, NULL, MEDIA_BUS_FMT_RBG161616_1X48,
+ 6, 48, V4L2_PIX_FMT_BGR48, 1, 1, 1, 1 },
{ XVIP_VF_MONO_SENSOR, 8, "mono", MEDIA_BUS_FMT_Y8_1X8,
- 1, V4L2_PIX_FMT_GREY },
+ 1, 8, V4L2_PIX_FMT_GREY, 1, 1, 1, 1 },
+ { XVIP_VF_Y_GREY, 10, NULL, MEDIA_BUS_FMT_Y10_1X10,
+ 4, 32, V4L2_PIX_FMT_XY10, 1, 1, 1, 1 },
+ { XVIP_VF_Y_GREY, 12, NULL, MEDIA_BUS_FMT_Y12_1X12,
+ 1, 12, V4L2_PIX_FMT_XY12, 1, 1, 1, 1 },
+ { XVIP_VF_Y_GREY, 16, NULL, MEDIA_BUS_FMT_Y16_1X16,
+ 2, 16, V4L2_PIX_FMT_Y16, 1, 1, 1, 1 },
{ XVIP_VF_MONO_SENSOR, 8, "rggb", MEDIA_BUS_FMT_SRGGB8_1X8,
- 1, V4L2_PIX_FMT_SRGGB8 },
+ 1, 8, V4L2_PIX_FMT_SGRBG8, 1, 1, 1, 1 },
{ XVIP_VF_MONO_SENSOR, 8, "grbg", MEDIA_BUS_FMT_SGRBG8_1X8,
- 1, V4L2_PIX_FMT_SGRBG8 },
+ 1, 8, V4L2_PIX_FMT_SGRBG8, 1, 1, 1, 1 },
{ XVIP_VF_MONO_SENSOR, 8, "gbrg", MEDIA_BUS_FMT_SGBRG8_1X8,
- 1, V4L2_PIX_FMT_SGBRG8 },
+ 1, 8, V4L2_PIX_FMT_SGBRG8, 1, 1, 1, 1 },
{ XVIP_VF_MONO_SENSOR, 8, "bggr", MEDIA_BUS_FMT_SBGGR8_1X8,
- 1, V4L2_PIX_FMT_SBGGR8 },
+ 1, 8, V4L2_PIX_FMT_SBGGR8, 1, 1, 1, 1 },
+ { XVIP_VF_MONO_SENSOR, 12, "rggb", MEDIA_BUS_FMT_SRGGB12_1X12,
+ 1, 12, V4L2_PIX_FMT_SRGGB12, 1, 1, 1, 1 },
+ { XVIP_VF_MONO_SENSOR, 12, "grbg", MEDIA_BUS_FMT_SGRBG12_1X12,
+ 1, 12, V4L2_PIX_FMT_SGRBG12, 1, 1, 1, 1 },
+ { XVIP_VF_MONO_SENSOR, 12, "gbrg", MEDIA_BUS_FMT_SGBRG12_1X12,
+ 1, 12, V4L2_PIX_FMT_SGBRG12, 1, 1, 1, 1 },
+ { XVIP_VF_MONO_SENSOR, 12, "bggr", MEDIA_BUS_FMT_SBGGR12_1X12,
+ 1, 12, V4L2_PIX_FMT_SBGGR12, 1, 1, 1, 1 },
+ { XVIP_VF_MONO_SENSOR, 16, "rggb", MEDIA_BUS_FMT_SRGGB16_1X16,
+ 1, 16, V4L2_PIX_FMT_SRGGB16, 1, 1, 1, 1 },
+ { XVIP_VF_MONO_SENSOR, 16, "grbg", MEDIA_BUS_FMT_SGRBG16_1X16,
+ 1, 12, V4L2_PIX_FMT_SGRBG16, 1, 1, 1, 1 },
+ { XVIP_VF_MONO_SENSOR, 16, "gbrg", MEDIA_BUS_FMT_SGBRG16_1X16,
+ 1, 12, V4L2_PIX_FMT_SGBRG16, 1, 1, 1, 1 },
+ { XVIP_VF_MONO_SENSOR, 16, "bggr", MEDIA_BUS_FMT_SBGGR12_1X12,
+ 1, 12, V4L2_PIX_FMT_SBGGR16, 1, 1, 1, 1 },
};
/**
@@ -89,6 +169,87 @@ const struct xvip_video_format *xvip_get_format_by_fourcc(u32 fourcc)
EXPORT_SYMBOL_GPL(xvip_get_format_by_fourcc);
/**
+ * xvip_bpl_scaling_factor - Retrieve bytes-per-line scaling factor for a 4CC
+ * @fourcc: the format 4CC
+ * @numerator: pointer filled in with the numerator of the scaling factor
+ * @denominator: pointer filled in with the denominator of the scaling factor
+ *
+ * Line lengths of packed 10-bit formats scale by 10/8 and of packed 12-bit
+ * formats by 12/8 relative to the nominal 8-bit layout; every other format
+ * keeps the identity factor 1/1.
+ */
+void xvip_bpl_scaling_factor(u32 fourcc, u32 *numerator, u32 *denominator)
+{
+ switch (fourcc) {
+ case V4L2_PIX_FMT_XY10:
+ case V4L2_PIX_FMT_XV15:
+ case V4L2_PIX_FMT_XV20:
+ case V4L2_PIX_FMT_XV15M:
+ case V4L2_PIX_FMT_XV20M:
+ case V4L2_PIX_FMT_XBGR30:
+ case V4L2_PIX_FMT_XVUY10:
+ *numerator = 10;
+ *denominator = 8;
+ break;
+ case V4L2_PIX_FMT_XBGR40:
+ case V4L2_PIX_FMT_XY12:
+ case V4L2_PIX_FMT_X012:
+ case V4L2_PIX_FMT_X012M:
+ case V4L2_PIX_FMT_X212:
+ case V4L2_PIX_FMT_X212M:
+ case V4L2_PIX_FMT_X412:
+ case V4L2_PIX_FMT_X412M:
+ *numerator = 12;
+ *denominator = 8;
+ break;
+ default:
+ /* no packing: bytes per line equals the 8-bit layout */
+ *numerator = 1;
+ *denominator = 1;
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(xvip_bpl_scaling_factor);
+
+/**
+ * xvip_width_padding_factor - Retrieve width's padding factor for a 4CC
+ * @fourcc: the format 4CC
+ * @numerator: pointer filled in with the numerator of the padding factor
+ * @denominator: pointer filled in with the denominator of the padding factor
+ *
+ * Packed lines are padded to a word boundary: for the 10-bit formats every
+ * 30 bits of data occupy 32 bits, and for the 12-bit formats every 36 bits
+ * occupy 40; all other formats keep the identity factor 1/1.
+ */
+void xvip_width_padding_factor(u32 fourcc, u32 *numerator, u32 *denominator)
+{
+ switch (fourcc) {
+ case V4L2_PIX_FMT_XY10:
+ case V4L2_PIX_FMT_XV15:
+ case V4L2_PIX_FMT_XV20:
+ case V4L2_PIX_FMT_XV15M:
+ case V4L2_PIX_FMT_XV20M:
+ case V4L2_PIX_FMT_XBGR30:
+ case V4L2_PIX_FMT_XVUY10:
+ /* 32 bits are required per 30 bits of data */
+ *numerator = 32;
+ *denominator = 30;
+ break;
+ case V4L2_PIX_FMT_XBGR40:
+ case V4L2_PIX_FMT_XY12:
+ case V4L2_PIX_FMT_X012:
+ case V4L2_PIX_FMT_X012M:
+ case V4L2_PIX_FMT_X212:
+ case V4L2_PIX_FMT_X212M:
+ case V4L2_PIX_FMT_X412:
+ case V4L2_PIX_FMT_X412M:
+ /* 40 bits are required per 36 bits of data */
+ *numerator = 40;
+ *denominator = 36;
+ break;
+ default:
+ /* no padding needed */
+ *numerator = 1;
+ *denominator = 1;
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(xvip_width_padding_factor);
+
+/**
* xvip_of_get_format - Parse a device tree node and return format information
* @node: the device tree node
*
diff --git a/drivers/media/platform/xilinx/xilinx-vip.h b/drivers/media/platform/xilinx/xilinx-vip.h
index a528a32ea1dc..43774a14b95c 100644
--- a/drivers/media/platform/xilinx/xilinx-vip.h
+++ b/drivers/media/platform/xilinx/xilinx-vip.h
@@ -107,21 +107,33 @@ struct xvip_device {
* @width: AXI4 format width in bits per component
* @pattern: CFA pattern for Mono/Sensor formats
* @code: media bus format code
- * @bpp: bytes per pixel (when stored in memory)
+ * @bpl_factor: Bytes per line factor
+ * @bpp: bits per pixel
* @fourcc: V4L2 pixel format FCC identifier
+ * @num_planes: number of planes w.r.t. color format
+ * @buffers: number of buffers per format
+ * @hsub: Horizontal sampling factor of Chroma
+ * @vsub: Vertical sampling factor of Chroma
*/
struct xvip_video_format {
unsigned int vf_code;
unsigned int width;
const char *pattern;
unsigned int code;
+ unsigned int bpl_factor;
unsigned int bpp;
u32 fourcc;
+ u8 num_planes;
+ u8 buffers;
+ u8 hsub;
+ u8 vsub;
};
const struct xvip_video_format *xvip_get_format_by_code(unsigned int code);
const struct xvip_video_format *xvip_get_format_by_fourcc(u32 fourcc);
const struct xvip_video_format *xvip_of_get_format(struct device_node *node);
+void xvip_bpl_scaling_factor(u32 fourcc, u32 *numerator, u32 *denominator);
+void xvip_width_padding_factor(u32 fourcc, u32 *numerator, u32 *denominator);
void xvip_set_format_size(struct v4l2_mbus_framefmt *format,
const struct v4l2_subdev_format *fmt);
int xvip_enum_mbus_code(struct v4l2_subdev *subdev,
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c
index cc2856efea59..f1226e3094e5 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.c
+++ b/drivers/media/platform/xilinx/xilinx-vipp.c
@@ -27,16 +27,27 @@
#define XVIPP_DMA_S2MM 0
#define XVIPP_DMA_MM2S 1
+/*
+ * Kept for backward compatibility with existing applications;
+ * planned to be deprecated.
+ */
+static bool xvip_is_mplane = true;
+MODULE_PARM_DESC(is_mplane,
+ "v4l2 device capability to handle multi planar formats");
+module_param_named(is_mplane, xvip_is_mplane, bool, 0444);
+
/**
* struct xvip_graph_entity - Entity in the video graph
* @asd: subdev asynchronous registration information
* @entity: media entity, from the corresponding V4L2 subdev
* @subdev: V4L2 subdev
+ * @streaming: status of the V4L2 subdev if streaming or not
*/
struct xvip_graph_entity {
struct v4l2_async_subdev asd; /* must be first */
struct media_entity *entity;
struct v4l2_subdev *subdev;
+ bool streaming;
};
static inline struct xvip_graph_entity *
@@ -65,6 +76,22 @@ xvip_graph_find_entity(struct xvip_composite_device *xdev,
return NULL;
}
+/*
+ * Walk the notifier's asd list and return the xvip_graph_entity whose media
+ * entity matches @entity, or NULL if @entity is not managed by this
+ * composite device.
+ */
+static struct xvip_graph_entity *
+xvip_graph_find_entity_from_media(struct xvip_composite_device *xdev,
+ struct media_entity *entity)
+{
+ struct xvip_graph_entity *xvip_entity;
+ struct v4l2_async_subdev *asd;
+
+ list_for_each_entry(asd, &xdev->notifier.asd_list, asd_list) {
+ xvip_entity = to_xvip_entity(asd);
+ if (xvip_entity->entity == entity)
+ return xvip_entity;
+ }
+
+ return NULL;
+}
+
static int xvip_graph_build_one(struct xvip_composite_device *xdev,
struct xvip_graph_entity *entity)
{
@@ -165,7 +192,6 @@ static int xvip_graph_build_one(struct xvip_composite_device *xdev,
}
}
- fwnode_handle_put(ep);
return ret;
}
@@ -182,6 +208,183 @@ xvip_graph_find_dma(struct xvip_composite_device *xdev, unsigned int port)
return NULL;
}
+/**
+ * xvip_graph_entity_set_streaming - Update the streaming status
+ * @xdev: composite video device (currently unused)
+ * @entity: graph entity to update
+ * @enable: new streaming status (true to enable, false to disable)
+ *
+ * Record @enable as the streaming status of @entity.
+ *
+ * Return: the previous streaming status (true or false)
+ */
+static bool xvip_graph_entity_set_streaming(struct xvip_composite_device *xdev,
+ struct xvip_graph_entity *entity,
+ bool enable)
+{
+ bool status = entity->streaming;
+
+ entity->streaming = enable;
+ return status;
+}
+
+/*
+ * Power and stream the V4L2 subdev behind @entity on (@on true) or off.
+ *
+ * Enable order: s_power(1) then s_stream(1); if s_stream fails the subdev is
+ * powered back down and the entity's streaming flag is cleared. Disable
+ * order: s_stream(0) then s_power(0); a failed s_stream restores the
+ * streaming flag, but power-off is still attempted. -ENOIOCTLCMD (op not
+ * implemented by the subdev) is treated as success.
+ */
+static int
+xvip_graph_entity_start_stop_subdev(struct xvip_composite_device *xdev,
+ struct xvip_graph_entity *entity, bool on)
+{
+ struct v4l2_subdev *subdev;
+ int ret = 0;
+
+ dev_dbg(xdev->dev, "%s entity %s\n",
+ on ? "Starting" : "Stopping", entity->entity->name);
+ subdev = media_entity_to_v4l2_subdev(entity->entity);
+
+ /*
+ * Start or stop the subdev only once in case it is
+ * shared between sub-graphs.
+ */
+ if (on) {
+ /* power-on subdevice */
+ ret = v4l2_subdev_call(subdev, core, s_power, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_err(xdev->dev,
+ "s_power on failed on subdev\n");
+ xvip_graph_entity_set_streaming(xdev, entity, 0);
+ return ret;
+ }
+
+ /* stream-on subdevice */
+ ret = v4l2_subdev_call(subdev, video, s_stream, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_err(xdev->dev,
+ "s_stream on failed on subdev\n");
+ /* roll back: power down and clear the streaming flag */
+ v4l2_subdev_call(subdev, core, s_power, 0);
+ xvip_graph_entity_set_streaming(xdev, entity, 0);
+ }
+ } else {
+ /* stream-off subdevice */
+ ret = v4l2_subdev_call(subdev, video, s_stream, 0);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_err(xdev->dev,
+ "s_stream off failed on subdev\n");
+ xvip_graph_entity_set_streaming(xdev, entity, 1);
+ }
+
+ /* power-off subdevice */
+ ret = v4l2_subdev_call(subdev, core, s_power, 0);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ dev_err(xdev->dev,
+ "s_power off failed on subdev\n");
+ }
+
+ if (ret == -ENOIOCTLCMD)
+ ret = 0;
+
+ return ret;
+}
+
+/**
+ * xvip_graph_entity_start_stop - start / stop the graph entity
+ * @xdev: composite device
+ * @entity: entity to check
+ * @on: boolean flag. true for enable and false for disable
+ *
+ * Check that all immediate dependencies are ready, depending on the 'on'
+ * flag: source pads are checked when enabling, sink pads when disabling.
+ * Dependencies that are not yet in the requested state are recursively
+ * started/stopped first. Once all dependencies are ready, set the streaming
+ * state on the entity. If the state is already set, skip the checks.
+ *
+ * Return: true if the state is successfully or already set. false otherwise.
+ */
+static bool xvip_graph_entity_start_stop(struct xvip_composite_device *xdev,
+ struct xvip_graph_entity *entity,
+ bool on)
+{
+ unsigned long pad_flag = on ? MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
+ unsigned int i;
+ struct v4l2_subdev *subdev;
+ bool state;
+ int ret;
+
+ if (entity->streaming == on)
+ return true;
+
+ for (i = 0; i < entity->entity->num_pads; i++) {
+ struct xvip_graph_entity *remote;
+ struct media_pad *pad;
+
+ /* skip pads that are not connected */
+ if (!(entity->entity->pads[i].flags & pad_flag))
+ continue;
+
+ pad = media_entity_remote_pad(&entity->entity->pads[i]);
+ if (!pad || !pad->entity)
+ continue;
+
+ /*
+ * Skip if there is no remote. This entity is at the end,
+ * such as DMA, sensor, or other type.
+ */
+ remote = xvip_graph_find_entity_from_media(xdev, pad->entity);
+ if (!remote)
+ continue;
+
+ /* the dependency is not yet in the requested state */
+ if (remote->streaming != on) {
+ state = xvip_graph_entity_start_stop(xdev, remote, on);
+ if (!state)
+ return state;
+ }
+ }
+
+ /* set state and report if state is changed or not */
+ /* NOTE(review): 'subdev' is assigned here but never used below */
+ subdev = media_entity_to_v4l2_subdev(entity->entity);
+ state = xvip_graph_entity_set_streaming(xdev, entity, on);
+ /* cannot happen: the early return above already handled state == on */
+ if (state == on) {
+ WARN(1, "Should never get here\n");
+ return true;
+ }
+
+ ret = xvip_graph_entity_start_stop_subdev(xdev, entity, on);
+ if (ret < 0) {
+ dev_err(xdev->dev, "ret = %d for entity %s\n",
+ ret, entity->entity->name);
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * xvip_graph_start_stop - start or stop the entire graph
+ * @xdev: composite device
+ * @on: boolean flag. true for enable and false for disable
+ *
+ * Enable or disable the entire graph by iterating over the asd list.
+ * xvip_graph_entity_start_stop() takes care of dependency ordering and
+ * state checking.
+ *
+ * Return: 0 for success, otherwise error code
+ */
+int xvip_graph_start_stop(struct xvip_composite_device *xdev, bool on)
+{
+ struct v4l2_async_subdev *asd;
+
+ list_for_each_entry(asd, &xdev->notifier.asd_list, asd_list) {
+ struct xvip_graph_entity *entity;
+ bool state;
+
+ entity = to_xvip_entity(asd);
+
+ state = xvip_graph_entity_start_stop(xdev, entity, on);
+ if (!state)
+ return -EPIPE;
+ }
+
+ return 0;
+}
+
static int xvip_graph_build_dma(struct xvip_composite_device *xdev)
{
u32 link_flags = MEDIA_LNK_FL_ENABLED;
@@ -276,7 +479,6 @@ static int xvip_graph_build_dma(struct xvip_composite_device *xdev)
}
}
- of_node_put(ep);
return ret;
}
@@ -442,9 +644,11 @@ static int xvip_graph_dma_init_one(struct xvip_composite_device *xdev,
return ret;
if (strcmp(direction, "input") == 0)
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ type = xvip_is_mplane ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
+ V4L2_BUF_TYPE_VIDEO_CAPTURE;
else if (strcmp(direction, "output") == 0)
- type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ type = xvip_is_mplane ? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
+ V4L2_BUF_TYPE_VIDEO_OUTPUT;
else
return -EINVAL;
@@ -462,8 +666,14 @@ static int xvip_graph_dma_init_one(struct xvip_composite_device *xdev,
list_add_tail(&dma->list, &xdev->dmas);
- xdev->v4l2_caps |= type == V4L2_BUF_TYPE_VIDEO_CAPTURE
- ? V4L2_CAP_VIDEO_CAPTURE : V4L2_CAP_VIDEO_OUTPUT;
+ if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ xdev->v4l2_caps |= V4L2_CAP_VIDEO_CAPTURE_MPLANE;
+ else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ xdev->v4l2_caps |= V4L2_CAP_VIDEO_CAPTURE;
+ else if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ xdev->v4l2_caps |= V4L2_CAP_VIDEO_OUTPUT;
+ else if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ xdev->v4l2_caps |= V4L2_CAP_VIDEO_OUTPUT_MPLANE;
return 0;
}
@@ -594,6 +804,7 @@ static int xvip_composite_probe(struct platform_device *pdev)
return -ENOMEM;
xdev->dev = &pdev->dev;
+ mutex_init(&xdev->lock);
INIT_LIST_HEAD(&xdev->dmas);
v4l2_async_notifier_init(&xdev->notifier);
@@ -620,6 +831,7 @@ static int xvip_composite_remove(struct platform_device *pdev)
{
struct xvip_composite_device *xdev = platform_get_drvdata(pdev);
+ mutex_destroy(&xdev->lock);
xvip_graph_cleanup(xdev);
xvip_composite_v4l2_cleanup(xdev);
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.h b/drivers/media/platform/xilinx/xilinx-vipp.h
index cc52c1854dbd..d35dc027792a 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.h
+++ b/drivers/media/platform/xilinx/xilinx-vipp.h
@@ -27,6 +27,7 @@
* @notifier: V4L2 asynchronous subdevs notifier
* @dmas: list of DMA channels at the pipeline output and input
* @v4l2_caps: V4L2 capabilities of the whole device (see VIDIOC_QUERYCAP)
+ * @lock: serializes setup so all DMA path entities acquire the same pipeline object
*/
struct xvip_composite_device {
struct v4l2_device v4l2_dev;
@@ -37,6 +38,9 @@ struct xvip_composite_device {
struct list_head dmas;
u32 v4l2_caps;
+ struct mutex lock; /* lock to protect xvip pipeline instance */
};
+int xvip_graph_start_stop(struct xvip_composite_device *xdev, bool on);
+
#endif /* __XILINX_VIPP_H__ */
diff --git a/drivers/media/platform/xilinx/xilinx-vpss-csc.c b/drivers/media/platform/xilinx/xilinx-vpss-csc.c
new file mode 100644
index 000000000000..c628ee07a1e0
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-vpss-csc.c
@@ -0,0 +1,1170 @@
+/*
+ * Xilinx VPSS Color Space Converter
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/xilinx-v4l2-controls.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "xilinx-vip.h"
+
+#define XV_CSC_AP_CTRL (0x000)
+#define XV_CSC_INVIDEOFORMAT (0x010)
+#define XV_CSC_OUTVIDEOFORMAT (0x018)
+#define XV_CSC_WIDTH (0x020)
+#define XV_CSC_HEIGHT (0x028)
+#define XV_CSC_K11 (0x050)
+#define XV_CSC_K12 (0x058)
+#define XV_CSC_K13 (0x060)
+#define XV_CSC_K21 (0x068)
+#define XV_CSC_K22 (0x070)
+#define XV_CSC_K23 (0x078)
+#define XV_CSC_K31 (0x080)
+#define XV_CSC_K32 (0x088)
+#define XV_CSC_K33 (0x090)
+#define XV_CSC_ROFFSET (0x098)
+#define XV_CSC_GOFFSET (0x0a0)
+#define XV_CSC_BOFFSET (0x0a8)
+#define XV_CSC_CLAMPMIN (0x0b0)
+#define XV_CSC_CLIPMAX (0x0b8)
+
+#define XV_CSC_FRACTIONAL_BITS (12)
+#define XV_CSC_SCALE_FACTOR (4096)
+/* This is a VPSS CSC specific macro used to calculate Contrast */
+#define XV_CSC_DIVISOR (10000)
+#define XV_CSC_DEFAULT_HEIGHT (720)
+#define XV_CSC_DEFAULT_WIDTH (1280)
+#define XV_CSC_K_MAX_ROWS (3)
+#define XV_CSC_K_MAX_COLUMNS (3)
+#define XV_CSC_MIN_WIDTH (64)
+#define XV_CSC_MAX_WIDTH (8192)
+#define XV_CSC_MIN_HEIGHT (64)
+#define XV_CSC_MAX_HEIGHT (4320)
+
+/* GPIO Reset Assert/De-assert */
+#define XCSC_RESET_ASSERT (1)
+#define XCSC_RESET_DEASSERT (0)
+/* Streaming Macros */
+#define XCSC_CLAMP_MIN_ZERO (0)
+#define XCSC_AP_START BIT(0)
+#define XCSC_AP_AUTO_RESTART BIT(7)
+#define XCSC_STREAM_ON (XCSC_AP_START | XCSC_AP_AUTO_RESTART)
+/* Color Control Macros */
+#define XCSC_COLOR_CTRL_COUNT (5)
+#define XCSC_COLOR_CTRL_DEFAULT (50)
+
+enum xcsc_color_fmt {
+ XVIDC_CSF_RGB = 0,
+ XVIDC_CSF_YCRCB_444,
+ XVIDC_CSF_YCRCB_422,
+ XVIDC_CSF_YCRCB_420,
+};
+
+enum xcsc_output_range {
+ XVIDC_CR_0_255 = 1,
+ XVIDC_CR_16_240,
+ XVIDC_CR_16_235
+};
+
+enum xcsc_color_depth {
+ XVIDC_BPC_8 = 8,
+ XVIDC_BPC_10 = 10,
+};
+
+static const s32
+rgb_unity_matrix[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1] = {
+ {XV_CSC_SCALE_FACTOR, 0, 0, 0},
+ {0, XV_CSC_SCALE_FACTOR, 0, 0},
+ {0, 0, XV_CSC_SCALE_FACTOR, 0},
+};
+
+static const s32
+ycrcb_to_rgb_unity[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1] = {
+ {
+ 11644 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 0,
+ 17927 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 0
+ },
+ {
+ 11644 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ -2132 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ -5329 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 0
+ },
+ {
+ 11644 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 21124 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 0,
+ 0
+ },
+};
+
+static const s32
+rgb_to_ycrcb_unity[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1] = {
+ {
+ 1826 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 6142 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 620 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 0
+ },
+ {
+ -1006 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ -3386 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 4392 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 0
+ },
+ {
+ 4392 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ -3989 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ -403 * XV_CSC_SCALE_FACTOR / XV_CSC_DIVISOR,
+ 0,
+ },
+};
+
+/**
+ * struct xcsc_dev - xilinx vpss csc device structure
+ * @xvip: Xilinx Video IP core struct
+ * @pads: Media bus pads for VPSS CSC
+ * @formats: Current media bus formats
+ * @default_formats: Default media bus formats for VPSS CSC
+ * @vip_formats: Pointer to DT specified media bus code info
+ * @ctrl_handler: V4L2 Control Handler struct
+ * @custom_ctrls: Array of pointers to various custom controls
+ * @cft_in: IP or Hardware specific input video format
+ * @cft_out: IP or Hardware specific output video format
+ * @output_range: Color range for Outgoing video
+ * @color_depth: Data width used to represent color
+ * @brightness: Expected brightness value
+ * @contrast: Expected contrast value
+ * @red_gain: Expected red gain
+ * @green_gain: Expect green gain
+ * @blue_gain: Expected blue gain
+ * @brightness_active: Current brightness value
+ * @contrast_active: Current contrast value
+ * @red_gain_active: Current red gain
+ * @green_gain_active: Current green gain
+ * @blue_gain_active: Current blue gain
+ * @k_hw : Coefficients to be written to IP/Hardware
+ * @shadow_coeff: Coefficients to track RGB equivalents for color controls
+ * @clip_max: Maximum value to clip output color range
+ * @rst_gpio: Handle to PS GPIO specifier to assert/de-assert the reset line
+ * @max_width: Maximum width supported by IP.
+ * @max_height: Maximum height supported by IP.
+ */
+struct xcsc_dev {
+ struct xvip_device xvip;
+ struct media_pad pads[2];
+ struct v4l2_mbus_framefmt formats[2];
+ struct v4l2_mbus_framefmt default_formats[2];
+ const struct xvip_video_format *vip_formats[2];
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *custom_ctrls[XCSC_COLOR_CTRL_COUNT];
+
+ enum xcsc_color_fmt cft_in;
+ enum xcsc_color_fmt cft_out;
+ enum xcsc_output_range output_range;
+ enum xcsc_color_depth color_depth;
+ s32 brightness;
+ s32 contrast;
+ s32 red_gain;
+ s32 green_gain;
+ s32 blue_gain;
+ s32 brightness_active;
+ s32 contrast_active;
+ s32 red_gain_active;
+ s32 green_gain_active;
+ s32 blue_gain_active;
+ s32 k_hw[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1];
+ s32 shadow_coeff[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1];
+ s32 clip_max;
+ struct gpio_desc *rst_gpio;
+ u32 max_width;
+ u32 max_height;
+};
+
+#ifdef DEBUG
+static u32 xcsc_read(struct xcsc_dev *xcsc, u32 reg)
+{
+ u32 data;
+
+ data = xvip_read(&xcsc->xvip, reg);
+ return data;
+}
+
+static void xcsc_get_coeff(struct xcsc_dev *xcsc, s32 C[3][4])
+{
+ C[0][0] = xcsc_read(xcsc, XV_CSC_K11);
+ C[0][1] = xcsc_read(xcsc, XV_CSC_K12);
+ C[0][2] = xcsc_read(xcsc, XV_CSC_K13);
+ C[1][0] = xcsc_read(xcsc, XV_CSC_K21);
+ C[1][1] = xcsc_read(xcsc, XV_CSC_K22);
+ C[1][2] = xcsc_read(xcsc, XV_CSC_K23);
+ C[2][0] = xcsc_read(xcsc, XV_CSC_K31);
+ C[2][1] = xcsc_read(xcsc, XV_CSC_K32);
+ C[2][2] = xcsc_read(xcsc, XV_CSC_K33);
+ C[0][3] = xcsc_read(xcsc, XV_CSC_ROFFSET);
+ C[1][3] = xcsc_read(xcsc, XV_CSC_GOFFSET);
+ C[2][3] = xcsc_read(xcsc, XV_CSC_BOFFSET);
+}
+
+static void xcsc_print_coeff(struct xcsc_dev *xcsc)
+{
+ s32 C[3][4];
+
+ xcsc_get_coeff(xcsc, C);
+
+ dev_info(xcsc->xvip.dev,
+ "-------------CSC Coeff Dump Start------\n");
+ dev_info(xcsc->xvip.dev,
+ " R row : %5d %5d %5d\n",
+ (s16)C[0][0], (s16)C[0][1], (s16)C[0][2]);
+ dev_info(xcsc->xvip.dev,
+ " G row : %5d %5d %5d\n",
+ (s16)C[1][0], (s16)C[1][1], (s16)C[1][2]);
+ dev_info(xcsc->xvip.dev,
+ " B row : %5d %5d %5d\n",
+ (s16)C[2][0], (s16)C[2][1], (s16)C[2][2]);
+ dev_info(xcsc->xvip.dev,
+ "Offset : %5d %5d %5d\n",
+ (s16)C[0][3], (s16)C[1][3], (s16)C[2][3]);
+ dev_info(xcsc->xvip.dev,
+ "ClampMin: %3d ClipMax %3d",
+ xcsc_read(xcsc, XV_CSC_CLAMPMIN),
+ xcsc_read(xcsc, XV_CSC_CLIPMAX));
+ dev_info(xcsc->xvip.dev,
+ "-------------CSC Coeff Dump Stop-------\n");
+}
+
+static void
+xcsc_log_coeff(struct device *dev,
+ s32 coeff[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1])
+{
+ if (!dev)
+ return;
+ dev_dbg(dev, "--- %s : Start Coeff Log ---", __func__);
+ dev_dbg(dev, "R row : %5d %5d %5d\n",
+ coeff[0][0], coeff[0][1], coeff[0][2]);
+ dev_dbg(dev, "G row : %5d %5d %5d\n",
+ coeff[1][0], coeff[1][1], coeff[1][2]);
+ dev_dbg(dev, "B row : %5d %5d %5d\n",
+ coeff[2][0], coeff[2][1], coeff[2][2]);
+ dev_dbg(dev, "Offset: %5d %5d %5d\n",
+ coeff[0][3], coeff[1][3], coeff[2][3]);
+ dev_dbg(dev, "--- %s : Stop Coeff Log ---", __func__);
+}
+
+static void xcsc_print_k_hw(struct xcsc_dev *xcsc)
+{
+ dev_dbg(xcsc->xvip.dev,
+ "-------------CSC Driver k_hw[][] Dump------------\n");
+ xcsc_log_coeff(xcsc->xvip.dev, xcsc->k_hw);
+ dev_dbg(xcsc->xvip.dev,
+ "-------------------------------------------------\n");
+}
+#endif /* DEBUG */
+
+static void xcsc_write(struct xcsc_dev *xcsc, u32 reg, u32 data)
+{
+ xvip_write(&xcsc->xvip, reg, data);
+}
+
+static void xcsc_write_rgb_3x3(struct xcsc_dev *xcsc)
+{
+ /* Write Matrix Coefficients */
+ xcsc_write(xcsc, XV_CSC_K11, xcsc->k_hw[0][0]);
+ xcsc_write(xcsc, XV_CSC_K12, xcsc->k_hw[0][1]);
+ xcsc_write(xcsc, XV_CSC_K13, xcsc->k_hw[0][2]);
+ xcsc_write(xcsc, XV_CSC_K21, xcsc->k_hw[1][0]);
+ xcsc_write(xcsc, XV_CSC_K22, xcsc->k_hw[1][1]);
+ xcsc_write(xcsc, XV_CSC_K23, xcsc->k_hw[1][2]);
+ xcsc_write(xcsc, XV_CSC_K31, xcsc->k_hw[2][0]);
+ xcsc_write(xcsc, XV_CSC_K32, xcsc->k_hw[2][1]);
+ xcsc_write(xcsc, XV_CSC_K33, xcsc->k_hw[2][2]);
+}
+
+static void xcsc_write_rgb_offset(struct xcsc_dev *xcsc)
+{
+ /* Write RGB Offsets */
+ xcsc_write(xcsc, XV_CSC_ROFFSET, xcsc->k_hw[0][3]);
+ xcsc_write(xcsc, XV_CSC_GOFFSET, xcsc->k_hw[1][3]);
+ xcsc_write(xcsc, XV_CSC_BOFFSET, xcsc->k_hw[2][3]);
+}
+
+static void xcsc_write_coeff(struct xcsc_dev *xcsc)
+{
+ xcsc_write_rgb_3x3(xcsc);
+ xcsc_write_rgb_offset(xcsc);
+}
+
+static void xcsc_set_v4l2_ctrl_defaults(struct xcsc_dev *xcsc)
+{
+ unsigned int i;
+
+ mutex_lock(xcsc->ctrl_handler.lock);
+ for (i = 0; i < XCSC_COLOR_CTRL_COUNT; i++)
+ xcsc->custom_ctrls[i]->cur.val = XCSC_COLOR_CTRL_DEFAULT;
+ mutex_unlock(xcsc->ctrl_handler.lock);
+}
+
+static void xcsc_set_control_defaults(struct xcsc_dev *xcsc)
+{
+ /* These are VPSS CSC IP specific defaults */
+ xcsc->brightness = 120;
+ xcsc->contrast = 0;
+ xcsc->red_gain = 120;
+ xcsc->blue_gain = 120;
+ xcsc->green_gain = 120;
+ xcsc->brightness_active = 120;
+ xcsc->contrast_active = 0;
+ xcsc->red_gain_active = 120;
+ xcsc->blue_gain_active = 120;
+ xcsc->green_gain_active = 120;
+}
+
+static void xcsc_copy_coeff(
+ s32 dest[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1],
+ s32 const src[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1])
+{
+ unsigned int i, j;
+
+ for (i = 0; i < XV_CSC_K_MAX_ROWS; i++)
+ for (j = 0; j < XV_CSC_K_MAX_COLUMNS + 1; j++)
+ memcpy(&dest[i][j], &src[i][j], sizeof(dest[0][0]));
+}
+
+static void xcsc_set_unity_matrix(struct xcsc_dev *xcsc)
+{
+ xcsc_copy_coeff(xcsc->k_hw, rgb_unity_matrix);
+ xcsc_copy_coeff(xcsc->shadow_coeff, rgb_unity_matrix);
+}
+
+static void xcsc_set_default_state(struct xcsc_dev *xcsc)
+{
+ xcsc->cft_in = XVIDC_CSF_RGB;
+ xcsc->cft_out = XVIDC_CSF_RGB;
+ xcsc->output_range = XVIDC_CR_0_255;
+ /* Needed to add 10, 12 and 16 bit color depth support */
+ xcsc->clip_max = BIT(xcsc->color_depth) - 1;
+ xcsc_set_control_defaults(xcsc);
+ xcsc_set_unity_matrix(xcsc);
+ xcsc_write(xcsc, XV_CSC_INVIDEOFORMAT, xcsc->cft_in);
+ xcsc_write(xcsc, XV_CSC_OUTVIDEOFORMAT, xcsc->cft_out);
+ xcsc_write_coeff(xcsc);
+ xcsc_write(xcsc, XV_CSC_CLIPMAX, xcsc->clip_max);
+ xcsc_write(xcsc, XV_CSC_CLAMPMIN, XCSC_CLAMP_MIN_ZERO);
+}
+
+static void
+xcsc_ycrcb_to_rgb(struct xcsc_dev *xcsc, s32 *clip_max,
+ s32 temp[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1])
+{
+ u16 bpc_scale = BIT(xcsc->color_depth - 8);
+
+ /*
+ * See http://graficaobscura.com/matrix/index.html for
+ * how these numbers are derived. The VPSS CSC IP is
+ * derived from this Matrix style algorithm. And the
+ * 'magic' numbers here are derived from the algorithm.
+ *
+ * XV_CSC_DIVISOR is used to help with floating constants
+ * while performing multiplicative operations
+ *
+ * Coefficients valid only for BT 709
+ */
+ dev_dbg(xcsc->xvip.dev, "Performing YCrCb to RGB BT 709");
+ xcsc_copy_coeff(temp, ycrcb_to_rgb_unity);
+ temp[0][3] = -248 * bpc_scale;
+ temp[1][3] = 77 * bpc_scale;
+ temp[2][3] = -289 * bpc_scale;
+ *clip_max = BIT(xcsc->color_depth) - 1;
+}
+
+static void
+xcsc_matrix_multiply(s32 K1[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1],
+ s32 K2[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1],
+ s32 kout[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1])
+{
+ s32 A, B, C, D, E, F, G, H, I, J, K, L, M, N;
+ s32 O, P, Q, R, S, T, U, V, W, X;
+
+ A = K1[0][0]; B = K1[0][1]; C = K1[0][2]; J = K1[0][3];
+ D = K1[1][0]; E = K1[1][1]; F = K1[1][2]; K = K1[1][3];
+ G = K1[2][0]; H = K1[2][1]; I = K1[2][2]; L = K1[2][3];
+
+ M = K2[0][0]; N = K2[0][1]; O = K2[0][2]; V = K2[0][3];
+ P = K2[1][0]; Q = K2[1][1]; R = K2[1][2]; W = K2[1][3];
+ S = K2[2][0]; T = K2[2][1]; U = K2[2][2]; X = K2[2][3];
+
+ kout[0][0] = (M * A + N * D + O * G) / XV_CSC_SCALE_FACTOR;
+ kout[0][1] = (M * B + N * E + O * H) / XV_CSC_SCALE_FACTOR;
+ kout[0][2] = (M * C + N * F + O * I) / XV_CSC_SCALE_FACTOR;
+ kout[1][0] = (P * A + Q * D + R * G) / XV_CSC_SCALE_FACTOR;
+ kout[1][1] = (P * B + Q * E + R * H) / XV_CSC_SCALE_FACTOR;
+ kout[1][2] = (P * C + Q * F + R * I) / XV_CSC_SCALE_FACTOR;
+ kout[2][0] = (S * A + T * D + U * G) / XV_CSC_SCALE_FACTOR;
+ kout[2][1] = (S * B + T * E + U * H) / XV_CSC_SCALE_FACTOR;
+ kout[2][2] = (S * C + T * F + U * I) / XV_CSC_SCALE_FACTOR;
+ kout[0][3] = ((M * J + N * K + O * L) / XV_CSC_SCALE_FACTOR) + V;
+ kout[1][3] = ((P * J + Q * K + R * L) / XV_CSC_SCALE_FACTOR) + W;
+ kout[2][3] = ((S * J + T * K + U * L) / XV_CSC_SCALE_FACTOR) + X;
+}
+
+static void
+xcsc_rgb_to_ycrcb(struct xcsc_dev *xcsc, s32 *clip_max,
+ s32 temp[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1])
+{
+ u16 bpc_scale = BIT(xcsc->color_depth - 8);
+
+ /*
+ * See http://graficaobscura.com/matrix/index.html for
+ * how these numbers are derived. The VPSS CSC IP is
+ * derived from this Matrix style algorithm. And the
+ * 'magic' numbers here are derived from the algorithm.
+ *
+ * XV_CSC_DIVISOR is used to help with floating constants
+ * while performing multiplicative operations
+ *
+ * Coefficients valid only for BT 709
+ */
+ dev_dbg(xcsc->xvip.dev, "Performing RGB to YCrCb BT 709");
+ xcsc_copy_coeff(temp, rgb_to_ycrcb_unity);
+ temp[0][3] = 16 * bpc_scale;
+ temp[1][3] = 128 * bpc_scale;
+ temp[2][3] = 128 * bpc_scale;
+ *clip_max = BIT(xcsc->color_depth) - 1;
+}
+
+static int xcsc_update_formats(struct xcsc_dev *xcsc)
+{
+ u32 color_in, color_out;
+
+ /* Write In and Out Video Formats */
+ color_in = xcsc->formats[XVIP_PAD_SINK].code;
+ color_out = xcsc->formats[XVIP_PAD_SOURCE].code;
+
+ switch (color_in) {
+ case MEDIA_BUS_FMT_RBG888_1X24:
+ case MEDIA_BUS_FMT_RBG101010_1X30:
+ dev_dbg(xcsc->xvip.dev, "Media Format In : RGB");
+ xcsc->cft_in = XVIDC_CSF_RGB;
+ break;
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ case MEDIA_BUS_FMT_VUY10_1X30:
+ dev_dbg(xcsc->xvip.dev, "Media Format In : YUV 444");
+ xcsc->cft_in = XVIDC_CSF_YCRCB_444;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ dev_dbg(xcsc->xvip.dev, "Media Format In : YUV 422");
+ xcsc->cft_in = XVIDC_CSF_YCRCB_422;
+ break;
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ case MEDIA_BUS_FMT_VYYUYY10_4X20:
+ dev_dbg(xcsc->xvip.dev, "Media Format In : YUV 420");
+ xcsc->cft_in = XVIDC_CSF_YCRCB_420;
+ break;
+ }
+
+ switch (color_out) {
+ case MEDIA_BUS_FMT_RBG888_1X24:
+ case MEDIA_BUS_FMT_RBG101010_1X30:
+ xcsc->cft_out = XVIDC_CSF_RGB;
+ dev_dbg(xcsc->xvip.dev, "Media Format Out : RGB");
+ if (color_in != MEDIA_BUS_FMT_RBG888_1X24)
+ xcsc_ycrcb_to_rgb(xcsc, &xcsc->clip_max, xcsc->k_hw);
+ else
+ xcsc_set_unity_matrix(xcsc);
+ break;
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ case MEDIA_BUS_FMT_VUY10_1X30:
+ xcsc->cft_out = XVIDC_CSF_YCRCB_444;
+ dev_dbg(xcsc->xvip.dev, "Media Format Out : YUV 444");
+ if (color_in == MEDIA_BUS_FMT_RBG888_1X24)
+ xcsc_rgb_to_ycrcb(xcsc, &xcsc->clip_max, xcsc->k_hw);
+ else
+ xcsc_set_unity_matrix(xcsc);
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ xcsc->cft_out = XVIDC_CSF_YCRCB_422;
+ dev_dbg(xcsc->xvip.dev, "Media Format Out : YUV 422");
+ if (color_in == MEDIA_BUS_FMT_RBG888_1X24)
+ xcsc_rgb_to_ycrcb(xcsc, &xcsc->clip_max, xcsc->k_hw);
+ else
+ xcsc_set_unity_matrix(xcsc);
+ break;
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ case MEDIA_BUS_FMT_VYYUYY10_4X20:
+ xcsc->cft_out = XVIDC_CSF_YCRCB_420;
+ dev_dbg(xcsc->xvip.dev, "Media Format Out : YUV 420");
+ if (color_in == MEDIA_BUS_FMT_RBG888_1X24)
+ xcsc_rgb_to_ycrcb(xcsc, &xcsc->clip_max, xcsc->k_hw);
+ else
+ xcsc_set_unity_matrix(xcsc);
+ break;
+ }
+
+ xcsc_write(xcsc, XV_CSC_INVIDEOFORMAT, xcsc->cft_in);
+ xcsc_write(xcsc, XV_CSC_OUTVIDEOFORMAT, xcsc->cft_out);
+
+ xcsc_write_coeff(xcsc);
+
+ xcsc_write(xcsc, XV_CSC_CLIPMAX, xcsc->clip_max);
+ xcsc_write(xcsc, XV_CSC_CLAMPMIN, XCSC_CLAMP_MIN_ZERO);
+#ifdef DEBUG
+ xcsc_print_k_hw(xcsc);
+ xcsc_print_coeff(xcsc);
+#endif
+ return 0;
+}
+
+static inline struct xcsc_dev *to_csc(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xcsc_dev, xvip.subdev);
+}
+
+static struct v4l2_mbus_framefmt *
+__xcsc_get_pad_format(struct xcsc_dev *xcsc,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xcsc->xvip.subdev, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xcsc->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+static void
+xcsc_correct_coeff(struct xcsc_dev *xcsc,
+ s32 temp[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1])
+{
+ s32 csc_change[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1] = { {0} };
+ s32 csc_extra[XV_CSC_K_MAX_ROWS][XV_CSC_K_MAX_COLUMNS + 1] = { {0} };
+ u32 mbus_in = xcsc->formats[XVIP_PAD_SINK].code;
+ u32 mbus_out = xcsc->formats[XVIP_PAD_SOURCE].code;
+
+#ifdef DEBUG
+ xcsc_log_coeff(xcsc->xvip.dev, temp);
+#endif
+ if (mbus_in == MEDIA_BUS_FMT_RBG888_1X24 && mbus_out == mbus_in) {
+ dev_dbg(xcsc->xvip.dev, "%s : RGB to RGB", __func__);
+ xcsc_copy_coeff(xcsc->k_hw,
+ (const s32 (*)[XV_CSC_K_MAX_COLUMNS + 1])temp);
+ } else if (mbus_in == MEDIA_BUS_FMT_RBG888_1X24 &&
+ mbus_out != MEDIA_BUS_FMT_RBG888_1X24) {
+ dev_dbg(xcsc->xvip.dev, "%s : RGB to YUV", __func__);
+ xcsc_rgb_to_ycrcb(xcsc, &xcsc->clip_max, csc_change);
+ xcsc_matrix_multiply(temp, csc_change, xcsc->k_hw);
+ } else if (mbus_in != MEDIA_BUS_FMT_RBG888_1X24 &&
+ mbus_out == MEDIA_BUS_FMT_RBG888_1X24) {
+ dev_dbg(xcsc->xvip.dev, "%s : YUV to RGB", __func__);
+ xcsc_ycrcb_to_rgb(xcsc, &xcsc->clip_max, csc_change);
+ xcsc_matrix_multiply(csc_change, temp, xcsc->k_hw);
+ } else if (mbus_in != MEDIA_BUS_FMT_RBG888_1X24 &&
+ mbus_out != MEDIA_BUS_FMT_RBG888_1X24) {
+ dev_dbg(xcsc->xvip.dev, "%s : YUV to YUV", __func__);
+ xcsc_ycrcb_to_rgb(xcsc, &xcsc->clip_max, csc_change);
+ xcsc_matrix_multiply(csc_change, temp, csc_extra);
+ xcsc_rgb_to_ycrcb(xcsc, &xcsc->clip_max, csc_change);
+ xcsc_matrix_multiply(csc_extra, csc_change, xcsc->k_hw);
+ } else {
+ /* Should never get here */
+ WARN_ON(1);
+ }
+}
+
+static void xcsc_set_brightness(struct xcsc_dev *xcsc)
+{
+ unsigned int i, j;
+
+ dev_dbg(xcsc->xvip.dev,
+ "%s : Brightness %d Brightness Active %d",
+ __func__,
+ ((xcsc->brightness - 20) / 2),
+ ((xcsc->brightness_active - 20) / 2));
+ if (xcsc->brightness == xcsc->brightness_active)
+ return;
+ for (i = 0; i < XV_CSC_K_MAX_ROWS; i++) {
+ for (j = 0; j < XV_CSC_K_MAX_COLUMNS; j++) {
+ xcsc->shadow_coeff[i][j] = (xcsc->shadow_coeff[i][j] *
+ xcsc->brightness) /
+ xcsc->brightness_active;
+ }
+ }
+ xcsc->brightness_active = xcsc->brightness;
+ xcsc_correct_coeff(xcsc, xcsc->shadow_coeff);
+ xcsc_write_coeff(xcsc);
+}
+
+static void xcsc_set_contrast(struct xcsc_dev *xcsc)
+{
+ s32 contrast;
+ u8 scale = BIT(xcsc->color_depth - 8);
+
+ contrast = xcsc->contrast - xcsc->contrast_active;
+ dev_dbg(xcsc->xvip.dev,
+ "%s : Contrast Difference %d scale = %d",
+ __func__, contrast, scale);
+ /* Avoid updates if same */
+ if (!contrast)
+ return;
+ /* Update RGB Offsets */
+ xcsc->shadow_coeff[0][3] += contrast * scale;
+ xcsc->shadow_coeff[1][3] += contrast * scale;
+ xcsc->shadow_coeff[2][3] += contrast * scale;
+ xcsc->contrast_active = xcsc->contrast;
+ xcsc_correct_coeff(xcsc, xcsc->shadow_coeff);
+ xcsc_write_coeff(xcsc);
+}
+
+static void xcsc_set_red_gain(struct xcsc_dev *xcsc)
+{
+ dev_dbg(xcsc->xvip.dev,
+ "%s: Red Gain %d Red Gain Active %d", __func__,
+ (xcsc->red_gain - 20) / 2,
+ (xcsc->red_gain_active - 20) / 2);
+
+ if (xcsc->red_gain != xcsc->red_gain_active) {
+ xcsc->shadow_coeff[0][0] = (xcsc->shadow_coeff[0][0] *
+ xcsc->red_gain) /
+ xcsc->red_gain_active;
+ xcsc->shadow_coeff[0][1] = (xcsc->shadow_coeff[0][1] *
+ xcsc->red_gain) /
+ xcsc->red_gain_active;
+ xcsc->shadow_coeff[0][2] = (xcsc->shadow_coeff[0][2] *
+ xcsc->red_gain) /
+ xcsc->red_gain_active;
+ xcsc->red_gain_active = xcsc->red_gain;
+ xcsc_correct_coeff(xcsc, xcsc->shadow_coeff);
+ xcsc_write_coeff(xcsc);
+ }
+}
+
+static void xcsc_set_green_gain(struct xcsc_dev *xcsc)
+{
+ dev_dbg(xcsc->xvip.dev,
+ "%s: Green Gain %d Green Gain Active %d", __func__,
+ (xcsc->green_gain - 20) / 2,
+ (xcsc->green_gain_active - 20) / 2);
+
+ if (xcsc->green_gain != xcsc->green_gain_active) {
+ xcsc->shadow_coeff[1][0] = (xcsc->shadow_coeff[1][0] *
+ xcsc->green_gain) /
+ xcsc->green_gain_active;
+ xcsc->shadow_coeff[1][1] = (xcsc->shadow_coeff[1][1] *
+ xcsc->green_gain) /
+ xcsc->green_gain_active;
+ xcsc->shadow_coeff[1][2] = (xcsc->shadow_coeff[1][2] *
+ xcsc->green_gain) /
+ xcsc->green_gain_active;
+ xcsc->green_gain_active = xcsc->green_gain;
+ xcsc_correct_coeff(xcsc, xcsc->shadow_coeff);
+ xcsc_write_coeff(xcsc);
+ }
+}
+
+static void xcsc_set_blue_gain(struct xcsc_dev *xcsc)
+{
+ dev_dbg(xcsc->xvip.dev,
+ "%s: Blue Gain %d Blue Gain Active %d", __func__,
+ (xcsc->blue_gain - 20) / 2,
+ (xcsc->blue_gain_active - 20) / 2);
+
+ if (xcsc->blue_gain != xcsc->blue_gain_active) {
+ xcsc->shadow_coeff[2][0] = (xcsc->shadow_coeff[2][0] *
+ xcsc->blue_gain) /
+ xcsc->blue_gain_active;
+ xcsc->shadow_coeff[2][1] = (xcsc->shadow_coeff[2][1] *
+ xcsc->blue_gain) /
+ xcsc->blue_gain_active;
+ xcsc->shadow_coeff[2][2] = (xcsc->shadow_coeff[2][2] *
+ xcsc->blue_gain) /
+ xcsc->blue_gain_active;
+ xcsc->blue_gain_active = xcsc->blue_gain;
+ xcsc_correct_coeff(xcsc, xcsc->shadow_coeff);
+ xcsc_write_coeff(xcsc);
+ }
+}
+
+static void xcsc_set_size(struct xcsc_dev *xcsc)
+{
+ u32 width, height;
+
+ width = xcsc->formats[XVIP_PAD_SINK].width;
+ height = xcsc->formats[XVIP_PAD_SINK].height;
+ dev_dbg(xcsc->xvip.dev, "%s : Setting width %d and height %d",
+ __func__, width, height);
+ xcsc_write(xcsc, XV_CSC_WIDTH, width);
+ xcsc_write(xcsc, XV_CSC_HEIGHT, height);
+}
+
+static int xcsc_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xcsc_dev *xcsc = to_csc(subdev);
+
+ dev_dbg(xcsc->xvip.dev, "%s : Stream %s", __func__,
+ enable ? "On" : "Off");
+ if (!enable) {
+ /* Reset the Global IP Reset through PS GPIO */
+ gpiod_set_value_cansleep(xcsc->rst_gpio, XCSC_RESET_ASSERT);
+ gpiod_set_value_cansleep(xcsc->rst_gpio, XCSC_RESET_DEASSERT);
+ return 0;
+ }
+ xcsc_write(xcsc, XV_CSC_INVIDEOFORMAT, xcsc->cft_in);
+ xcsc_write(xcsc, XV_CSC_OUTVIDEOFORMAT, xcsc->cft_out);
+ xcsc_write(xcsc, XV_CSC_CLIPMAX, xcsc->clip_max);
+ xcsc_write(xcsc, XV_CSC_CLAMPMIN, XCSC_CLAMP_MIN_ZERO);
+ xcsc_set_size(xcsc);
+ xcsc_write_coeff(xcsc);
+#ifdef DEBUG
+ xcsc_print_coeff(xcsc);
+ dev_dbg(xcsc->xvip.dev, "cft_in = %d cft_out = %d",
+ xcsc_read(xcsc, XV_CSC_INVIDEOFORMAT),
+ xcsc_read(xcsc, XV_CSC_OUTVIDEOFORMAT));
+ dev_dbg(xcsc->xvip.dev, "clipmax = %d clampmin = %d",
+ xcsc_read(xcsc, XV_CSC_CLIPMAX),
+ xcsc_read(xcsc, XV_CSC_CLAMPMIN));
+ dev_dbg(xcsc->xvip.dev, "height = %d width = %d",
+ xcsc_read(xcsc, XV_CSC_HEIGHT),
+ xcsc_read(xcsc, XV_CSC_WIDTH));
+#endif
+ /* Start VPSS CSC IP */
+ xcsc_write(xcsc, XV_CSC_AP_CTRL, XCSC_STREAM_ON);
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops xcsc_video_ops = {
+ .s_stream = xcsc_s_stream,
+};
+
+static int xcsc_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xcsc_dev *xcsc = to_csc(subdev);
+
+ fmt->format = *__xcsc_get_pad_format(xcsc, cfg, fmt->pad, fmt->which);
+ return 0;
+}
+
+static int xcsc_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xcsc_dev *xcsc = to_csc(subdev);
+ struct v4l2_mbus_framefmt *__format;
+ struct v4l2_mbus_framefmt *__propagate;
+
+ __format = __xcsc_get_pad_format(xcsc, cfg, fmt->pad, fmt->which);
+ /* Propagate to Source Pad */
+ __propagate = __xcsc_get_pad_format(xcsc, cfg,
+ XVIP_PAD_SOURCE, fmt->which);
+ *__format = fmt->format;
+
+ __format->width = clamp_t(unsigned int, fmt->format.width,
+ XV_CSC_MIN_WIDTH, xcsc->max_width);
+ __format->height = clamp_t(unsigned int, fmt->format.height,
+ XV_CSC_MIN_HEIGHT, xcsc->max_height);
+
+ switch (__format->code) {
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ case MEDIA_BUS_FMT_RBG888_1X24:
+ case MEDIA_BUS_FMT_RBG101010_1X30:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ case MEDIA_BUS_FMT_VYYUYY10_4X20:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ case MEDIA_BUS_FMT_VUY10_1X30:
+ break;
+ default:
+ /* Unsupported Format. Default to RGB */
+ __format->code = MEDIA_BUS_FMT_RBG888_1X24;
+ return -EINVAL;
+ }
+
+ /* Always propagate Sink image size to Source */
+ __propagate->width = __format->width;
+ __propagate->height = __format->height;
+
+ fmt->format = *__format;
+ xcsc_update_formats(xcsc);
+ xcsc_set_control_defaults(xcsc);
+ xcsc_set_v4l2_ctrl_defaults(xcsc);
+ dev_info(xcsc->xvip.dev, "VPSS CSC color controls reset to defaults");
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops xcsc_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xvip_enum_frame_size,
+ .get_fmt = xcsc_get_format,
+ .set_fmt = xcsc_set_format,
+};
+
+static const struct v4l2_subdev_ops xcsc_ops = {
+ .video = &xcsc_video_ops,
+ .pad = &xcsc_pad_ops
+};
+
+static int xcsc_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct xcsc_dev *xcsc = container_of(ctrl->handler,
+ struct xcsc_dev,
+ ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_XILINX_CSC_BRIGHTNESS:
+ xcsc->brightness = (2 * ctrl->val) + 20;
+ xcsc_set_brightness(xcsc);
+ break;
+ case V4L2_CID_XILINX_CSC_CONTRAST:
+ xcsc->contrast = (4 * ctrl->val) - 200;
+ xcsc_set_contrast(xcsc);
+ break;
+ case V4L2_CID_XILINX_CSC_RED_GAIN:
+ xcsc->red_gain = (2 * ctrl->val) + 20;
+ xcsc_set_red_gain(xcsc);
+ break;
+ case V4L2_CID_XILINX_CSC_BLUE_GAIN:
+ xcsc->blue_gain = (2 * ctrl->val) + 20;
+ xcsc_set_blue_gain(xcsc);
+ break;
+ case V4L2_CID_XILINX_CSC_GREEN_GAIN:
+ xcsc->green_gain = (2 * ctrl->val) + 20;
+ xcsc_set_green_gain(xcsc);
+ break;
+ }
+#ifdef DEBUG
+ xcsc_print_k_hw(xcsc);
+ xcsc_print_coeff(xcsc);
+#endif
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops xcsc_ctrl_ops = {
+ .s_ctrl = xcsc_s_ctrl,
+};
+
+static struct v4l2_ctrl_config xcsc_color_ctrls[XCSC_COLOR_CTRL_COUNT] = {
+ /* Brightness */
+ {
+ .ops = &xcsc_ctrl_ops,
+ .id = V4L2_CID_XILINX_CSC_BRIGHTNESS,
+ .name = "CSC Brightness",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 100,
+ .step = 1,
+ .def = XCSC_COLOR_CTRL_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ },
+ /* Contrast */
+ {
+ .ops = &xcsc_ctrl_ops,
+ .id = V4L2_CID_XILINX_CSC_CONTRAST,
+ .name = "CSC Contrast",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 100,
+ .step = 1,
+ .def = XCSC_COLOR_CTRL_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ },
+ /* Red Gain */
+ {
+ .ops = &xcsc_ctrl_ops,
+ .id = V4L2_CID_XILINX_CSC_RED_GAIN,
+ .name = "CSC Red Gain",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 100,
+ .step = 1,
+ .def = XCSC_COLOR_CTRL_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ },
+ /* Blue Gain */
+ {
+ .ops = &xcsc_ctrl_ops,
+ .id = V4L2_CID_XILINX_CSC_BLUE_GAIN,
+ .name = "CSC Blue Gain",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 100,
+ .step = 1,
+ .def = XCSC_COLOR_CTRL_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ },
+ /* Green Gain */
+ {
+ .ops = &xcsc_ctrl_ops,
+ .id = V4L2_CID_XILINX_CSC_GREEN_GAIN,
+ .name = "CSC Green Gain",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 100,
+ .step = 1,
+ .def = XCSC_COLOR_CTRL_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ },
+};
+
+static int xcsc_open(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh)
+{
+ struct xcsc_dev *xcsc = to_csc(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* Initialize with default formats */
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SINK);
+ *format = xcsc->default_formats[XVIP_PAD_SINK];
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SOURCE);
+ *format = xcsc->default_formats[XVIP_PAD_SOURCE];
+
+ return 0;
+}
+
+static int xcsc_close(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops xcsc_internal_ops = {
+ .open = xcsc_open,
+ .close = xcsc_close,
+};
+
+static const struct media_entity_operations xcsc_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int xcsc_parse_of(struct xcsc_dev *xcsc)
+{
+ struct device *dev = xcsc->xvip.dev;
+ struct device_node *node = xcsc->xvip.dev->of_node;
+ const struct xvip_video_format *vip_format;
+ struct device_node *ports, *port;
+ int rval;
+ u32 port_id = 0;
+ u32 video_width[2];
+
+ rval = of_property_read_u32(node, "xlnx,max-height", &xcsc->max_height);
+ if (rval < 0) {
+ dev_err(dev, "xlnx,max-height is missing!");
+ return -EINVAL;
+ } else if (xcsc->max_height > XV_CSC_MAX_HEIGHT ||
+ xcsc->max_height < XV_CSC_MIN_HEIGHT) {
+ dev_err(dev, "Invalid height in dt");
+ return -EINVAL;
+ }
+
+ rval = of_property_read_u32(node, "xlnx,max-width", &xcsc->max_width);
+ if (rval < 0) {
+ dev_err(dev, "xlnx,max-width is missing!");
+ return -EINVAL;
+ } else if (xcsc->max_width > XV_CSC_MAX_WIDTH ||
+ xcsc->max_width < XV_CSC_MIN_WIDTH) {
+ dev_err(dev, "Invalid width in dt");
+ return -EINVAL;
+ }
+
+ ports = of_get_child_by_name(node, "ports");
+ if (!ports)
+ ports = node;
+
+ /* Get the format description for each pad */
+ for_each_child_of_node(ports, port) {
+ if (port->name && (of_node_cmp(port->name, "port") == 0)) {
+ vip_format = xvip_of_get_format(port);
+ if (IS_ERR(vip_format)) {
+ dev_err(dev, "Invalid media pad format in DT");
+ return PTR_ERR(vip_format);
+ }
+
+ rval = of_property_read_u32(port, "reg", &port_id);
+ if (rval < 0) {
+ dev_err(dev, "No reg in DT to specify pad");
+ return rval;
+ }
+
+ if (port_id != 0 && port_id != 1) {
+ dev_err(dev, "Invalid reg in DT");
+ return -EINVAL;
+ }
+ xcsc->vip_formats[port_id] = vip_format;
+
+ rval = of_property_read_u32(port, "xlnx,video-width",
+ &video_width[port_id]);
+ if (rval < 0) {
+ dev_err(dev,
+ "DT Port%d xlnx,video-width not found",
+ port_id);
+ return rval;
+ }
+ }
+ }
+ if (video_width[0] != video_width[1]) {
+ dev_err(dev, "Changing video width in DT not supported");
+ return -EINVAL;
+ }
+ switch (video_width[0]) {
+ case XVIDC_BPC_8:
+ case XVIDC_BPC_10:
+ xcsc->color_depth = video_width[0];
+ break;
+ default:
+ dev_err(dev, "Unsupported color depth %d", video_width[0]);
+ return -EINVAL;
+ }
+ /* Reset GPIO */
+ xcsc->rst_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(xcsc->rst_gpio)) {
+ if (PTR_ERR(xcsc->rst_gpio) != -EPROBE_DEFER)
+ dev_err(dev, "Reset GPIO not setup in DT");
+ return PTR_ERR(xcsc->rst_gpio);
+ }
+ return 0;
+}
+
+static int xcsc_probe(struct platform_device *pdev)
+{
+ struct xcsc_dev *xcsc;
+ struct v4l2_subdev *subdev;
+ struct v4l2_mbus_framefmt *def_fmt;
+ int rval, itr;
+
+ xcsc = devm_kzalloc(&pdev->dev, sizeof(*xcsc), GFP_KERNEL);
+ if (!xcsc)
+ return -ENOMEM;
+
+ xcsc->xvip.dev = &pdev->dev;
+
+ rval = xcsc_parse_of(xcsc);
+ if (rval < 0)
+ return rval;
+
+ /* Reset and initialize the core */
+ gpiod_set_value_cansleep(xcsc->rst_gpio, XCSC_RESET_DEASSERT);
+ rval = xvip_init_resources(&xcsc->xvip);
+ if (rval < 0)
+ return rval;
+
+ /* Init v4l2 subdev */
+ subdev = &xcsc->xvip.subdev;
+ v4l2_subdev_init(subdev, &xcsc_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xcsc_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ v4l2_set_subdevdata(subdev, xcsc);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ /* Default Formats Initialization */
+ xcsc_set_default_state(xcsc);
+ def_fmt = &xcsc->default_formats[XVIP_PAD_SINK];
+ def_fmt->code = xcsc->vip_formats[XVIP_PAD_SINK]->code;
+ def_fmt->field = V4L2_FIELD_NONE;
+ def_fmt->colorspace = V4L2_COLORSPACE_REC709;
+ def_fmt->width = XV_CSC_DEFAULT_WIDTH;
+ def_fmt->height = XV_CSC_DEFAULT_HEIGHT;
+ xcsc->formats[XVIP_PAD_SINK] = *def_fmt;
+ /* Source supports only YUV 444, YUV 422, and RGB */
+ def_fmt = &xcsc->default_formats[XVIP_PAD_SOURCE];
+ *def_fmt = xcsc->default_formats[XVIP_PAD_SINK];
+ def_fmt->code = xcsc->vip_formats[XVIP_PAD_SOURCE]->code;
+ def_fmt->width = XV_CSC_DEFAULT_WIDTH;
+ def_fmt->height = XV_CSC_DEFAULT_HEIGHT;
+ xcsc->formats[XVIP_PAD_SOURCE] = *def_fmt;
+ xcsc->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ xcsc->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ /* Init Media Entity */
+ subdev->entity.ops = &xcsc_media_ops;
+ rval = media_entity_pads_init(&subdev->entity, 2, xcsc->pads);
+ if (rval < 0)
+ goto media_error;
+ /* V4L2 Control Setup */
+ v4l2_ctrl_handler_init(&xcsc->ctrl_handler,
+ ARRAY_SIZE(xcsc_color_ctrls));
+ for (itr = 0; itr < ARRAY_SIZE(xcsc_color_ctrls); itr++) {
+ xcsc->custom_ctrls[itr] =
+ v4l2_ctrl_new_custom(&xcsc->ctrl_handler,
+ &xcsc_color_ctrls[itr], NULL);
+ }
+ if (xcsc->ctrl_handler.error) {
+ dev_err(&pdev->dev, "Failed to add v4l2 controls");
+ rval = xcsc->ctrl_handler.error;
+ goto ctrl_error;
+ }
+ subdev->ctrl_handler = &xcsc->ctrl_handler;
+ rval = v4l2_ctrl_handler_setup(&xcsc->ctrl_handler);
+ if (rval < 0) {
+ dev_err(xcsc->xvip.dev, "Failed to setup control handler");
+ goto ctrl_error;
+ }
+ platform_set_drvdata(pdev, xcsc);
+ rval = v4l2_async_register_subdev(subdev);
+ if (rval < 0) {
+ dev_err(&pdev->dev, "failed to register subdev\n");
+ goto ctrl_error;
+ }
+ dev_info(&pdev->dev, "VPSS CSC %d-bit Color Depth Probe Successful",
+ xcsc->color_depth);
+ return 0;
+ctrl_error:
+ v4l2_ctrl_handler_free(&xcsc->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+media_error:
+ xvip_cleanup_resources(&xcsc->xvip);
+ return rval;
+}
+
+static int xcsc_remove(struct platform_device *pdev)
+{
+ struct xcsc_dev *xcsc = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xcsc->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ v4l2_ctrl_handler_free(&xcsc->ctrl_handler);
+ media_entity_cleanup(&subdev->entity);
+ xvip_cleanup_resources(&xcsc->xvip);
+ return 0;
+}
+
+static const struct of_device_id xcsc_of_id_table[] = {
+ {.compatible = "xlnx,v-vpss-csc"},
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, xcsc_of_id_table);
+
+static struct platform_driver xcsc_driver = {
+ .driver = {
+ .name = "xilinx-vpss-csc",
+ .of_match_table = xcsc_of_id_table,
+ },
+ .probe = xcsc_probe,
+ .remove = xcsc_remove,
+};
+
+module_platform_driver(xcsc_driver);
+MODULE_DESCRIPTION("Xilinx VPSS CSC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-vpss-scaler.c b/drivers/media/platform/xilinx/xilinx-vpss-scaler.c
new file mode 100644
index 000000000000..6a4ef88bea12
--- /dev/null
+++ b/drivers/media/platform/xilinx/xilinx-vpss-scaler.c
@@ -0,0 +1,2108 @@
+/*
+ * Xilinx VPSS Scaler
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-subdev.h>
+#include "xilinx-vip.h"
+
+/* Frame size limits accepted by the scaler, in pixels/lines */
+#define XSCALER_MIN_WIDTH (64)
+#define XSCALER_MAX_WIDTH (8192)
+#define XSCALER_MIN_HEIGHT (64)
+#define XSCALER_MAX_HEIGHT (4320)
+#define XSCALER_MAX_PHASES (64)
+
+/* Modify to defaults in case it is not configured from application */
+#define XSCALER_DEF_IN_HEIGHT (720)
+#define XSCALER_DEF_IN_WIDTH (1280)
+#define XSCALER_DEF_OUT_HEIGHT (1080)
+#define XSCALER_DEF_OUT_WIDTH (1920)
+
+/* Scaler register offsets and field shifts/masks */
+#define XSCALER_HSF (0x0100)
+#define XSCALER_VSF (0x0104)
+#define XSCALER_SF_SHIFT (20)
+#define XSCALER_SF_MASK (0xffffff)
+#define XSCALER_SOURCE_SIZE (0x0108)
+#define XSCALER_SIZE_HORZ_SHIFT (0)
+#define XSCALER_SIZE_VERT_SHIFT (16)
+#define XSCALER_SIZE_MASK (0xfff)
+#define XSCALER_HAPERTURE (0x010c)
+#define XSCALER_VAPERTURE (0x0110)
+#define XSCALER_APERTURE_START_SHIFT (0)
+#define XSCALER_APERTURE_END_SHIFT (16)
+#define XSCALER_OUTPUT_SIZE (0x0114)
+#define XSCALER_COEF_DATA_IN (0x0134)
+#define XSCALER_BITSHIFT_16 (16)
+
+/* Video subsystems block offset */
+#define S_AXIS_RESET_OFF (0x00010000)
+#define V_HSCALER_OFF (0x00000000)
+#define V_VSCALER_OFF (0x00020000)
+
+/* HW Reset Network GPIO Channel */
+#define XGPIO_CH_RESET_SEL (1)
+#define XGPIO_RESET_MASK_VIDEO_IN BIT(0)
+#define XGPIO_RESET_MASK_IP_AXIS BIT(1)
+#define XGPIO_RESET_MASK_IP_AXIMM BIT(0)
+#define XGPIO_RESET_MASK_ALL_BLOCKS (XGPIO_RESET_MASK_VIDEO_IN | \
+ XGPIO_RESET_MASK_IP_AXIS)
+#define XGPIO_DATA_OFFSET (0x0)
+#define XGPIO_TRI_OFFSET (0x4)
+#define XGPIO_DATA2_OFFSET (0x8)
+#define XGPIO_TRI2_OFFSET (0xc)
+
+#define XGPIO_GIE_OFFSET (0x11c)
+#define XGPIO_ISR_OFFSET (0x120)
+#define XGPIO_IER_OFFSET (0x128)
+#define XGPIO_CHAN_OFFSET (8)
+/* 2^16 — fixed-point scaling step precision (see XHSC_STEP_PRECISION_SHIFT) */
+#define STEP_PRECISION (65536)
+
+/* Video IP Formats — presumably the values written to the COLORMODE
+ * registers below; confirm against the scaler IP register spec.
+ */
+enum xscaler_vid_reg_fmts {
+ XVIDC_CSF_RGB = 0,
+ XVIDC_CSF_YCRCB_444,
+ XVIDC_CSF_YCRCB_422,
+ XVIDC_CSF_YCRCB_420,
+};
+
+/* Video IP PPC (pixels per clock) configurations */
+#define XSCALER_PPC_1 (1)
+#define XSCALER_PPC_2 (2)
+#define XSCALER_PPC_4 (4)
+
+/* Upper limits of the H/V scaler filter geometry */
+#define XV_HSCALER_MAX_H_TAPS (12)
+#define XV_HSCALER_MAX_H_PHASES (64)
+#define XV_HSCALER_MAX_LINE_WIDTH (3840)
+#define XV_VSCALER_MAX_V_TAPS (12)
+#define XV_VSCALER_MAX_V_PHASES (64)
+
+/* Supported tap counts for the horizontal and vertical scalers */
+#define XV_HSCALER_TAPS_2 (2)
+#define XV_HSCALER_TAPS_4 (4)
+#define XV_HSCALER_TAPS_6 (6)
+#define XV_HSCALER_TAPS_8 (8)
+#define XV_HSCALER_TAPS_10 (10)
+#define XV_HSCALER_TAPS_12 (12)
+#define XV_VSCALER_TAPS_2 (2)
+#define XV_VSCALER_TAPS_4 (4)
+#define XV_VSCALER_TAPS_6 (6)
+#define XV_VSCALER_TAPS_8 (8)
+#define XV_VSCALER_TAPS_10 (10)
+#define XV_VSCALER_TAPS_12 (12)
+
+/* Mask definitions for Low and high 16 bits in a 32 bit number */
+#define XHSC_MASK_LOW_16BITS GENMASK(15, 0)
+#define XHSC_MASK_HIGH_16BITS GENMASK(31, 16)
+#define XHSC_MASK_LOW_32BITS GENMASK(31, 0)
+#define XHSC_STEP_PRECISION_SHIFT (16)
+#define XHSC_HPHASE_SHIFT_BY_6 (6)
+#define XHSC_HPHASE_MULTIPLIER (9)
+#define XHSC_HPHASE_MUL_4PPC (10)
+
+/* Mask definitions for Low and high 16 bits in a 32 bit number */
+#define XVSC_MASK_LOW_16BITS GENMASK(15, 0)
+#define XVSC_MASK_HIGH_16BITS GENMASK(31, 16)
+
+/* XSCALER POWER MACROS */
+#define XSCALER_RESET_ASSERT (0x1)
+#define XSCALER_RESET_DEASSERT (0x0)
+
+/* Scaler AP Control Registers: start + auto-restart = free-running stream */
+#define XSCALER_START BIT(0)
+#define XSCALER_AUTO_RESTART BIT(7)
+#define XSCALER_STREAM_ON (XSCALER_START | XSCALER_AUTO_RESTART)
+
+/*
+ * H-scaler register offsets (AP control/interrupt block followed by
+ * the HW configuration registers and the filter-coefficient window).
+ * Hex literals use lowercase "0x" consistently.
+ */
+#define XV_HSCALER_CTRL_ADDR_AP_CTRL (0x0000)
+#define XV_HSCALER_CTRL_ADDR_GIE (0x0004)
+#define XV_HSCALER_CTRL_ADDR_IER (0x0008)
+#define XV_HSCALER_CTRL_ADDR_ISR (0x000c)
+#define XV_HSCALER_CTRL_ADDR_HWREG_HEIGHT_DATA (0x0010)
+#define XV_HSCALER_CTRL_ADDR_HWREG_WIDTHIN_DATA (0x0018)
+#define XV_HSCALER_CTRL_ADDR_HWREG_WIDTHOUT_DATA (0x0020)
+#define XV_HSCALER_CTRL_ADDR_HWREG_COLORMODE_DATA (0x0028)
+#define XV_HSCALER_CTRL_ADDR_HWREG_PIXELRATE_DATA (0x0030)
+#define XV_HSCALER_CTRL_ADDR_HWREG_COLORMODEOUT_DATA (0x0038)
+#define XV_HSCALER_CTRL_ADDR_HWREG_HFLTCOEFF_BASE (0x0800)
+#define XV_HSCALER_CTRL_ADDR_HWREG_HFLTCOEFF_HIGH (0x0bff)
+
+/* Coefficients for 6, 8, 10 and 12 tap filters */
+
+/*
+ * 6-tap, 64-phase Lanczos-2 polyphase set. Phase 0 is the identity
+ * kernel { 0, 0, 4096, 0, 0, 0 }, so 4096 appears to be unity gain
+ * (Q12 fixed point) — confirm against the scaler IP specification.
+ */
+static const s16 XV_lanczos2_taps6[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_6] = {
+ { 0, 0, 4096, 0, 0, 0, },
+ { 0, -40, 4099, 42, 0, -5, },
+ { -1, -77, 4097, 87, -1, -9, },
+ { -2, -111, 4092, 134, -2, -15, },
+ { -4, -143, 4082, 184, -4, -19, },
+ { -6, -173, 4068, 237, -7, -23, },
+ { -8, -201, 4051, 292, -10, -28, },
+ { -11, -226, 4029, 350, -13, -33, },
+ { -14, -248, 4003, 411, -18, -38, },
+ { -17, -269, 3974, 474, -23, -43, },
+ { -21, -287, 3940, 539, -28, -47, },
+ { -24, -303, 3903, 608, -34, -54, },
+ { -28, -317, 3862, 678, -41, -58, },
+ { -32, -329, 3817, 751, -49, -62, },
+ { -37, -339, 3768, 826, -57, -65, },
+ { -41, -347, 3716, 903, -65, -70, },
+ { -45, -353, 3661, 982, -75, -74, },
+ { -50, -358, 3602, 1063, -84, -77, },
+ { -54, -361, 3539, 1146, -95, -79, },
+ { -58, -362, 3474, 1230, -106, -82, },
+ { -62, -361, 3406, 1317, -117, -87, },
+ { -66, -359, 3335, 1404, -128, -90, },
+ { -70, -356, 3261, 1493, -140, -92, },
+ { -74, -351, 3185, 1583, -153, -94, },
+ { -77, -346, 3106, 1673, -165, -95, },
+ { -81, -339, 3025, 1765, -178, -96, },
+ { -84, -331, 2942, 1857, -191, -97, },
+ { -87, -322, 2858, 1950, -204, -99, },
+ { -89, -313, 2771, 2043, -217, -99, },
+ { -92, -302, 2683, 2136, -230, -99, },
+ { -94, -292, 2594, 2228, -243, -97, },
+ { -95, -280, 2504, 2321, -256, -98, },
+ { -97, -268, 2413, 2413, -268, -97, },
+ { -97, -256, 2321, 2504, -280, -96, },
+ { -98, -243, 2228, 2594, -292, -93, },
+ { -98, -230, 2136, 2683, -302, -93, },
+ { -98, -217, 2043, 2771, -313, -90, },
+ { -98, -204, 1950, 2858, -322, -88, },
+ { -97, -191, 1857, 2942, -331, -84, },
+ { -96, -178, 1765, 3025, -339, -81, },
+ { -95, -165, 1673, 3106, -346, -77, },
+ { -93, -153, 1583, 3185, -351, -75, },
+ { -91, -140, 1493, 3261, -356, -71, },
+ { -89, -128, 1404, 3335, -359, -67, },
+ { -86, -117, 1317, 3406, -361, -63, },
+ { -83, -106, 1230, 3474, -362, -57, },
+ { -80, -95, 1146, 3539, -361, -53, },
+ { -77, -84, 1063, 3602, -358, -50, },
+ { -73, -75, 982, 3661, -353, -46, },
+ { -69, -65, 903, 3716, -347, -42, },
+ { -65, -57, 826, 3768, -339, -37, },
+ { -61, -49, 751, 3817, -329, -33, },
+ { -57, -41, 678, 3862, -317, -29, },
+ { -52, -34, 608, 3903, -303, -26, },
+ { -47, -28, 539, 3940, -287, -21, },
+ { -43, -23, 474, 3974, -269, -17, },
+ { -38, -18, 411, 4003, -248, -14, },
+ { -33, -13, 350, 4029, -226, -11, },
+ { -28, -10, 292, 4051, -201, -8, },
+ { -24, -7, 237, 4068, -173, -5, },
+ { -19, -4, 184, 4082, -143, -4, },
+ { -14, -2, 134, 4092, -111, -3, },
+ { -9, -1, 87, 4097, -77, -1, },
+ { -5, 0, 42, 4099, -40, 0, }
+};
+
+/* ScalingRatio = 1.25
+ * NOTE(review): the identifier suffix "SR1p2" suggests a 1.2 ratio while
+ * this comment says 1.25 — confirm which is correct against the coefficient
+ * generator / IP spec. 6-tap, 64-phase fixed coefficient set.
+ */
+static const s16 XV_fixedcoeff_taps6_SR1p2[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_6] = {
+ { -102, 512, 3208, 512, -102, 68, },
+ { -97, 471, 3209, 555, -107, 65, },
+ { -92, 431, 3208, 599, -113, 63, },
+ { -87, 392, 3205, 645, -118, 59, },
+ { -82, 354, 3199, 691, -124, 58, },
+ { -77, 318, 3191, 739, -130, 55, },
+ { -72, 282, 3181, 788, -136, 53, },
+ { -68, 248, 3169, 838, -141, 50, },
+ { -64, 216, 3155, 889, -147, 47, },
+ { -59, 184, 3139, 941, -153, 44, },
+ { -55, 154, 3120, 993, -158, 42, },
+ { -52, 125, 3100, 1047, -164, 40, },
+ { -48, 98, 3077, 1101, -169, 37, },
+ { -44, 71, 3052, 1157, -174, 34, },
+ { -41, 46, 3025, 1212, -180, 34, },
+ { -38, 23, 2996, 1269, -184, 30, },
+ { -35, 0, 2965, 1326, -189, 29, },
+ { -32, -21, 2933, 1383, -193, 26, },
+ { -29, -41, 2898, 1441, -198, 25, },
+ { -26, -60, 2862, 1500, -201, 21, },
+ { -24, -78, 2823, 1558, -205, 22, },
+ { -21, -94, 2784, 1617, -208, 18, },
+ { -19, -109, 2742, 1676, -210, 16, },
+ { -17, -123, 2699, 1734, -212, 15, },
+ { -14, -136, 2654, 1793, -214, 13, },
+ { -12, -148, 2608, 1852, -214, 10, },
+ { -10, -159, 2560, 1910, -215, 10, },
+ { -9, -168, 2512, 1968, -215, 8, },
+ { -7, -177, 2461, 2026, -214, 7, },
+ { -5, -185, 2410, 2083, -212, 5, },
+ { -3, -192, 2358, 2139, -209, 3, },
+ { -2, -197, 2304, 2195, -206, 2, },
+ { 0, -202, 2250, 2250, -202, 0, },
+ { 2, -206, 2195, 2304, -197, -2, },
+ { 3, -209, 2139, 2358, -192, -3, },
+ { 5, -212, 2083, 2410, -185, -5, },
+ { 6, -214, 2026, 2461, -177, -6, },
+ { 8, -215, 1968, 2512, -168, -9, },
+ { 10, -215, 1910, 2560, -159, -10, },
+ { 11, -214, 1852, 2608, -148, -13, },
+ { 13, -214, 1793, 2654, -136, -14, },
+ { 15, -212, 1734, 2699, -123, -17, },
+ { 17, -210, 1676, 2742, -109, -20, },
+ { 18, -208, 1617, 2784, -94, -21, },
+ { 20, -205, 1558, 2823, -78, -22, },
+ { 22, -201, 1500, 2862, -60, -27, },
+ { 24, -198, 1441, 2898, -41, -28, },
+ { 26, -193, 1383, 2933, -21, -32, },
+ { 28, -189, 1326, 2965, 0, -34, },
+ { 30, -184, 1269, 2996, 23, -38, },
+ { 33, -180, 1212, 3025, 46, -40, },
+ { 35, -174, 1157, 3052, 71, -45, },
+ { 37, -169, 1101, 3077, 98, -48, },
+ { 40, -164, 1047, 3100, 125, -52, },
+ { 42, -158, 993, 3120, 154, -55, },
+ { 44, -153, 941, 3139, 184, -59, },
+ { 47, -147, 889, 3155, 216, -64, },
+ { 50, -141, 838, 3169, 248, -68, },
+ { 52, -136, 788, 3181, 282, -71, },
+ { 55, -130, 739, 3191, 318, -77, },
+ { 57, -124, 691, 3199, 354, -81, },
+ { 60, -118, 645, 3205, 392, -88, },
+ { 63, -113, 599, 3208, 431, -92, },
+ { 65, -107, 555, 3209, 471, -97, }
+};
+
+/* ScalingRatio = 2.0 — 6-tap, 64-phase fixed coefficient set */
+static const s16 XV_fixedcoeff_taps6_SR2[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_6] = {
+ { 0, 970, 2235, 970, 0, -79, },
+ { -3, 943, 2233, 997, 3, -77, },
+ { -5, 915, 2231, 1025, 6, -76, },
+ { -8, 888, 2227, 1052, 10, -73, },
+ { -10, 861, 2223, 1079, 14, -71, },
+ { -12, 834, 2218, 1107, 18, -69, },
+ { -14, 808, 2213, 1134, 22, -67, },
+ { -15, 782, 2206, 1162, 27, -66, },
+ { -17, 756, 2199, 1189, 32, -63, },
+ { -18, 731, 2191, 1217, 37, -62, },
+ { -20, 706, 2182, 1245, 42, -59, },
+ { -21, 681, 2172, 1272, 48, -56, },
+ { -22, 657, 2162, 1300, 55, -56, },
+ { -22, 633, 2151, 1327, 61, -54, },
+ { -23, 609, 2139, 1355, 68, -52, },
+ { -24, 586, 2126, 1382, 76, -50, },
+ { -25, 564, 2113, 1410, 83, -49, },
+ { -25, 541, 2099, 1437, 91, -47, },
+ { -26, 520, 2084, 1464, 100, -46, },
+ { -26, 498, 2069, 1491, 109, -45, },
+ { -27, 477, 2053, 1517, 118, -42, },
+ { -27, 457, 2036, 1544, 128, -42, },
+ { -27, 437, 2019, 1570, 138, -41, },
+ { -28, 418, 2001, 1596, 148, -39, },
+ { -28, 399, 1983, 1622, 160, -40, },
+ { -29, 380, 1964, 1647, 171, -37, },
+ { -29, 362, 1944, 1672, 183, -36, },
+ { -29, 345, 1924, 1697, 195, -36, },
+ { -30, 328, 1903, 1722, 208, -35, },
+ { -30, 311, 1882, 1746, 221, -34, },
+ { -31, 295, 1860, 1770, 235, -33, },
+ { -31, 279, 1838, 1793, 249, -32, },
+ { -32, 264, 1816, 1816, 264, -32, },
+ { -32, 249, 1793, 1838, 279, -31, },
+ { -33, 235, 1770, 1860, 295, -31, },
+ { -34, 221, 1746, 1882, 311, -30, },
+ { -35, 208, 1722, 1903, 328, -30, },
+ { -35, 195, 1697, 1924, 345, -30, },
+ { -36, 183, 1672, 1944, 362, -29, },
+ { -37, 171, 1647, 1964, 380, -29, },
+ { -38, 160, 1622, 1983, 399, -30, },
+ { -39, 148, 1596, 2001, 418, -28, },
+ { -40, 138, 1570, 2019, 437, -28, },
+ { -42, 128, 1544, 2036, 457, -27, },
+ { -43, 118, 1517, 2053, 477, -26, },
+ { -44, 109, 1491, 2069, 498, -27, },
+ { -46, 100, 1464, 2084, 520, -26, },
+ { -47, 91, 1437, 2099, 541, -25, },
+ { -49, 83, 1410, 2113, 564, -25, },
+ { -50, 76, 1382, 2126, 586, -24, },
+ { -52, 68, 1355, 2139, 609, -23, },
+ { -54, 61, 1327, 2151, 633, -22, },
+ { -55, 55, 1300, 2162, 657, -23, },
+ { -57, 48, 1272, 2172, 681, -20, },
+ { -59, 42, 1245, 2182, 706, -20, },
+ { -61, 37, 1217, 2191, 731, -19, },
+ { -63, 32, 1189, 2199, 756, -17, },
+ { -65, 27, 1162, 2206, 782, -16, },
+ { -67, 22, 1134, 2213, 808, -14, },
+ { -69, 18, 1107, 2218, 834, -12, },
+ { -71, 14, 1079, 2223, 861, -10, },
+ { -73, 10, 1052, 2227, 888, -8, },
+ { -75, 6, 1025, 2231, 915, -6, },
+ { -77, 3, 997, 2233, 943, -3, }
+};
+
+/* ScalingRatio = 3.0 — 6-tap, 64-phase fixed coefficient set */
+static const s16 XV_fixedcoeff_taps6_SR3[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_6] = {
+ { 126, 1019, 1806, 1019, 126, 0, },
+ { 120, 1000, 1805, 1038, 132, 1, },
+ { 114, 980, 1804, 1057, 138, 3, },
+ { 108, 961, 1802, 1075, 145, 5, },
+ { 103, 942, 1800, 1094, 152, 5, },
+ { 98, 922, 1797, 1113, 159, 7, },
+ { 93, 903, 1794, 1131, 167, 8, },
+ { 88, 884, 1790, 1150, 174, 10, },
+ { 84, 865, 1786, 1168, 182, 11, },
+ { 80, 846, 1782, 1187, 191, 10, },
+ { 76, 827, 1777, 1205, 199, 12, },
+ { 72, 809, 1771, 1223, 208, 13, },
+ { 68, 790, 1766, 1241, 217, 14, },
+ { 65, 772, 1759, 1259, 226, 15, },
+ { 61, 753, 1753, 1277, 236, 16, },
+ { 58, 735, 1746, 1295, 246, 16, },
+ { 56, 717, 1738, 1313, 256, 16, },
+ { 53, 699, 1730, 1330, 266, 18, },
+ { 50, 682, 1722, 1347, 277, 18, },
+ { 48, 664, 1713, 1364, 288, 19, },
+ { 46, 647, 1704, 1381, 299, 19, },
+ { 43, 630, 1694, 1398, 311, 20, },
+ { 41, 613, 1684, 1414, 323, 21, },
+ { 40, 596, 1674, 1430, 335, 21, },
+ { 38, 580, 1663, 1446, 347, 22, },
+ { 36, 563, 1652, 1462, 360, 23, },
+ { 35, 547, 1641, 1478, 373, 22, },
+ { 33, 531, 1629, 1493, 386, 24, },
+ { 32, 516, 1617, 1508, 399, 24, },
+ { 31, 500, 1604, 1523, 413, 25, },
+ { 30, 485, 1592, 1537, 427, 25, },
+ { 29, 470, 1578, 1551, 441, 27, },
+ { 28, 455, 1565, 1565, 455, 28, },
+ { 27, 441, 1551, 1578, 470, 29, },
+ { 26, 427, 1537, 1592, 485, 29, },
+ { 25, 413, 1523, 1604, 500, 31, },
+ { 24, 399, 1508, 1617, 516, 32, },
+ { 24, 386, 1493, 1629, 531, 33, },
+ { 23, 373, 1478, 1641, 547, 34, },
+ { 22, 360, 1462, 1652, 563, 37, },
+ { 22, 347, 1446, 1663, 580, 38, },
+ { 21, 335, 1430, 1674, 596, 40, },
+ { 20, 323, 1414, 1684, 613, 42, },
+ { 20, 311, 1398, 1694, 630, 43, },
+ { 19, 299, 1381, 1704, 647, 46, },
+ { 19, 288, 1364, 1713, 664, 48, },
+ { 18, 277, 1347, 1722, 682, 50, },
+ { 17, 266, 1330, 1730, 699, 54, },
+ { 17, 256, 1313, 1738, 717, 55, },
+ { 16, 246, 1295, 1746, 735, 58, },
+ { 15, 236, 1277, 1753, 753, 62, },
+ { 15, 226, 1259, 1759, 772, 65, },
+ { 14, 217, 1241, 1766, 790, 68, },
+ { 13, 208, 1223, 1771, 809, 72, },
+ { 12, 199, 1205, 1777, 827, 76, },
+ { 11, 191, 1187, 1782, 846, 79, },
+ { 10, 182, 1168, 1786, 865, 85, },
+ { 9, 174, 1150, 1790, 884, 89, },
+ { 8, 167, 1131, 1794, 903, 93, },
+ { 7, 159, 1113, 1797, 922, 98, },
+ { 6, 152, 1094, 1800, 942, 102, },
+ { 5, 145, 1075, 1802, 961, 108, },
+ { 3, 138, 1057, 1804, 980, 114, },
+ { 2, 132, 1038, 1805, 1000, 119, }
+};
+
+/* ScalingRatio = 4 — 6-tap, 64-phase fixed coefficient set */
+static const s16 XV_fixedcoeff_taps6_SR4[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_6] = {
+ { 176, 1009, 1643, 1009, 176, 83, },
+ { 169, 993, 1644, 1026, 183, 81, },
+ { 162, 978, 1644, 1042, 190, 80, },
+ { 156, 962, 1643, 1058, 198, 79, },
+ { 150, 946, 1642, 1074, 205, 79, },
+ { 144, 930, 1641, 1091, 213, 77, },
+ { 138, 914, 1640, 1107, 222, 75, },
+ { 133, 898, 1638, 1123, 230, 74, },
+ { 128, 882, 1635, 1139, 239, 73, },
+ { 123, 866, 1633, 1154, 248, 72, },
+ { 118, 850, 1629, 1170, 257, 72, },
+ { 114, 834, 1626, 1186, 267, 69, },
+ { 109, 818, 1622, 1201, 276, 70, },
+ { 105, 802, 1618, 1217, 286, 68, },
+ { 101, 786, 1613, 1232, 297, 67, },
+ { 97, 771, 1608, 1247, 307, 66, },
+ { 94, 755, 1603, 1262, 318, 64, },
+ { 91, 739, 1597, 1276, 328, 65, },
+ { 87, 724, 1591, 1291, 339, 64, },
+ { 85, 708, 1585, 1305, 351, 62, },
+ { 82, 693, 1578, 1319, 362, 62, },
+ { 79, 677, 1571, 1333, 374, 62, },
+ { 77, 662, 1563, 1347, 386, 61, },
+ { 75, 647, 1556, 1360, 398, 60, },
+ { 73, 632, 1547, 1373, 410, 61, },
+ { 71, 617, 1539, 1386, 423, 60, },
+ { 69, 602, 1530, 1399, 436, 60, },
+ { 68, 587, 1521, 1412, 449, 59, },
+ { 66, 573, 1511, 1424, 462, 60, },
+ { 65, 558, 1501, 1436, 475, 61, },
+ { 64, 544, 1491, 1447, 488, 62, },
+ { 63, 530, 1481, 1459, 502, 61, },
+ { 62, 516, 1470, 1470, 516, 62, },
+ { 62, 502, 1459, 1481, 530, 62, },
+ { 61, 488, 1447, 1491, 544, 65, },
+ { 61, 475, 1436, 1501, 558, 65, },
+ { 60, 462, 1424, 1511, 573, 66, },
+ { 60, 449, 1412, 1521, 587, 67, },
+ { 60, 436, 1399, 1530, 602, 69, },
+ { 60, 423, 1386, 1539, 617, 71, },
+ { 61, 410, 1373, 1547, 632, 73, },
+ { 61, 398, 1360, 1556, 647, 74, },
+ { 61, 386, 1347, 1563, 662, 77, },
+ { 62, 374, 1333, 1571, 677, 79, },
+ { 62, 362, 1319, 1578, 693, 82, },
+ { 63, 351, 1305, 1585, 708, 84, },
+ { 64, 339, 1291, 1591, 724, 87, },
+ { 64, 328, 1276, 1597, 739, 92, },
+ { 65, 318, 1262, 1603, 755, 93, },
+ { 66, 307, 1247, 1608, 771, 97, },
+ { 67, 297, 1232, 1613, 786, 101, },
+ { 68, 286, 1217, 1618, 802, 105, },
+ { 69, 276, 1201, 1622, 818, 110, },
+ { 70, 267, 1186, 1626, 834, 113, },
+ { 71, 257, 1170, 1629, 850, 119, },
+ { 72, 248, 1154, 1633, 866, 123, },
+ { 73, 239, 1139, 1635, 882, 128, },
+ { 75, 230, 1123, 1638, 898, 132, },
+ { 76, 222, 1107, 1640, 914, 137, },
+ { 77, 213, 1091, 1641, 930, 144, },
+ { 78, 205, 1074, 1642, 946, 151, },
+ { 79, 198, 1058, 1643, 962, 156, },
+ { 80, 190, 1042, 1644, 978, 162, },
+ { 82, 183, 1026, 1644, 993, 168, }
+};
+
+/* ScalingRatio = 2.0 — 8-tap, 64-phase fixed coefficient set */
+static const s16 XV_fixedcoeff_taps8_SR2[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_8] = {
+ { -55, 0, 1078, 2049, 1078, 0, -55, 1, },
+ { -53, -7, 1055, 2049, 1102, 7, -56, -1, },
+ { -52, -13, 1032, 2048, 1126, 15, -58, -2, },
+ { -50, -20, 1009, 2047, 1149, 22, -59, -2, },
+ { -49, -26, 986, 2046, 1173, 31, -61, -4, },
+ { -47, -31, 963, 2043, 1197, 39, -62, -6, },
+ { -46, -37, 940, 2040, 1220, 48, -64, -5, },
+ { -45, -42, 917, 2037, 1244, 57, -65, -7, },
+ { -43, -47, 894, 2033, 1267, 66, -67, -7, },
+ { -42, -51, 871, 2028, 1290, 76, -69, -7, },
+ { -41, -55, 848, 2023, 1313, 86, -70, -8, },
+ { -40, -59, 826, 2017, 1336, 97, -72, -9, },
+ { -38, -63, 803, 2010, 1359, 108, -73, -10, },
+ { -37, -67, 781, 2003, 1382, 119, -75, -10, },
+ { -36, -70, 759, 1996, 1405, 130, -76, -12, },
+ { -35, -73, 737, 1987, 1427, 142, -78, -11, },
+ { -34, -76, 715, 1979, 1449, 154, -79, -12, },
+ { -33, -78, 693, 1969, 1471, 167, -81, -12, },
+ { -32, -81, 672, 1959, 1493, 180, -82, -13, },
+ { -31, -83, 650, 1949, 1514, 193, -83, -13, },
+ { -30, -85, 629, 1938, 1536, 207, -85, -14, },
+ { -29, -86, 609, 1926, 1557, 221, -86, -16, },
+ { -28, -88, 588, 1914, 1577, 235, -87, -15, },
+ { -28, -89, 568, 1902, 1598, 250, -88, -17, },
+ { -27, -90, 548, 1889, 1618, 265, -89, -18, },
+ { -26, -91, 528, 1875, 1638, 280, -90, -18, },
+ { -25, -92, 508, 1861, 1657, 296, -91, -18, },
+ { -24, -93, 489, 1846, 1676, 312, -92, -18, },
+ { -24, -93, 470, 1831, 1695, 328, -92, -19, },
+ { -23, -94, 451, 1816, 1714, 345, -93, -20, },
+ { -22, -94, 432, 1800, 1732, 361, -93, -20, },
+ { -22, -94, 414, 1783, 1749, 379, -94, -19, },
+ { -21, -94, 396, 1767, 1767, 396, -94, -21, },
+ { -21, -94, 379, 1749, 1783, 414, -94, -20, },
+ { -20, -93, 361, 1732, 1800, 432, -94, -22, },
+ { -19, -93, 345, 1714, 1816, 451, -94, -24, },
+ { -19, -92, 328, 1695, 1831, 470, -93, -24, },
+ { -18, -92, 312, 1676, 1846, 489, -93, -24, },
+ { -18, -91, 296, 1657, 1861, 508, -92, -25, },
+ { -17, -90, 280, 1638, 1875, 528, -91, -27, },
+ { -17, -89, 265, 1618, 1889, 548, -90, -28, },
+ { -16, -88, 250, 1598, 1902, 568, -89, -29, },
+ { -16, -87, 235, 1577, 1914, 588, -88, -27, },
+ { -15, -86, 221, 1557, 1926, 609, -86, -30, },
+ { -14, -85, 207, 1536, 1938, 629, -85, -30, },
+ { -14, -83, 193, 1514, 1949, 650, -83, -30, },
+ { -13, -82, 180, 1493, 1959, 672, -81, -32, },
+ { -13, -81, 167, 1471, 1969, 693, -78, -32, },
+ { -12, -79, 154, 1449, 1979, 715, -76, -34, },
+ { -12, -78, 142, 1427, 1987, 737, -73, -34, },
+ { -11, -76, 130, 1405, 1996, 759, -70, -37, },
+ { -10, -75, 119, 1382, 2003, 781, -67, -37, },
+ { -10, -73, 108, 1359, 2010, 803, -63, -38, },
+ { -9, -72, 97, 1336, 2017, 826, -59, -40, },
+ { -8, -70, 86, 1313, 2023, 848, -55, -41, },
+ { -8, -69, 76, 1290, 2028, 871, -51, -41, },
+ { -7, -67, 66, 1267, 2033, 894, -47, -43, },
+ { -6, -65, 57, 1244, 2037, 917, -42, -46, },
+ { -5, -64, 48, 1220, 2040, 940, -37, -46, },
+ { -5, -62, 39, 1197, 2043, 963, -31, -48, },
+ { -4, -61, 31, 1173, 2046, 986, -26, -49, },
+ { -3, -59, 22, 1149, 2047, 1009, -20, -49, },
+ { -2, -58, 15, 1126, 2048, 1032, -13, -52, },
+ { -1, -56, 7, 1102, 2049, 1055, -7, -53, }
+};
+
+/* ScalingRatio = 3.0 — 8-tap, 64-phase fixed coefficient set */
+static const s16 XV_fixedcoeff_taps8_SR3[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_8] = {
+ { 0, 275, 1036, 1514, 1036, 275, 0, -40, },
+ { -1, 266, 1023, 1514, 1048, 283, 1, -38, },
+ { -2, 257, 1010, 1513, 1060, 292, 2, -36, },
+ { -3, 249, 997, 1512, 1073, 301, 3, -36, },
+ { -3, 241, 983, 1510, 1085, 310, 5, -35, },
+ { -4, 233, 970, 1509, 1097, 319, 6, -34, },
+ { -5, 225, 957, 1507, 1109, 329, 7, -33, },
+ { -6, 217, 944, 1505, 1121, 338, 9, -32, },
+ { -6, 210, 931, 1503, 1133, 348, 10, -33, },
+ { -7, 202, 917, 1500, 1144, 358, 12, -30, },
+ { -7, 195, 904, 1497, 1156, 368, 13, -30, },
+ { -8, 188, 891, 1494, 1167, 378, 15, -29, },
+ { -9, 181, 877, 1491, 1179, 388, 17, -28, },
+ { -9, 174, 864, 1487, 1190, 398, 19, -27, },
+ { -9, 168, 851, 1483, 1201, 409, 21, -28, },
+ { -10, 161, 837, 1479, 1212, 419, 23, -25, },
+ { -10, 155, 824, 1475, 1223, 430, 25, -26, },
+ { -11, 149, 811, 1470, 1233, 441, 27, -24, },
+ { -11, 142, 798, 1465, 1244, 452, 29, -23, },
+ { -12, 137, 784, 1460, 1254, 463, 32, -22, },
+ { -12, 131, 771, 1455, 1264, 474, 34, -21, },
+ { -12, 125, 758, 1449, 1275, 486, 37, -22, },
+ { -13, 120, 745, 1444, 1284, 497, 40, -21, },
+ { -13, 115, 732, 1438, 1294, 509, 42, -21, },
+ { -13, 109, 719, 1432, 1304, 520, 45, -20, },
+ { -14, 104, 706, 1425, 1313, 532, 48, -18, },
+ { -14, 100, 693, 1418, 1322, 544, 52, -19, },
+ { -14, 95, 680, 1412, 1332, 556, 55, -20, },
+ { -15, 90, 667, 1404, 1340, 568, 58, -16, },
+ { -15, 86, 655, 1397, 1349, 580, 62, -18, },
+ { -16, 82, 642, 1390, 1358, 592, 66, -18, },
+ { -16, 77, 630, 1382, 1366, 605, 69, -17, },
+ { -16, 73, 617, 1374, 1374, 617, 73, -16, },
+ { -17, 69, 605, 1366, 1382, 630, 77, -16, },
+ { -17, 66, 592, 1358, 1390, 642, 82, -17, },
+ { -18, 62, 580, 1349, 1397, 655, 86, -15, },
+ { -18, 58, 568, 1340, 1404, 667, 90, -13, },
+ { -18, 55, 556, 1332, 1412, 680, 95, -16, },
+ { -19, 52, 544, 1322, 1418, 693, 100, -14, },
+ { -19, 48, 532, 1313, 1425, 706, 104, -13, },
+ { -20, 45, 520, 1304, 1432, 719, 109, -13, },
+ { -20, 42, 509, 1294, 1438, 732, 115, -14, },
+ { -21, 40, 497, 1284, 1444, 745, 120, -13, },
+ { -22, 37, 486, 1275, 1449, 758, 125, -12, },
+ { -22, 34, 474, 1264, 1455, 771, 131, -11, },
+ { -23, 32, 463, 1254, 1460, 784, 137, -11, },
+ { -23, 29, 452, 1244, 1465, 798, 142, -11, },
+ { -24, 27, 441, 1233, 1470, 811, 149, -11, },
+ { -25, 25, 430, 1223, 1475, 824, 155, -11, },
+ { -26, 23, 419, 1212, 1479, 837, 161, -9, },
+ { -26, 21, 409, 1201, 1483, 851, 168, -11, },
+ { -27, 19, 398, 1190, 1487, 864, 174, -9, },
+ { -28, 17, 388, 1179, 1491, 877, 181, -9, },
+ { -29, 15, 378, 1167, 1494, 891, 188, -8, },
+ { -29, 13, 368, 1156, 1497, 904, 195, -8, },
+ { -30, 12, 358, 1144, 1500, 917, 202, -7, },
+ { -31, 10, 348, 1133, 1503, 931, 210, -8, },
+ { -32, 9, 338, 1121, 1505, 944, 217, -6, },
+ { -33, 7, 329, 1109, 1507, 957, 225, -5, },
+ { -34, 6, 319, 1097, 1509, 970, 233, -4, },
+ { -35, 5, 310, 1085, 1510, 983, 241, -3, },
+ { -36, 3, 301, 1073, 1512, 997, 249, -3, },
+ { -37, 2, 292, 1060, 1513, 1010, 257, -1, },
+ { -38, 1, 283, 1048, 1514, 1023, 266, -1, }
+};
+
+/* ScalingRatio = 4 — 8-tap, 64-phase fixed coefficient set */
+static const s16 XV_fixedcoeff_taps8_SR4[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_8] = {
+ { 49, 366, 977, 1312, 977, 366, 49, 0, },
+ { 48, 357, 967, 1312, 986, 374, 51, 1, },
+ { 46, 349, 958, 1311, 995, 382, 54, 1, },
+ { 44, 342, 948, 1311, 1004, 390, 56, 1, },
+ { 42, 334, 939, 1310, 1013, 399, 58, 1, },
+ { 40, 326, 929, 1309, 1021, 407, 60, 4, },
+ { 39, 318, 919, 1308, 1030, 415, 63, 4, },
+ { 37, 311, 910, 1307, 1039, 424, 65, 3, },
+ { 36, 303, 900, 1305, 1047, 433, 68, 4, },
+ { 34, 296, 890, 1303, 1055, 442, 70, 6, },
+ { 33, 289, 880, 1301, 1064, 450, 73, 6, },
+ { 32, 282, 870, 1299, 1072, 459, 76, 6, },
+ { 31, 275, 861, 1297, 1080, 468, 79, 5, },
+ { 29, 268, 851, 1295, 1088, 477, 82, 6, },
+ { 28, 261, 841, 1292, 1096, 486, 85, 7, },
+ { 27, 254, 831, 1289, 1104, 496, 88, 7, },
+ { 26, 248, 821, 1287, 1112, 505, 91, 6, },
+ { 25, 241, 811, 1284, 1119, 514, 94, 8, },
+ { 24, 235, 800, 1280, 1127, 523, 98, 9, },
+ { 23, 228, 790, 1277, 1134, 533, 101, 10, },
+ { 22, 222, 780, 1273, 1141, 542, 105, 11, },
+ { 22, 216, 770, 1270, 1148, 552, 109, 9, },
+ { 21, 210, 760, 1266, 1155, 561, 112, 11, },
+ { 20, 204, 750, 1262, 1162, 571, 116, 11, },
+ { 19, 198, 740, 1257, 1169, 581, 120, 12, },
+ { 19, 193, 730, 1253, 1175, 590, 124, 12, },
+ { 18, 187, 720, 1248, 1182, 600, 129, 12, },
+ { 17, 182, 710, 1244, 1188, 610, 133, 12, },
+ { 17, 176, 700, 1239, 1194, 620, 137, 13, },
+ { 16, 171, 690, 1234, 1201, 630, 142, 12, },
+ { 16, 166, 680, 1229, 1206, 640, 146, 13, },
+ { 15, 161, 670, 1223, 1212, 650, 151, 14, },
+ { 15, 156, 660, 1218, 1218, 660, 156, 13, },
+ { 14, 151, 650, 1212, 1223, 670, 161, 15, },
+ { 14, 146, 640, 1206, 1229, 680, 166, 15, },
+ { 13, 142, 630, 1201, 1234, 690, 171, 15, },
+ { 13, 137, 620, 1194, 1239, 700, 176, 17, },
+ { 12, 133, 610, 1188, 1244, 710, 182, 17, },
+ { 12, 129, 600, 1182, 1248, 720, 187, 18, },
+ { 11, 124, 590, 1175, 1253, 730, 193, 20, },
+ { 11, 120, 581, 1169, 1257, 740, 198, 20, },
+ { 11, 116, 571, 1162, 1262, 750, 204, 20, },
+ { 10, 112, 561, 1155, 1266, 760, 210, 22, },
+ { 10, 109, 552, 1148, 1270, 770, 216, 21, },
+ { 10, 105, 542, 1141, 1273, 780, 222, 23, },
+ { 9, 101, 533, 1134, 1277, 790, 228, 24, },
+ { 9, 98, 523, 1127, 1280, 800, 235, 24, },
+ { 8, 94, 514, 1119, 1284, 811, 241, 25, },
+ { 8, 91, 505, 1112, 1287, 821, 248, 24, },
+ { 8, 88, 496, 1104, 1289, 831, 254, 26, },
+ { 7, 85, 486, 1096, 1292, 841, 261, 28, },
+ { 7, 82, 477, 1088, 1295, 851, 268, 28, },
+ { 6, 79, 468, 1080, 1297, 861, 275, 30, },
+ { 6, 76, 459, 1072, 1299, 870, 282, 32, },
+ { 5, 73, 450, 1064, 1301, 880, 289, 34, },
+ { 5, 70, 442, 1055, 1303, 890, 296, 35, },
+ { 4, 68, 433, 1047, 1305, 900, 303, 36, },
+ { 4, 65, 424, 1039, 1307, 910, 311, 36, },
+ { 3, 63, 415, 1030, 1308, 919, 318, 40, },
+ { 3, 60, 407, 1021, 1309, 929, 326, 41, },
+ { 2, 58, 399, 1013, 1310, 939, 334, 41, },
+ { 2, 56, 390, 1004, 1311, 948, 342, 43, },
+ { 1, 54, 382, 995, 1311, 958, 349, 46, },
+ { 1, 51, 374, 986, 1312, 967, 357, 48, }
+};
+
+/* ScalingRatio = 3.0 — 10-tap, 64-phase fixed coefficient set */
+static const s16 XV_fixedcoeff_taps10_SR3[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_10] = {
+ { -31, 0, 359, 1033, 1399, 1033, 359, 0, -31, -25, },
+ { -31, -2, 350, 1022, 1398, 1043, 368, 3, -31, -24, },
+ { -30, -4, 341, 1012, 1398, 1053, 378, 5, -32, -25, },
+ { -30, -6, 333, 1002, 1398, 1062, 387, 8, -32, -26, },
+ { -30, -8, 324, 992, 1397, 1072, 396, 10, -32, -25, },
+ { -30, -10, 315, 981, 1396, 1082, 406, 13, -33, -24, },
+ { -29, -12, 307, 971, 1395, 1091, 415, 16, -33, -25, },
+ { -29, -13, 298, 960, 1393, 1101, 425, 18, -33, -24, },
+ { -29, -15, 290, 949, 1392, 1110, 434, 21, -34, -22, },
+ { -28, -17, 282, 939, 1390, 1120, 444, 25, -34, -25, },
+ { -28, -18, 274, 928, 1388, 1129, 454, 28, -34, -25, },
+ { -28, -20, 266, 917, 1386, 1138, 464, 31, -34, -24, },
+ { -27, -21, 258, 906, 1384, 1147, 474, 34, -35, -24, },
+ { -27, -22, 250, 895, 1381, 1156, 484, 38, -35, -24, },
+ { -27, -23, 242, 885, 1379, 1164, 494, 41, -35, -24, },
+ { -27, -25, 235, 874, 1376, 1173, 504, 45, -35, -24, },
+ { -26, -26, 227, 863, 1373, 1181, 515, 49, -36, -24, },
+ { -26, -27, 220, 852, 1369, 1190, 525, 53, -36, -24, },
+ { -26, -28, 213, 841, 1366, 1198, 535, 56, -36, -23, },
+ { -26, -29, 206, 830, 1362, 1206, 546, 61, -36, -24, },
+ { -25, -29, 199, 819, 1358, 1214, 556, 65, -36, -25, },
+ { -25, -30, 192, 808, 1354, 1222, 567, 69, -36, -25, },
+ { -25, -31, 185, 797, 1350, 1229, 577, 73, -36, -23, },
+ { -25, -32, 178, 785, 1346, 1237, 588, 78, -36, -23, },
+ { -25, -32, 172, 774, 1341, 1244, 599, 83, -36, -24, },
+ { -25, -33, 165, 763, 1336, 1252, 610, 87, -36, -23, },
+ { -24, -33, 159, 752, 1331, 1259, 620, 92, -36, -24, },
+ { -24, -34, 153, 741, 1326, 1266, 631, 97, -36, -24, },
+ { -24, -34, 147, 730, 1321, 1272, 642, 102, -36, -24, },
+ { -24, -35, 141, 719, 1315, 1279, 653, 107, -36, -23, },
+ { -24, -35, 135, 708, 1310, 1285, 664, 113, -36, -24, },
+ { -24, -35, 129, 697, 1304, 1292, 675, 118, -36, -24, },
+ { -24, -36, 124, 686, 1298, 1298, 686, 124, -36, -24, },
+ { -24, -36, 118, 675, 1292, 1304, 697, 129, -35, -24, },
+ { -24, -36, 113, 664, 1285, 1310, 708, 135, -35, -24, },
+ { -24, -36, 107, 653, 1279, 1315, 719, 141, -35, -23, },
+ { -24, -36, 102, 642, 1272, 1321, 730, 147, -34, -24, },
+ { -23, -36, 97, 631, 1266, 1326, 741, 153, -34, -25, },
+ { -23, -36, 92, 620, 1259, 1331, 752, 159, -33, -25, },
+ { -23, -36, 87, 610, 1252, 1336, 763, 165, -33, -25, },
+ { -23, -36, 83, 599, 1244, 1341, 774, 172, -32, -26, },
+ { -23, -36, 78, 588, 1237, 1346, 785, 178, -32, -25, },
+ { -23, -36, 73, 577, 1229, 1350, 797, 185, -31, -25, },
+ { -23, -36, 69, 567, 1222, 1354, 808, 192, -30, -27, },
+ { -23, -36, 65, 556, 1214, 1358, 819, 199, -29, -27, },
+ { -24, -36, 61, 546, 1206, 1362, 830, 206, -29, -26, },
+ { -24, -36, 56, 535, 1198, 1366, 841, 213, -28, -25, },
+ { -24, -36, 53, 525, 1190, 1369, 852, 220, -27, -26, },
+ { -24, -36, 49, 515, 1181, 1373, 863, 227, -26, -26, },
+ { -24, -35, 45, 504, 1173, 1376, 874, 235, -25, -27, },
+ { -24, -35, 41, 494, 1164, 1379, 885, 242, -23, -27, },
+ { -24, -35, 38, 484, 1156, 1381, 895, 250, -22, -27, },
+ { -24, -35, 34, 474, 1147, 1384, 906, 258, -21, -27, },
+ { -24, -34, 31, 464, 1138, 1386, 917, 266, -20, -28, },
+ { -24, -34, 28, 454, 1129, 1388, 928, 274, -18, -29, },
+ { -24, -34, 25, 444, 1120, 1390, 939, 282, -17, -29, },
+ { -24, -34, 21, 434, 1110, 1392, 949, 290, -15, -27, },
+ { -24, -33, 18, 425, 1101, 1393, 960, 298, -13, -29, },
+ { -24, -33, 16, 415, 1091, 1395, 971, 307, -12, -30, },
+ { -25, -33, 13, 406, 1082, 1396, 981, 315, -10, -29, },
+ { -25, -32, 10, 396, 1072, 1397, 992, 324, -8, -30, },
+ { -25, -32, 8, 387, 1062, 1398, 1002, 333, -6, -31, },
+ { -25, -32, 5, 378, 1053, 1398, 1012, 341, -4, -30, },
+ { -25, -31, 3, 368, 1043, 1398, 1022, 350, -2, -30, }
+};
+
+/*
+ * ScalingRatio = 4: 10-tap polyphase coefficients, one row per phase.
+ * Selected by xv_select_coeff() for downscale ratios above 3.5x
+ * (scale_ratio > 35, where scale_ratio = in * 10 / out).
+ */
+static const s16 XV_fixedcoeff_taps10_SR4[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_10] = {
+ { 0, 107, 454, 924, 1150, 924, 454, 107, 0, -24, },
+ { 0, 104, 446, 917, 1149, 930, 461, 110, 0, -21, },
+ { -1, 100, 439, 910, 1149, 936, 468, 114, 1, -20, },
+ { -1, 97, 432, 904, 1149, 942, 475, 117, 2, -21, },
+ { -2, 94, 425, 897, 1148, 948, 482, 121, 2, -19, },
+ { -2, 91, 418, 890, 1147, 954, 490, 125, 3, -20, },
+ { -3, 88, 411, 883, 1147, 960, 497, 128, 3, -18, },
+ { -3, 85, 404, 876, 1146, 966, 504, 132, 4, -18, },
+ { -3, 82, 397, 869, 1145, 972, 512, 136, 5, -19, },
+ { -4, 79, 390, 862, 1144, 978, 519, 140, 5, -17, },
+ { -4, 76, 384, 855, 1142, 983, 526, 144, 6, -16, },
+ { -4, 74, 377, 848, 1141, 989, 534, 148, 7, -18, },
+ { -5, 71, 370, 841, 1139, 995, 541, 152, 7, -15, },
+ { -5, 68, 364, 834, 1138, 1000, 549, 156, 8, -16, },
+ { -5, 66, 357, 827, 1136, 1005, 556, 160, 9, -15, },
+ { -6, 63, 350, 820, 1134, 1011, 564, 165, 10, -15, },
+ { -6, 61, 344, 812, 1132, 1016, 571, 169, 11, -14, },
+ { -6, 59, 338, 805, 1130, 1021, 579, 174, 12, -16, },
+ { -6, 56, 331, 798, 1128, 1026, 586, 178, 13, -14, },
+ { -7, 54, 325, 790, 1126, 1031, 594, 183, 14, -14, },
+ { -7, 52, 319, 783, 1124, 1036, 601, 187, 15, -14, },
+ { -7, 50, 312, 776, 1121, 1041, 609, 192, 16, -14, },
+ { -7, 48, 306, 768, 1119, 1045, 617, 197, 17, -14, },
+ { -8, 46, 300, 761, 1116, 1050, 624, 202, 18, -13, },
+ { -8, 44, 294, 753, 1113, 1054, 632, 207, 19, -12, },
+ { -8, 42, 288, 746, 1110, 1059, 639, 212, 20, -12, },
+ { -8, 40, 282, 738, 1107, 1063, 647, 217, 22, -12, },
+ { -9, 38, 277, 731, 1104, 1067, 655, 222, 23, -12, },
+ { -9, 36, 271, 723, 1101, 1071, 662, 227, 24, -10, },
+ { -9, 35, 265, 715, 1097, 1075, 670, 232, 26, -10, },
+ { -9, 33, 259, 708, 1094, 1079, 677, 238, 27, -10, },
+ { -10, 32, 254, 700, 1091, 1083, 685, 243, 28, -10, },
+ { -10, 30, 248, 693, 1087, 1087, 693, 248, 30, -10, },
+ { -10, 28, 243, 685, 1083, 1091, 700, 254, 32, -10, },
+ { -10, 27, 238, 677, 1079, 1094, 708, 259, 33, -9, },
+ { -11, 26, 232, 670, 1075, 1097, 715, 265, 35, -8, },
+ { -11, 24, 227, 662, 1071, 1101, 723, 271, 36, -8, },
+ { -11, 23, 222, 655, 1067, 1104, 731, 277, 38, -10, },
+ { -12, 22, 217, 647, 1063, 1107, 738, 282, 40, -8, },
+ { -12, 20, 212, 639, 1059, 1110, 746, 288, 42, -8, },
+ { -12, 19, 207, 632, 1054, 1113, 753, 294, 44, -8, },
+ { -12, 18, 202, 624, 1050, 1116, 761, 300, 46, -9, },
+ { -13, 17, 197, 617, 1045, 1119, 768, 306, 48, -8, },
+ { -13, 16, 192, 609, 1041, 1121, 776, 312, 50, -8, },
+ { -13, 15, 187, 601, 1036, 1124, 783, 319, 52, -8, },
+ { -14, 14, 183, 594, 1031, 1126, 790, 325, 54, -7, },
+ { -14, 13, 178, 586, 1026, 1128, 798, 331, 56, -6, },
+ { -14, 12, 174, 579, 1021, 1130, 805, 338, 59, -8, },
+ { -15, 11, 169, 571, 1016, 1132, 812, 344, 61, -5, },
+ { -15, 10, 165, 564, 1011, 1134, 820, 350, 63, -6, },
+ { -16, 9, 160, 556, 1005, 1136, 827, 357, 66, -4, },
+ { -16, 8, 156, 549, 1000, 1138, 834, 364, 68, -5, },
+ { -16, 7, 152, 541, 995, 1139, 841, 370, 71, -4, },
+ { -17, 7, 148, 534, 989, 1141, 848, 377, 74, -5, },
+ { -17, 6, 144, 526, 983, 1142, 855, 384, 76, -3, },
+ { -18, 5, 140, 519, 978, 1144, 862, 390, 79, -3, },
+ { -18, 5, 136, 512, 972, 1145, 869, 397, 82, -4, },
+ { -19, 4, 132, 504, 966, 1146, 876, 404, 85, -2, },
+ { -19, 3, 128, 497, 960, 1147, 883, 411, 88, -2, },
+ { -20, 3, 125, 490, 954, 1147, 890, 418, 91, -2, },
+ { -20, 2, 121, 482, 948, 1148, 897, 425, 94, -1, },
+ { -21, 2, 117, 475, 942, 1149, 904, 432, 97, -1, },
+ { -21, 1, 114, 468, 936, 1149, 910, 439, 100, 0, },
+ { -22, 0, 110, 461, 930, 1149, 917, 446, 104, 1, }
+};
+
+/*
+ * ScalingRatio = 4: 12-tap polyphase coefficients, one row per phase.
+ * Selected by xv_select_coeff() for downscale ratios above 3.5x.
+ *
+ * NOTE(review): the last coefficient of phase 0 is -22 while every other
+ * phase ends with a positive value around +19..+22 — possible sign slip;
+ * verify against the IP vendor's coefficient tables.
+ */
+static const s16 XV_fixedcoeff_taps12_SR4[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_TAPS_12] = {
+ { -19, 0, 152, 498, 893, 1070, 893, 498, 152, 0, -19, -22, },
+ { -19, -1, 147, 487, 879, 1059, 889, 499, 155, 1, -19, 19, },
+ { -19, -2, 143, 480, 874, 1059, 894, 506, 159, 2, -19, 19, },
+ { -19, -3, 139, 474, 869, 1059, 899, 512, 163, 3, -19, 19, },
+ { -19, -4, 136, 468, 863, 1059, 904, 519, 167, 4, -19, 18, },
+ { -19, -5, 132, 461, 858, 1058, 909, 525, 171, 5, -19, 20, },
+ { -19, -5, 128, 455, 853, 1058, 913, 531, 175, 7, -19, 19, },
+ { -18, -6, 125, 449, 847, 1057, 918, 538, 180, 8, -19, 17, },
+ { -18, -7, 121, 443, 842, 1056, 923, 544, 184, 9, -19, 18, },
+ { -18, -8, 118, 436, 836, 1056, 927, 551, 188, 10, -19, 19, },
+ { -18, -8, 114, 430, 831, 1055, 932, 557, 193, 12, -19, 17, },
+ { -18, -9, 111, 424, 825, 1054, 936, 564, 197, 13, -19, 18, },
+ { -18, -10, 107, 418, 819, 1053, 941, 570, 202, 14, -19, 19, },
+ { -18, -10, 104, 412, 814, 1052, 945, 577, 206, 16, -19, 17, },
+ { -18, -11, 101, 406, 808, 1050, 949, 583, 211, 17, -19, 19, },
+ { -18, -11, 98, 400, 802, 1049, 954, 590, 216, 19, -19, 16, },
+ { -18, -12, 95, 394, 796, 1048, 958, 596, 220, 20, -19, 18, },
+ { -18, -12, 92, 388, 791, 1046, 962, 603, 225, 22, -19, 16, },
+ { -18, -13, 89, 382, 785, 1045, 966, 609, 230, 24, -19, 16, },
+ { -18, -13, 86, 376, 779, 1043, 970, 616, 235, 25, -19, 16, },
+ { -18, -14, 83, 370, 773, 1041, 973, 622, 240, 27, -19, 18, },
+ { -18, -14, 80, 364, 767, 1039, 977, 629, 244, 29, -19, 18, },
+ { -18, -15, 77, 358, 761, 1037, 981, 635, 249, 31, -19, 19, },
+ { -18, -15, 74, 352, 755, 1035, 984, 642, 255, 33, -19, 18, },
+ { -18, -15, 71, 347, 749, 1033, 988, 648, 260, 35, -19, 17, },
+ { -18, -16, 69, 341, 743, 1031, 991, 654, 265, 36, -19, 19, },
+ { -18, -16, 66, 335, 736, 1029, 995, 661, 270, 38, -19, 19, },
+ { -18, -16, 64, 330, 730, 1026, 998, 667, 275, 41, -18, 17, },
+ { -18, -17, 61, 324, 724, 1024, 1001, 674, 280, 43, -18, 18, },
+ { -18, -17, 59, 318, 718, 1021, 1004, 680, 286, 45, -18, 18, },
+ { -18, -17, 56, 313, 712, 1019, 1007, 686, 291, 47, -18, 18, },
+ { -18, -17, 54, 307, 705, 1016, 1010, 693, 296, 49, -18, 19, },
+ { -18, -18, 51, 302, 699, 1013, 1013, 699, 302, 51, -18, 20, },
+ { -18, -18, 49, 296, 693, 1010, 1016, 705, 307, 54, -17, 19, },
+ { -18, -18, 47, 291, 686, 1007, 1019, 712, 313, 56, -17, 18, },
+ { -18, -18, 45, 286, 680, 1004, 1021, 718, 318, 59, -17, 18, },
+ { -18, -18, 43, 280, 674, 1001, 1024, 724, 324, 61, -17, 18, },
+ { -18, -18, 41, 275, 667, 998, 1026, 730, 330, 64, -16, 17, },
+ { -18, -19, 38, 270, 661, 995, 1029, 736, 335, 66, -16, 19, },
+ { -19, -19, 36, 265, 654, 991, 1031, 743, 341, 69, -16, 20, },
+ { -19, -19, 35, 260, 648, 988, 1033, 749, 347, 71, -15, 18, },
+ { -19, -19, 33, 255, 642, 984, 1035, 755, 352, 74, -15, 19, },
+ { -19, -19, 31, 249, 635, 981, 1037, 761, 358, 77, -15, 20, },
+ { -19, -19, 29, 244, 629, 977, 1039, 767, 364, 80, -14, 19, },
+ { -19, -19, 27, 240, 622, 973, 1041, 773, 370, 83, -14, 19, },
+ { -19, -19, 25, 235, 616, 970, 1043, 779, 376, 86, -13, 17, },
+ { -19, -19, 24, 230, 609, 966, 1045, 785, 382, 89, -13, 17, },
+ { -19, -19, 22, 225, 603, 962, 1046, 791, 388, 92, -12, 17, },
+ { -19, -19, 20, 220, 596, 958, 1048, 796, 394, 95, -12, 19, },
+ { -20, -19, 19, 216, 590, 954, 1049, 802, 400, 98, -11, 18, },
+ { -20, -19, 17, 211, 583, 949, 1050, 808, 406, 101, -11, 21, },
+ { -20, -19, 16, 206, 577, 945, 1052, 814, 412, 104, -10, 19, },
+ { -20, -19, 14, 202, 570, 941, 1053, 819, 418, 107, -10, 21, },
+ { -20, -19, 13, 197, 564, 936, 1054, 825, 424, 111, -9, 20, },
+ { -20, -19, 12, 193, 557, 932, 1055, 831, 430, 114, -8, 19, },
+ { -21, -19, 10, 188, 551, 927, 1056, 836, 436, 118, -8, 22, },
+ { -21, -19, 9, 184, 544, 923, 1056, 842, 443, 121, -7, 21, },
+ { -21, -19, 8, 180, 538, 918, 1057, 847, 449, 125, -6, 20, },
+ { -21, -19, 7, 175, 531, 913, 1058, 853, 455, 128, -5, 21, },
+ { -21, -19, 5, 171, 525, 909, 1058, 858, 461, 132, -5, 22, },
+ { -21, -19, 4, 167, 519, 904, 1059, 863, 468, 136, -4, 20, },
+ { -22, -19, 3, 163, 512, 899, 1059, 869, 474, 139, -3, 22, },
+ { -22, -19, 2, 159, 506, 894, 1059, 874, 480, 143, -2, 22, },
+ { -22, -19, 1, 155, 499, 889, 1059, 879, 487, 147, -1, 22, }
+};
+
+/* H-scaler coefficient RAM geometry (bits per entry / number of entries) */
+#define XV_HSCALER_CTRL_WIDTH_HWREG_HFLTCOEFF (16)
+#define XV_HSCALER_CTRL_DEPTH_HWREG_HFLTCOEFF (384)
+/* Per-pixel phase word register window (legacy layout) */
+#define XV_HSCALER_CTRL_ADDR_HWREG_PHASESH_V_BASE (0x2000)
+#define XV_HSCALER_CTRL_ADDR_HWREG_PHASESH_V_HIGH (0x3fff)
+#define XV_HSCALER_CTRL_WIDTH_HWREG_PHASESH_V (18)
+#define XV_HSCALER_CTRL_DEPTH_HWREG_PHASESH_V (1920)
+/* Phase window base used when the IP has the XSCALER_HPHASE_FIX layout */
+#define XV_HSCALER_CTRL_ADDR_HWREG_PHASEH_FIX (0x4000)
+
+/* H-scaler masks */
+#define XV_HSCALER_PHASESH_V_OUTPUT_WR_EN BIT(8)
+
+/* V-scaler registers */
+#define XV_VSCALER_CTRL_ADDR_AP_CTRL (0x000)
+#define XV_VSCALER_CTRL_ADDR_GIE (0x004)
+#define XV_VSCALER_CTRL_ADDR_IER (0x008)
+#define XV_VSCALER_CTRL_ADDR_ISR (0x00c)
+#define XV_VSCALER_CTRL_ADDR_HWREG_HEIGHTIN_DATA (0x010)
+#define XV_VSCALER_CTRL_ADDR_HWREG_WIDTH_DATA (0x018)
+#define XV_VSCALER_CTRL_ADDR_HWREG_HEIGHTOUT_DATA (0x020)
+#define XV_VSCALER_CTRL_ADDR_HWREG_LINERATE_DATA (0x028)
+#define XV_VSCALER_CTRL_ADDR_HWREG_COLORMODE_DATA (0x030)
+#define XV_VSCALER_CTRL_ADDR_HWREG_VFLTCOEFF_BASE (0x800)
+#define XV_VSCALER_CTRL_ADDR_HWREG_VFLTCOEFF_HIGH (0xbff)
+
+/* V-scaler coefficient RAM geometry (bits per entry / number of entries) */
+#define XV_VSCALER_CTRL_WIDTH_HWREG_VFLTCOEFF (16)
+#define XV_VSCALER_CTRL_DEPTH_HWREG_VFLTCOEFF (384)
+
+/**
+ * struct xscaler_feature - dt or IP property structure
+ * @flags: Bitmask of properties enabled in IP or dt
+ */
+struct xscaler_feature {
+ u32 flags;
+};
+
+/**
+ * struct xscaler_device - Xilinx Scaler device structure
+ * @xvip: Xilinx Video IP device
+ * @pads: Scaler sub-device media pads
+ * @formats: V4L2 media bus formats at the sink and source pads
+ * @default_formats: default V4L2 media bus formats
+ * @vip_formats: Xilinx Video IP format retrieved from the DT
+ * @num_hori_taps: number of horizontal taps
+ * @num_vert_taps: number of vertical taps
+ * @max_num_phases: maximum number of phases
+ * @pix_per_clk: Pixels per Clock cycle the IP operates upon
+ * @max_pixels: The maximum number of pixels that the H-scaler examines
+ * @max_lines: The maximum number of lines that the V-scaler examines
+ * @H_phases: The phases needed to program the H-scaler for different taps
+ * @hscaler_coeff: The complete array of H-scaler coefficients
+ * @vscaler_coeff: The complete array of V-scaler coefficients
+ * @is_polyphase: Track if scaling algorithm is polyphase or not
+ * @rst_gpio: GPIO reset line to bring VPSS Scaler out of reset
+ * @cfg: Pointer to scaler config structure
+ * @aclk_axis: AXI4-Stream video interface clock
+ * @aclk_ctrl: AXI4-Lite control interface clock
+ */
+struct xscaler_device {
+ struct xvip_device xvip;
+
+ struct media_pad pads[2];
+ struct v4l2_mbus_framefmt formats[2];
+ struct v4l2_mbus_framefmt default_formats[2];
+ const struct xvip_video_format *vip_formats[2];
+
+ u32 num_hori_taps;
+ u32 num_vert_taps;
+ u32 max_num_phases;
+ u32 pix_per_clk;
+ u32 max_pixels;
+ u32 max_lines;
+ u64 H_phases[XV_HSCALER_MAX_LINE_WIDTH];
+ short hscaler_coeff[XV_HSCALER_MAX_H_PHASES][XV_HSCALER_MAX_H_TAPS];
+ short vscaler_coeff[XV_VSCALER_MAX_V_PHASES][XV_VSCALER_MAX_V_TAPS];
+ bool is_polyphase;
+
+ struct gpio_desc *rst_gpio;
+ const struct xscaler_feature *cfg;
+ struct clk *aclk_axis;
+ struct clk *aclk_ctrl;
+};
+
+/* v2.2 IP: DT-provided clocks and the fixed phase register layout */
+static const struct xscaler_feature xlnx_scaler_v2_2 = {
+ .flags = XSCALER_CLK_PROP | XSCALER_HPHASE_FIX,
+};
+
+/* v1.0 IP: DT-provided clocks, legacy phase register layout */
+static const struct xscaler_feature xlnx_scaler_v1_0 = {
+ .flags = XSCALER_CLK_PROP,
+};
+
+/* Unversioned compatible: no optional features */
+static const struct xscaler_feature xlnx_scaler = {
+ .flags = 0,
+};
+
+static const struct of_device_id xscaler_of_id_table[] = {
+ { .compatible = "xlnx,v-vpss-scaler",
+ .data = &xlnx_scaler},
+ { .compatible = "xlnx,v-vpss-scaler-1.0",
+ .data = &xlnx_scaler_v1_0},
+ { .compatible = "xlnx,v-vpss-scaler-2.2",
+ .data = &xlnx_scaler_v2_2},
+ { /* end of table */ }
+};
+MODULE_DEVICE_TABLE(of, xscaler_of_id_table);
+
+/* Map an embedded v4l2_subdev back to its containing xscaler_device */
+static inline struct xscaler_device *to_scaler(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct xscaler_device, xvip.subdev);
+}
+
+/**
+ * xv_hscaler_calculate_phases - Compute per-clock H-scaler phase words
+ * @xscaler: VPSS Scaler device information
+ * @width_in: input line width in pixels
+ * @width_out: output line width in pixels
+ * @pixel_rate: input/output step ratio in STEP_PRECISION fixed point
+ *
+ * For every pixel slot of every clock cycle across the wider of the two
+ * line widths, pack the filter phase, the running input-sample index and
+ * the output-write-enable flag into xscaler->H_phases[].  The 4
+ * pixels-per-clock configuration uses a wider per-slot field layout than
+ * the 1/2 ppc cases.
+ *
+ * NOTE(review): bits are OR-ed into H_phases[], which this driver only
+ * zeroes on stream-off; confirm the array is clear before the first
+ * stream-on after probe.
+ */
+static void
+xv_hscaler_calculate_phases(struct xscaler_device *xscaler,
+ u32 width_in, u32 width_out, u32 pixel_rate)
+{
+ unsigned int loop_width;
+ unsigned int x, s;
+ int offset = 0;
+ int xwrite_pos = 0;
+ bool output_write_en;
+ bool get_new_pix;
+ u64 phaseH;
+ u64 array_idx = 0;
+ int nr_rds = 0; /* was read uninitialized below; must start at 0 */
+ int nr_rds_clck;
+ unsigned int nphases = xscaler->max_num_phases;
+ unsigned int nppc = xscaler->pix_per_clk;
+ unsigned int shift = XHSC_STEP_PRECISION_SHIFT - ilog2(nphases);
+
+ loop_width = max_t(u32, width_in, width_out);
+ loop_width = ALIGN(loop_width + nppc - 1, nppc);
+
+ for (x = 0; x < loop_width; x++) {
+ nr_rds_clck = 0;
+ for (s = 0; s < nppc; s++) {
+ phaseH = (offset >> shift) & (nphases - 1);
+ get_new_pix = false;
+ output_write_en = false;
+ if ((offset >> XHSC_STEP_PRECISION_SHIFT) != 0) {
+ /* read a new input sample */
+ get_new_pix = true;
+ offset -= (1 << XHSC_STEP_PRECISION_SHIFT);
+ array_idx++;
+ }
+
+ if (((offset >> XHSC_STEP_PRECISION_SHIFT) == 0) &&
+ (xwrite_pos < width_out)) {
+ /* produce a new output sample */
+ offset += pixel_rate;
+ output_write_en = true;
+ xwrite_pos++;
+ }
+
+ if (nppc == XSCALER_PPC_4) {
+ xscaler->H_phases[x] |=
+ ((u64)phaseH <<
+ (s * XHSC_HPHASE_MUL_4PPC));
+ xscaler->H_phases[x] |=
+ ((u64)array_idx <<
+ (XHSC_HPHASE_SHIFT_BY_6 +
+ (s * XHSC_HPHASE_MUL_4PPC)));
+ if (output_write_en)
+ xscaler->H_phases[x] |=
+ ((u64)1 <<
+ (XHSC_HPHASE_MULTIPLIER + s *
+ XHSC_HPHASE_MUL_4PPC));
+ } else {
+ xscaler->H_phases[x] |=
+ (phaseH <<
+ (s * XHSC_HPHASE_MULTIPLIER));
+ xscaler->H_phases[x] |=
+ (array_idx <<
+ (XHSC_HPHASE_SHIFT_BY_6 +
+ (s * XHSC_HPHASE_MULTIPLIER)));
+
+ if (output_write_en)
+ xscaler->H_phases[x] |=
+ (XV_HSCALER_PHASESH_V_OUTPUT_WR_EN <<
+ (s * XHSC_HPHASE_MULTIPLIER));
+ }
+
+ if (get_new_pix)
+ nr_rds_clck++;
+ }
+ if (array_idx >= nppc)
+ array_idx &= (nppc - 1);
+
+ nr_rds += nr_rds_clck;
+ if (nr_rds >= nppc)
+ nr_rds -= nppc;
+ }
+}
+
+/*
+ * Copy @ntaps coefficients per phase into the centre of the driver's
+ * XV_HSCALER_MAX_H_TAPS-wide shadow table, zero-padding both sides when
+ * the effective tap count is smaller than the maximum.
+ */
+static void
+xv_hscaler_load_ext_coeff(struct xscaler_device *xscaler,
+ const short *coeff, u32 ntaps)
+{
+ unsigned int i, j, pad, offset;
+ u32 nphases = xscaler->max_num_phases;
+
+ /* Determine if coefficient needs padding (effective vs. max taps) */
+ pad = XV_HSCALER_MAX_H_TAPS - ntaps;
+ offset = pad >> 1;
+ dev_dbg(xscaler->xvip.dev,
+ "%s : Pad = %d Offset = %d Nphases = %d ntaps = %d",
+ __func__, pad, offset, nphases, ntaps);
+
+ /* Load coefficients into scaler coefficient table */
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps; ++j)
+ xscaler->hscaler_coeff[i][j + offset] =
+ coeff[i * ntaps + j];
+ }
+
+ if (pad) { /* effective taps < max_taps */
+ for (i = 0; i < nphases; i++) {
+ /* pad left */
+ for (j = 0; j < offset; j++)
+ xscaler->hscaler_coeff[i][j] = 0;
+ /* pad right */
+ j = ntaps + offset;
+ for (; j < XV_HSCALER_MAX_H_TAPS; j++)
+ xscaler->hscaler_coeff[i][j] = 0;
+ }
+ }
+}
+
+/*
+ * Pick a fixed coefficient table for the configured tap count and the
+ * in/out scaling ratio.  May reduce *ntaps (e.g. a 12-tap filter runs
+ * with 8-tap coefficients at moderate downscale ratios).  Returns NULL
+ * for an unsupported tap count.
+ */
+static const short *xv_select_coeff(struct xscaler_device *xscaler,
+ u32 in, u32 out, u32 *ntaps)
+{
+ const short *coeff = NULL;
+
+ /*
+ * Scale Down Mode will use dynamic filter selection logic
+ * Scale Up Mode (including 1:1) will always use 6 tap filter
+ */
+ if (out < in) {
+ /* ratio scaled by 10: 35 means a 3.5x downscale */
+ u16 scale_ratio = (in * 10) / out;
+
+ /* Since XV_HSCALER_TAPS_* is same as XV_VSCALER_TAPS_* */
+ switch (*ntaps) {
+ case XV_HSCALER_TAPS_6:
+ *ntaps = XV_HSCALER_TAPS_6;
+ if (scale_ratio > 35)
+ coeff = &XV_fixedcoeff_taps6_SR4[0][0];
+ else if (scale_ratio > 25)
+ coeff = &XV_fixedcoeff_taps6_SR3[0][0];
+ else if (scale_ratio > 15)
+ coeff = &XV_fixedcoeff_taps6_SR2[0][0];
+ else
+ coeff = &XV_fixedcoeff_taps6_SR1p2[0][0];
+ break;
+ case XV_HSCALER_TAPS_8:
+ if (scale_ratio > 35) {
+ coeff = &XV_fixedcoeff_taps8_SR4[0][0];
+ *ntaps = XV_HSCALER_TAPS_8;
+ } else if (scale_ratio > 25) {
+ coeff = &XV_fixedcoeff_taps8_SR3[0][0];
+ *ntaps = XV_HSCALER_TAPS_8;
+ } else if (scale_ratio > 15) {
+ coeff = &XV_fixedcoeff_taps8_SR2[0][0];
+ *ntaps = XV_HSCALER_TAPS_8;
+ } else {
+ coeff = &XV_fixedcoeff_taps6_SR1p2[0][0];
+ *ntaps = XV_HSCALER_TAPS_6;
+ }
+ break;
+ case XV_HSCALER_TAPS_10:
+ if (scale_ratio > 35) {
+ coeff = &XV_fixedcoeff_taps10_SR4[0][0];
+ *ntaps = XV_HSCALER_TAPS_10;
+ } else if (scale_ratio > 25) {
+ coeff = &XV_fixedcoeff_taps10_SR3[0][0];
+ *ntaps = XV_HSCALER_TAPS_10;
+ } else if (scale_ratio > 15) {
+ coeff = &XV_fixedcoeff_taps8_SR2[0][0];
+ *ntaps = XV_HSCALER_TAPS_8;
+ } else {
+ coeff = &XV_fixedcoeff_taps6_SR1p2[0][0];
+ *ntaps = XV_HSCALER_TAPS_6;
+ }
+ break;
+ case XV_HSCALER_TAPS_12:
+ if (scale_ratio > 35) {
+ coeff = &XV_fixedcoeff_taps12_SR4[0][0];
+ *ntaps = XV_HSCALER_TAPS_12;
+ } else if (scale_ratio > 25) {
+ coeff = &XV_fixedcoeff_taps10_SR3[0][0];
+ *ntaps = XV_HSCALER_TAPS_10;
+ } else if (scale_ratio > 15) {
+ coeff = &XV_fixedcoeff_taps8_SR2[0][0];
+ *ntaps = XV_HSCALER_TAPS_8;
+ } else {
+ coeff = &XV_fixedcoeff_taps6_SR1p2[0][0];
+ *ntaps = XV_HSCALER_TAPS_6;
+ }
+ break;
+ default:
+ /* coeff stays NULL; callers report -EINVAL */
+ dev_err(xscaler->xvip.dev,
+ "Unsupported number of taps = %d",
+ *ntaps);
+ }
+ } else {
+ dev_dbg(xscaler->xvip.dev, "scaler : scale up 6 tap");
+ coeff = &XV_lanczos2_taps6[0][0];
+ *ntaps = XV_HSCALER_TAPS_6;
+ }
+
+ return coeff;
+}
+
+/**
+ * xv_hscaler_select_coeff - Selection of H-Scaler coefficients of operation
+ * @xscaler: VPSS Scaler device information
+ * @width_in: Width of input video
+ * @width_out: Width of desired output video
+ *
+ * There are instances when a N-tap filter might operate in an M-tap
+ * configuration where N > M.
+ *
+ * For example :
+ * Depending on the ratio of scaling (while downscaling), a 12-tap
+ * filter may operate with 10 tap coefficients and zero-pads the remaining
+ * coefficients.
+ *
+ * While upscaling the driver will program 6-tap filter coefficients
+ * in any N-tap configurations (for N >= 6).
+ *
+ * This selection is adopted by the driver as it gives optimal
+ * video output as determined by repeated testing of the IP.
+ *
+ * Return: Will return 0 if successful. Returns -EINVAL on an unsupported
+ * H-scaler number of taps.
+ */
+static int
+xv_hscaler_select_coeff(struct xscaler_device *xscaler,
+ u32 width_in, u32 width_out)
+{
+ const short *coeff;
+ u32 ntaps = xscaler->num_hori_taps;
+
+ coeff = xv_select_coeff(xscaler, width_in, width_out, &ntaps);
+ if (!coeff)
+ return -EINVAL;
+
+ xv_hscaler_load_ext_coeff(xscaler, coeff, ntaps);
+ return 0;
+}
+
+/*
+ * Write the H-scaler shadow coefficients into the IP's coefficient RAM,
+ * packing two 16-bit coefficients per 32-bit register write.
+ */
+static void xv_hscaler_set_coeff(struct xscaler_device *xscaler)
+{
+ int val, i, j, offset, rd_indx;
+ u32 ntaps = xscaler->num_hori_taps;
+ u32 nphases = xscaler->max_num_phases;
+ u32 base_addr;
+
+ /* skip the zero padding added by xv_hscaler_load_ext_coeff() */
+ offset = (XV_HSCALER_MAX_H_TAPS - ntaps) / 2;
+ base_addr = V_HSCALER_OFF + XV_HSCALER_CTRL_ADDR_HWREG_HFLTCOEFF_BASE;
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps / 2; j++) {
+ rd_indx = j * 2 + offset;
+ val = (xscaler->hscaler_coeff[i][rd_indx + 1] <<
+ XSCALER_BITSHIFT_16) |
+ (xscaler->hscaler_coeff[i][rd_indx] &
+ XHSC_MASK_LOW_16BITS);
+ xvip_write(&xscaler->xvip, base_addr +
+ ((i * ntaps / 2 + j) * 4), val);
+ }
+ }
+}
+
+/*
+ * Copy @ntaps coefficients per phase into the centre of the driver's
+ * XV_VSCALER_MAX_V_TAPS-wide shadow table, zero-padding both sides when
+ * the effective tap count is smaller than the maximum.
+ */
+static void
+xv_vscaler_load_ext_coeff(struct xscaler_device *xscaler,
+ const short *coeff, u32 ntaps)
+{
+ int i, j, pad, offset;
+ u32 nphases = xscaler->max_num_phases;
+
+ /* Determine if coefficient needs padding (effective vs. max taps) */
+ pad = XV_VSCALER_MAX_V_TAPS - ntaps;
+ offset = pad ? (pad >> 1) : 0;
+
+ dev_dbg(xscaler->xvip.dev,
+ "%s : Pad = %d Offset = %d Nphases = %d ntaps = %d",
+ __func__, pad, offset, nphases, ntaps);
+
+ /* Load User defined coefficients into scaler coefficient table */
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps; ++j)
+ xscaler->vscaler_coeff[i][j + offset] =
+ coeff[i * ntaps + j];
+ }
+
+ if (pad) { /* effective taps < max_taps */
+ for (i = 0; i < nphases; i++) {
+ /* pad left */
+ for (j = 0; j < offset; j++)
+ xscaler->vscaler_coeff[i][j] = 0;
+ /* pad right */
+ j = ntaps + offset;
+ for (; j < XV_VSCALER_MAX_V_TAPS; j++)
+ xscaler->vscaler_coeff[i][j] = 0;
+ }
+ }
+}
+
+/*
+ * Write the V-scaler shadow coefficients into the IP's coefficient RAM,
+ * packing two 16-bit coefficients per 32-bit register write.
+ */
+static void xv_vscaler_set_coeff(struct xscaler_device *xscaler)
+{
+ u32 nphases = xscaler->max_num_phases;
+ u32 ntaps = xscaler->num_vert_taps;
+ int val, i, j, offset, rd_indx;
+ u32 base_addr;
+
+ /* skip the zero padding added by xv_vscaler_load_ext_coeff() */
+ offset = (XV_VSCALER_MAX_V_TAPS - ntaps) / 2;
+ base_addr = V_VSCALER_OFF + XV_VSCALER_CTRL_ADDR_HWREG_VFLTCOEFF_BASE;
+
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps / 2; j++) {
+ rd_indx = j * 2 + offset;
+ val = (xscaler->vscaler_coeff[i][rd_indx + 1] <<
+ XSCALER_BITSHIFT_16) |
+ (xscaler->vscaler_coeff[i][rd_indx] &
+ XVSC_MASK_LOW_16BITS);
+ xvip_write(&xscaler->xvip,
+ base_addr + ((i * ntaps / 2 + j) * 4), val);
+ }
+ }
+}
+
+/**
+ * xv_vscaler_select_coeff - Selection of V-Scaler coefficients of operation
+ * @xscaler: VPSS Scaler device information
+ * @height_in: Height of input video
+ * @height_out: Height of desired output video
+ *
+ * There are instances when a N-tap filter might operate in an M-tap
+ * configuration where N > M.
+ *
+ * For example :
+ * Depending on the ratio of scaling (while downscaling), a 10-tap
+ * filter may operate with 6 tap coefficients and zero-pads the remaining
+ * coefficients.
+ *
+ * While upscaling the driver will program 6-tap filter coefficients
+ * in any N-tap configurations (for N >= 6).
+ *
+ * This selection is adopted by the driver as it gives optimal
+ * video output as determined by repeated testing of the IP.
+ *
+ * Return: Will return 0 if successful. Returns -EINVAL on an unsupported
+ * V-scaler number of taps.
+ */
+static int
+xv_vscaler_select_coeff(struct xscaler_device *xscaler,
+ u32 height_in, u32 height_out)
+{
+ const short *coeff;
+ u32 ntaps = xscaler->num_vert_taps;
+
+ coeff = xv_select_coeff(xscaler, height_in, height_out, &ntaps);
+ if (!coeff)
+ return -EINVAL;
+
+ xv_vscaler_load_ext_coeff(xscaler, coeff, ntaps);
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Video Operations
+ */
+
+/* Clear @ip_block reset bits on the given GPIO reset channel */
+static inline void
+xv_procss_disable_block(struct xvip_device *xvip, u32 channel, u32 ip_block)
+{
+ xvip_clr(xvip, ((channel - 1) * XGPIO_CHAN_OFFSET) +
+ XGPIO_DATA_OFFSET + S_AXIS_RESET_OFF,
+ ip_block);
+}
+
+/* Set @ip_block reset bits on the given GPIO reset channel */
+static inline void
+xv_procss_enable_block(struct xvip_device *xvip, u32 channel, u32 ip_block)
+{
+ xvip_set(xvip, ((channel - 1) * XGPIO_CHAN_OFFSET) +
+ XGPIO_DATA_OFFSET + S_AXIS_RESET_OFF,
+ ip_block);
+}
+
+/* Pulse reset on all sub-blocks, then release the AXIS IP block */
+static void xscaler_reset(struct xscaler_device *xscaler)
+{
+ xv_procss_disable_block(&xscaler->xvip, XGPIO_CH_RESET_SEL,
+ XGPIO_RESET_MASK_ALL_BLOCKS);
+ xv_procss_enable_block(&xscaler->xvip, XGPIO_CH_RESET_SEL,
+ XGPIO_RESET_MASK_IP_AXIS);
+}
+
+/*
+ * Program the V-scaler input color mode from the sink media bus code.
+ *
+ * Return: the XVIDC_CSF_* color format the V-scaler hands to the
+ * H-scaler (YUV 420 input is upconverted to 422 first), i.e. a positive
+ * value on success — NOT 0; -EINVAL on an unsupported media bus code.
+ */
+static int
+xv_vscaler_setup_video_fmt(struct xscaler_device *xscaler, u32 code_in)
+{
+ u32 video_in;
+
+ switch (code_in) {
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ case MEDIA_BUS_FMT_VYYUYY10_4X20:
+ dev_dbg(xscaler->xvip.dev,
+ "Vscaler Input Media Format YUV 420");
+ video_in = XVIDC_CSF_YCRCB_420;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ dev_dbg(xscaler->xvip.dev,
+ "Vscaler Input Media Format YUV 422");
+ video_in = XVIDC_CSF_YCRCB_422;
+ break;
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ case MEDIA_BUS_FMT_VUY10_1X30:
+ dev_dbg(xscaler->xvip.dev,
+ "Vscaler Input Media Format YUV 444");
+ video_in = XVIDC_CSF_YCRCB_444;
+ break;
+ case MEDIA_BUS_FMT_RBG888_1X24:
+ case MEDIA_BUS_FMT_RBG101010_1X30:
+ dev_dbg(xscaler->xvip.dev,
+ "Vscaler Input Media Format RGB");
+ video_in = XVIDC_CSF_RGB;
+ break;
+ default:
+ dev_err(xscaler->xvip.dev,
+ "Vscaler Unsupported Input Media Format 0x%x",
+ code_in);
+ return -EINVAL;
+ }
+ xvip_write(&xscaler->xvip, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_HWREG_COLORMODE_DATA,
+ video_in);
+ /*
+ * Vscaler will upscale to YUV 422 before
+ * Hscaler starts operation
+ */
+ if (video_in == XVIDC_CSF_YCRCB_420)
+ return XVIDC_CSF_YCRCB_422;
+ return video_in;
+}
+
+/*
+ * Program the H-scaler input color mode (@vsc_out, as returned by
+ * xv_vscaler_setup_video_fmt()) and its output color mode derived from
+ * the source-pad media bus code @code_out.
+ *
+ * Return: 0 on success, -EINVAL on an unsupported format.
+ */
+static int xv_hscaler_setup_video_fmt(struct xscaler_device *xscaler,
+ u32 code_out, u32 vsc_out)
+{
+ u32 video_out;
+
+ switch (vsc_out) {
+ case XVIDC_CSF_YCRCB_422:
+ dev_dbg(xscaler->xvip.dev,
+ "Hscaler Input Media Format is YUV 422");
+ break;
+ case XVIDC_CSF_YCRCB_444:
+ dev_dbg(xscaler->xvip.dev,
+ "Hscaler Input Media Format is YUV 444");
+ break;
+ case XVIDC_CSF_RGB:
+ dev_dbg(xscaler->xvip.dev,
+ "Hscaler Input Media Format is RGB");
+ break;
+ default:
+ dev_err(xscaler->xvip.dev,
+ "Hscaler got unsupported format from Vscaler");
+ return -EINVAL;
+ }
+
+ xvip_write(&xscaler->xvip, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_COLORMODE_DATA,
+ vsc_out);
+
+ switch (code_out) {
+ case MEDIA_BUS_FMT_VYYUYY8_1X24:
+ case MEDIA_BUS_FMT_VYYUYY10_4X20:
+ dev_dbg(xscaler->xvip.dev,
+ "Hscaler Output Media Format YUV 420\n");
+ video_out = XVIDC_CSF_YCRCB_420;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ dev_dbg(xscaler->xvip.dev,
+ "Hscaler Output Media Format YUV 422\n");
+ video_out = XVIDC_CSF_YCRCB_422;
+ break;
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ case MEDIA_BUS_FMT_VUY10_1X30:
+ dev_dbg(xscaler->xvip.dev,
+ "Hscaler Output Media Format YUV 444\n");
+ video_out = XVIDC_CSF_YCRCB_444;
+ break;
+ case MEDIA_BUS_FMT_RBG888_1X24:
+ case MEDIA_BUS_FMT_RBG101010_1X30:
+ dev_dbg(xscaler->xvip.dev,
+ "Hscaler Output Media Format RGB\n");
+ video_out = XVIDC_CSF_RGB;
+ break;
+ default:
+ dev_err(xscaler->xvip.dev,
+ "Hscaler Unsupported Output Media Format 0x%x",
+ code_out);
+ return -EINVAL;
+ }
+ xvip_write(&xscaler->xvip, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_COLORMODEOUT_DATA,
+ video_out);
+ return 0;
+}
+
+/*
+ * Write the precomputed H_phases[] words into the IP's phase register
+ * window.  The window base depends on the XSCALER_HPHASE_FIX feature
+ * flag, and the packing per register write depends on pixels-per-clock.
+ */
+static void
+xv_hscaler_set_phases(struct xscaler_device *xscaler)
+{
+ u32 loop_width;
+ u32 index = 0, val;
+ u32 offset, i, j = 0, lsb, msb;
+ u64 phasehdata;
+
+ loop_width = xscaler->max_pixels / xscaler->pix_per_clk;
+
+ if (xscaler->cfg->flags & XSCALER_HPHASE_FIX) {
+ offset = V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_PHASEH_FIX;
+ } else {
+ offset = V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_PHASESH_V_BASE;
+ }
+
+ switch (xscaler->pix_per_clk) {
+ case XSCALER_PPC_1:
+ /*
+ * phaseH is 64 bits but only lower 16 bits of each entry
+ * is valid .Form a 32 bit word with 16bit LSB from 2
+ * consecutive entries. Need 1 32b write to get 2 entries
+ * into IP registers (i is array loc and index is
+ * address offset)
+ */
+ for (i = 0; i < loop_width; i += 2) {
+ lsb = lower_32_bits(xscaler->H_phases[i] &
+ XHSC_MASK_LOW_16BITS);
+ msb = lower_32_bits(xscaler->H_phases[i + 1] &
+ XHSC_MASK_LOW_16BITS);
+ val = (msb << 16 | lsb);
+ xvip_write(&xscaler->xvip, offset + (index * 4), val);
+ ++index;
+ }
+ dev_dbg(xscaler->xvip.dev,
+ "%s : Operating in 1 PPC design", __func__);
+ return;
+ case XSCALER_PPC_2:
+ /*
+ * PhaseH is 64bits but only lower 32b of each entry is valid
+ * Need 1 32b write to get each entry into IP registers
+ */
+ for (i = 0; i < loop_width; i++) {
+ val = lower_32_bits(xscaler->H_phases[i] &
+ XHSC_MASK_LOW_32BITS);
+ xvip_write(&xscaler->xvip, offset + (i * 4), val);
+ }
+ dev_dbg(xscaler->xvip.dev,
+ "%s : Operating in 2 PPC design", __func__);
+ return;
+ case XSCALER_PPC_4:
+ /*
+ * PhaseH is 64bits and each entry has valid 32b MSB & LSB
+ * Need 2 32b writes to get each entry into IP registers
+ * (index is array loc and offset is address offset)
+ */
+ for (i = 0; i < loop_width; i++) {
+ phasehdata = xscaler->H_phases[index++];
+ lsb = (u32)(phasehdata & XHSC_MASK_LOW_32BITS);
+ msb = (u32)((phasehdata >> 32) & XHSC_MASK_LOW_32BITS);
+ xvip_write(&xscaler->xvip, offset + (j * 4), lsb);
+ xvip_write(&xscaler->xvip, offset + ((j + 1) * 4), msb);
+ j += 2;
+ }
+ dev_dbg(xscaler->xvip.dev,
+ "%s : Operating in 4 PPC design", __func__);
+ return;
+ default:
+ dev_warn(xscaler->xvip.dev, "%s : %d unsupported ppc design!!!\n",
+ __func__, xscaler->pix_per_clk);
+ }
+}
+
+/*
+ * Start/stop streaming.  On stop: pulse the global GPIO reset, reset the
+ * sub-cores and clear the phase shadow array.  On start: program the
+ * V-scaler (which runs first in the pipeline), then the H-scaler, then
+ * kick both sub-cores and release the video-in reset.
+ */
+static int xscaler_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct xscaler_device *xscaler = to_scaler(subdev);
+ u32 width_in, width_out;
+ u32 height_in, height_out;
+ u32 code_in, code_out;
+ u32 pixel_rate;
+ u32 line_rate;
+ int ret;
+
+ if (!enable) {
+ dev_dbg(xscaler->xvip.dev, "%s: Stream Off", __func__);
+ /* Reset the Global IP Reset through PS GPIO */
+ gpiod_set_value_cansleep(xscaler->rst_gpio,
+ XSCALER_RESET_ASSERT);
+ gpiod_set_value_cansleep(xscaler->rst_gpio,
+ XSCALER_RESET_DEASSERT);
+ xscaler_reset(xscaler);
+ memset(xscaler->H_phases, 0, sizeof(xscaler->H_phases));
+ return 0;
+ }
+
+ dev_dbg(xscaler->xvip.dev, "%s: Stream On", __func__);
+
+ /* Extract Sink Pad Information */
+ width_in = xscaler->formats[XVIP_PAD_SINK].width;
+ height_in = xscaler->formats[XVIP_PAD_SINK].height;
+ code_in = xscaler->formats[XVIP_PAD_SINK].code;
+
+ /* Extract Source Pad Information */
+ width_out = xscaler->formats[XVIP_PAD_SOURCE].width;
+ height_out = xscaler->formats[XVIP_PAD_SOURCE].height;
+ code_out = xscaler->formats[XVIP_PAD_SOURCE].code;
+
+ /*
+ * V Scaler is before H Scaler
+ * V-Scaler_setup
+ */
+ line_rate = (height_in * STEP_PRECISION) / height_out;
+
+ if (xscaler->is_polyphase) {
+ ret = xv_vscaler_select_coeff(xscaler, height_in, height_out);
+ if (ret < 0)
+ return ret;
+ xv_vscaler_set_coeff(xscaler);
+ }
+
+ xvip_write(&xscaler->xvip, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_HWREG_HEIGHTIN_DATA, height_in);
+ xvip_write(&xscaler->xvip, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_HWREG_WIDTH_DATA, width_in);
+ xvip_write(&xscaler->xvip, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_HWREG_HEIGHTOUT_DATA, height_out);
+ xvip_write(&xscaler->xvip, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_HWREG_LINERATE_DATA, line_rate);
+ /* ret deliberately carries the V-scaler output color format */
+ ret = xv_vscaler_setup_video_fmt(xscaler, code_in);
+ if (ret < 0)
+ return ret;
+
+ /* H-Scaler_setup */
+ pixel_rate = (width_in * STEP_PRECISION) / width_out;
+
+ xvip_write(&xscaler->xvip, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_HEIGHT_DATA, height_out);
+ xvip_write(&xscaler->xvip, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_WIDTHIN_DATA, width_in);
+ xvip_write(&xscaler->xvip, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_WIDTHOUT_DATA, width_out);
+ xvip_write(&xscaler->xvip, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_HWREG_PIXELRATE_DATA, pixel_rate);
+ /* pass the V-scaler output format (in ret) as the H-scaler input */
+ ret = xv_hscaler_setup_video_fmt(xscaler, code_out, ret);
+ if (ret < 0)
+ return ret;
+
+ if (xscaler->is_polyphase) {
+ ret = xv_hscaler_select_coeff(xscaler, width_in, width_out);
+ if (ret < 0)
+ return ret;
+ xv_hscaler_set_coeff(xscaler);
+ }
+
+ xv_hscaler_calculate_phases(xscaler, width_in, width_out, pixel_rate);
+ xv_hscaler_set_phases(xscaler);
+
+ /* Start Scaler sub-cores */
+ xvip_write(&xscaler->xvip, V_HSCALER_OFF +
+ XV_HSCALER_CTRL_ADDR_AP_CTRL, XSCALER_STREAM_ON);
+ xvip_write(&xscaler->xvip, V_VSCALER_OFF +
+ XV_VSCALER_CTRL_ADDR_AP_CTRL, XSCALER_STREAM_ON);
+ xv_procss_enable_block(&xscaler->xvip, XGPIO_CH_RESET_SEL,
+ XGPIO_RESET_MASK_VIDEO_IN);
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Pad Operations
+ */
+
+/* Enumerate the single supported frame size range for a pad's try format */
+static int xscaler_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct v4l2_mbus_framefmt *format;
+ struct xscaler_device *xscaler = to_scaler(subdev);
+
+ format = v4l2_subdev_get_try_format(subdev, cfg, fse->pad);
+ /* only one size range per code; code must match the try format */
+ if (fse->index || fse->code != format->code)
+ return -EINVAL;
+
+ fse->min_width = XSCALER_MIN_WIDTH;
+ fse->max_width = xscaler->max_pixels;
+ fse->min_height = XSCALER_MIN_HEIGHT;
+ fse->max_height = xscaler->max_lines;
+
+ return 0;
+}
+
+/*
+ * Return the TRY or ACTIVE format for @pad, or NULL when @which is
+ * neither V4L2_SUBDEV_FORMAT_TRY nor V4L2_SUBDEV_FORMAT_ACTIVE.
+ */
+static struct v4l2_mbus_framefmt *
+__xscaler_get_pad_format(struct xscaler_device *xscaler,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&xscaler->xvip.subdev, cfg,
+ pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &xscaler->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+/* Get the current TRY or ACTIVE format on the requested pad */
+static int xscaler_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xscaler_device *xscaler = to_scaler(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __xscaler_get_pad_format(xscaler, cfg, fmt->pad, fmt->which);
+ /* __xscaler_get_pad_format() returns NULL for an unknown 'which' */
+ if (!format)
+ return -EINVAL;
+
+ fmt->format = *format;
+
+ return 0;
+}
+
+/*
+ * Set the TRY or ACTIVE format on the requested pad, clamping width and
+ * height to the IP limits and echoing the result back to the caller.
+ */
+static int xscaler_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct xscaler_device *xscaler = to_scaler(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __xscaler_get_pad_format(xscaler, cfg, fmt->pad, fmt->which);
+ /* __xscaler_get_pad_format() returns NULL for an unknown 'which' */
+ if (!format)
+ return -EINVAL;
+
+ *format = fmt->format;
+
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ XSCALER_MIN_WIDTH, xscaler->max_pixels);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ XSCALER_MIN_HEIGHT, xscaler->max_lines);
+ format->code = fmt->format.code;
+ fmt->format = *format;
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Operations
+ */
+
+/* Seed the file handle's TRY formats with the driver defaults */
+static int
+xscaler_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ struct xscaler_device *xscaler = to_scaler(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* Initialize with default formats */
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SINK);
+ *format = xscaler->default_formats[XVIP_PAD_SINK];
+
+ format = v4l2_subdev_get_try_format(subdev, fh->pad, XVIP_PAD_SOURCE);
+ *format = xscaler->default_formats[XVIP_PAD_SOURCE];
+
+ return 0;
+}
+
+/* Nothing to release per file handle */
+static int
+xscaler_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+static struct v4l2_subdev_video_ops xscaler_video_ops = {
+ .s_stream = xscaler_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops xscaler_pad_ops = {
+ .enum_mbus_code = xvip_enum_mbus_code,
+ .enum_frame_size = xscaler_enum_frame_size,
+ .get_fmt = xscaler_get_format,
+ .set_fmt = xscaler_set_format,
+};
+
+static struct v4l2_subdev_ops xscaler_ops = {
+ .video = &xscaler_video_ops,
+ .pad = &xscaler_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops xscaler_internal_ops = {
+ .open = xscaler_open,
+ .close = xscaler_close,
+};
+
+/*
+ * Media Operations
+ */
+
+static const struct media_entity_operations xscaler_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * Platform Device Driver
+ */
+
+static int xscaler_parse_of(struct xscaler_device *xscaler)
+{
+ struct device *dev = xscaler->xvip.dev;
+ struct device_node *node = xscaler->xvip.dev->of_node;
+ const struct xvip_video_format *vip_format;
+ struct device_node *ports;
+ struct device_node *port;
+ int ret;
+ u32 port_id, dt_ppc;
+
+ if (xscaler->cfg->flags & XSCALER_CLK_PROP) {
+ xscaler->aclk_axis = devm_clk_get(dev, "aclk_axis");
+ if (IS_ERR(xscaler->aclk_axis)) {
+ ret = PTR_ERR(xscaler->aclk_axis);
+ dev_err(dev, "failed to get aclk_axis (%d)\n", ret);
+ return ret;
+ }
+ xscaler->aclk_ctrl = devm_clk_get(dev, "aclk_ctrl");
+ if (IS_ERR(xscaler->aclk_ctrl)) {
+ ret = PTR_ERR(xscaler->aclk_ctrl);
+ dev_err(dev, "failed to get aclk_ctrl (%d)\n", ret);
+ return ret;
+ }
+ } else {
+ dev_info(dev, "assuming all required clocks are enabled!\n");
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-height",
+ &xscaler->max_lines);
+ if (ret < 0) {
+ dev_err(dev, "xlnx,max-height is missing!");
+ return -EINVAL;
+ } else if (xscaler->max_lines > XSCALER_MAX_HEIGHT ||
+ xscaler->max_lines < XSCALER_MIN_HEIGHT) {
+ dev_err(dev, "Invalid height in dt");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,max-width",
+ &xscaler->max_pixels);
+ if (ret < 0) {
+ dev_err(dev, "xlnx,max-width is missing!");
+ return -EINVAL;
+ } else if (xscaler->max_pixels > XSCALER_MAX_WIDTH ||
+ xscaler->max_pixels < XSCALER_MIN_WIDTH) {
+ dev_err(dev, "Invalid width in dt");
+ return -EINVAL;
+ }
+
+ ports = of_get_child_by_name(node, "ports");
+ if (!ports)
+ ports = node;
+
+ /* Get the format description for each pad */
+ for_each_child_of_node(ports, port) {
+ if (port->name && (of_node_cmp(port->name, "port") == 0)) {
+ vip_format = xvip_of_get_format(port);
+ if (IS_ERR(vip_format)) {
+ dev_err(dev, "invalid format in DT");
+ return PTR_ERR(vip_format);
+ }
+
+ ret = of_property_read_u32(port, "reg", &port_id);
+ if (ret < 0) {
+ dev_err(dev, "No reg in DT");
+ return ret;
+ }
+
+ if (port_id != 0 && port_id != 1) {
+ dev_err(dev, "Invalid reg in DT");
+ return -EINVAL;
+ }
+ xscaler->vip_formats[port_id] = vip_format;
+ }
+ }
+
+ ret = of_property_read_u32(node, "xlnx,num-hori-taps",
+ &xscaler->num_hori_taps);
+ if (ret < 0)
+ return ret;
+
+ switch (xscaler->num_hori_taps) {
+ case XV_HSCALER_TAPS_2:
+ case XV_HSCALER_TAPS_4:
+ xscaler->is_polyphase = false;
+ break;
+ case XV_HSCALER_TAPS_6:
+ case XV_HSCALER_TAPS_8:
+ case XV_HSCALER_TAPS_10:
+ case XV_HSCALER_TAPS_12:
+ xscaler->is_polyphase = true;
+ break;
+ default:
+ dev_err(dev, "Unsupported num-hori-taps %d",
+ xscaler->num_hori_taps);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,num-vert-taps",
+ &xscaler->num_vert_taps);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * For Bilinear and Bicubic case
+ * number of vertical and horizontal taps must match
+ */
+ switch (xscaler->num_vert_taps) {
+ case XV_HSCALER_TAPS_2:
+ case XV_VSCALER_TAPS_4:
+ if (xscaler->num_vert_taps != xscaler->num_hori_taps) {
+ dev_err(dev,
+ "H-scaler taps %d mismatches V-scaler taps %d",
+ xscaler->num_hori_taps,
+ xscaler->num_vert_taps);
+ return -EINVAL;
+ }
+ break;
+ case XV_VSCALER_TAPS_6:
+ case XV_VSCALER_TAPS_8:
+ case XV_VSCALER_TAPS_10:
+ case XV_VSCALER_TAPS_12:
+ xscaler->is_polyphase = true;
+ break;
+ default:
+ dev_err(dev, "Unsupported num-vert-taps %d",
+ xscaler->num_vert_taps);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,pix-per-clk", &dt_ppc);
+ if (ret < 0)
+ return ret;
+
+	/* Driver supports 1, 2 and 4 pixels per clock (PPC) */
+ if (dt_ppc != XSCALER_PPC_1 && dt_ppc != XSCALER_PPC_2 &&
+ dt_ppc != XSCALER_PPC_4) {
+ dev_err(xscaler->xvip.dev,
+ "Unsupported xlnx,pix-per-clk(%d) value in DT", dt_ppc);
+ return -EINVAL;
+ }
+ xscaler->pix_per_clk = dt_ppc;
+
+ /* Reset GPIO */
+ xscaler->rst_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(xscaler->rst_gpio)) {
+ if (PTR_ERR(xscaler->rst_gpio) != -EPROBE_DEFER)
+ dev_err(dev, "Reset GPIO not setup in DT");
+ return PTR_ERR(xscaler->rst_gpio);
+ }
+
+ return 0;
+}
+
+static int xscaler_probe(struct platform_device *pdev)
+{
+ struct xscaler_device *xscaler;
+ struct v4l2_subdev *subdev;
+ struct v4l2_mbus_framefmt *default_format;
+ int ret;
+ const struct of_device_id *match;
+ struct device_node *node = pdev->dev.of_node;
+ struct resource *res;
+
+ xscaler = devm_kzalloc(&pdev->dev, sizeof(*xscaler), GFP_KERNEL);
+ if (!xscaler)
+ return -ENOMEM;
+
+ xscaler->xvip.dev = &pdev->dev;
+
+ match = of_match_node(xscaler_of_id_table, node);
+ if (!match)
+ return -ENODEV;
+
+ if (!strncmp(match->compatible, xscaler_of_id_table[0].compatible,
+ strlen(xscaler_of_id_table[0].compatible))) {
+ dev_warn(&pdev->dev,
+ "%s - compatible string is getting deprecated!\n",
+ match->compatible);
+ }
+
+ xscaler->cfg = match->data;
+
+ ret = xscaler_parse_of(xscaler);
+ if (ret < 0)
+ return ret;
+
+ /* Initialize coefficient parameters */
+ xscaler->max_num_phases = XSCALER_MAX_PHASES;
+
+ if (xscaler->cfg->flags & XSCALER_CLK_PROP) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xscaler->xvip.iomem = devm_ioremap_resource(xscaler->xvip.dev,
+ res);
+ if (IS_ERR(xscaler->xvip.iomem))
+ return PTR_ERR(xscaler->xvip.iomem);
+
+ ret = clk_prepare_enable(xscaler->aclk_axis);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable aclk_axis (%d)\n",
+ ret);
+ goto res_cleanup;
+ }
+
+ ret = clk_prepare_enable(xscaler->aclk_ctrl);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable aclk_ctrl (%d)\n",
+ ret);
+ goto axis_clk_cleanup;
+ }
+ } else {
+ ret = xvip_init_resources(&xscaler->xvip);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Reset the Global IP Reset through a PS GPIO */
+ gpiod_set_value_cansleep(xscaler->rst_gpio, XSCALER_RESET_DEASSERT);
+ /* Reset internal GPIO within the IP */
+ xscaler_reset(xscaler);
+
+ /* Initialize V4L2 subdevice and media entity */
+ subdev = &xscaler->xvip.subdev;
+ v4l2_subdev_init(subdev, &xscaler_ops);
+ subdev->dev = &pdev->dev;
+ subdev->internal_ops = &xscaler_internal_ops;
+ strlcpy(subdev->name, dev_name(&pdev->dev), sizeof(subdev->name));
+ v4l2_set_subdevdata(subdev, xscaler);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ /* Initialize default and active formats */
+ default_format = &xscaler->default_formats[XVIP_PAD_SINK];
+ default_format->code = xscaler->vip_formats[XVIP_PAD_SINK]->code;
+ default_format->field = V4L2_FIELD_NONE;
+ default_format->colorspace = V4L2_COLORSPACE_SRGB;
+ default_format->width = XSCALER_DEF_IN_WIDTH;
+ default_format->height = XSCALER_DEF_IN_HEIGHT;
+ xscaler->formats[XVIP_PAD_SINK] = *default_format;
+
+ default_format = &xscaler->default_formats[XVIP_PAD_SOURCE];
+ *default_format = xscaler->default_formats[XVIP_PAD_SINK];
+ default_format->code = xscaler->vip_formats[XVIP_PAD_SOURCE]->code;
+ default_format->width = XSCALER_DEF_OUT_WIDTH;
+ default_format->height = XSCALER_DEF_OUT_HEIGHT;
+ xscaler->formats[XVIP_PAD_SOURCE] = *default_format;
+
+ xscaler->pads[XVIP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ xscaler->pads[XVIP_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ subdev->entity.ops = &xscaler_media_ops;
+
+ ret = media_entity_pads_init(&subdev->entity, 2, xscaler->pads);
+ if (ret < 0)
+ goto error;
+
+ platform_set_drvdata(pdev, xscaler);
+
+ ret = v4l2_async_register_subdev(subdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to register subdev");
+ goto error;
+ }
+ dev_info(xscaler->xvip.dev, "Num Hori Taps %d",
+ xscaler->num_hori_taps);
+ dev_info(xscaler->xvip.dev, "Num Vert Taps %d",
+ xscaler->num_vert_taps);
+ dev_info(&pdev->dev, "VPSS Scaler Probe Successful");
+ return 0;
+
+error:
+ media_entity_cleanup(&subdev->entity);
+ clk_disable_unprepare(xscaler->aclk_ctrl);
+axis_clk_cleanup:
+ clk_disable_unprepare(xscaler->aclk_axis);
+res_cleanup:
+ xvip_cleanup_resources(&xscaler->xvip);
+ return ret;
+}
+
+static int xscaler_remove(struct platform_device *pdev)
+{
+ struct xscaler_device *xscaler = platform_get_drvdata(pdev);
+ struct v4l2_subdev *subdev = &xscaler->xvip.subdev;
+
+ v4l2_async_unregister_subdev(subdev);
+ media_entity_cleanup(&subdev->entity);
+ clk_disable_unprepare(xscaler->aclk_ctrl);
+ clk_disable_unprepare(xscaler->aclk_axis);
+ xvip_cleanup_resources(&xscaler->xvip);
+
+ return 0;
+}
+
+static struct platform_driver xscaler_driver = {
+ .driver = {
+ .name = "xilinx-vpss-scaler",
+ .of_match_table = xscaler_of_id_table,
+ },
+ .probe = xscaler_probe,
+ .remove = xscaler_remove,
+};
+
+module_platform_driver(xscaler_driver);
+MODULE_DESCRIPTION("Xilinx Scaler VPSS Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/xilinx/xilinx-vtc.c b/drivers/media/platform/xilinx/xilinx-vtc.c
index 0ae0208d7529..d69f48dfdac4 100644
--- a/drivers/media/platform/xilinx/xilinx-vtc.c
+++ b/drivers/media/platform/xilinx/xilinx-vtc.c
@@ -141,6 +141,9 @@
#define XVTC_GENERATOR_GLOBAL_DELAY 0x0104
+/* Value of 1 = .01% */
+#define XVTC_CLK_MAX_PCT_ERR 1
+
/**
* struct xvtc_device - Xilinx Video Timing Controller device structure
* @xvip: Xilinx Video IP device
@@ -175,10 +178,25 @@ int xvtc_generator_start(struct xvtc_device *xvtc,
const struct xvtc_config *config)
{
int ret;
+ unsigned long s_rate;
+ unsigned long g_rate;
+ unsigned long clk_err;
if (!xvtc->has_generator)
return -ENXIO;
+ s_rate = config->fps * config->hsize * config->vsize;
+ ret = clk_set_rate(xvtc->xvip.clk, s_rate);
+ if (ret < 0)
+ return ret;
+
+ /* Verify that the clock is within a reasonable tolerance. */
+ g_rate = clk_get_rate(xvtc->xvip.clk);
+ clk_err = (abs(g_rate - s_rate) * 10000) / (s_rate);
+ if (clk_err > XVTC_CLK_MAX_PCT_ERR)
+ dev_warn(xvtc->xvip.dev, "Failed to set clk rate: %lu, actual rate: %lu\n",
+ s_rate, g_rate);
+
ret = clk_prepare_enable(xvtc->xvip.clk);
if (ret < 0)
return ret;
diff --git a/drivers/media/platform/xilinx/xilinx-vtc.h b/drivers/media/platform/xilinx/xilinx-vtc.h
index 855845911ffc..0f360ed55f34 100644
--- a/drivers/media/platform/xilinx/xilinx-vtc.h
+++ b/drivers/media/platform/xilinx/xilinx-vtc.h
@@ -27,6 +27,7 @@ struct xvtc_config {
unsigned int vsync_start;
unsigned int vsync_end;
unsigned int vsize;
+ unsigned int fps;
};
struct xvtc_device *xvtc_of_get(struct device_node *np);
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index cd60c6c1749e..92e8ce715a96 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -185,14 +185,27 @@ static int uvc_start_streaming(struct vb2_queue *vq, unsigned int count)
static void uvc_stop_streaming(struct vb2_queue *vq)
{
struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
+ struct uvc_streaming *stream = uvc_queue_to_stream(queue);
lockdep_assert_irqs_enabled();
+ /* Prevent new buffers coming in. */
+ spin_lock_irq(&queue->irqlock);
+ queue->flags |= UVC_QUEUE_STOPPING;
+ spin_unlock_irq(&queue->irqlock);
+
+ /*
+ * All pending work should be completed before disabling the stream, as
+ * all URBs will be free'd during uvc_video_enable(s, 0).
+ */
+ flush_workqueue(stream->async_wq);
+
if (vq->type != V4L2_BUF_TYPE_META_CAPTURE)
uvc_video_stop_streaming(uvc_queue_to_stream(queue));
spin_lock_irq(&queue->irqlock);
uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
+ queue->flags &= ~UVC_QUEUE_STOPPING;
spin_unlock_irq(&queue->irqlock);
}
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 8fa77a81dd7f..f3f783e1e4f9 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -1097,6 +1097,20 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
return data[0];
}
+static void uvc_video_copy_packets(struct uvc_urb *uvc_urb)
+{
+ unsigned int i;
+
+ for (i = 0; i < uvc_urb->async_operations; i++) {
+ struct uvc_copy_op *op = &uvc_urb->copy_operations[i];
+
+ memcpy(op->dst, op->src, op->len);
+
+ /* Release reference taken on this buffer. */
+ uvc_queue_buffer_release(op->buf);
+ }
+}
+
/*
* uvc_video_decode_data_work: Asynchronous memcpy processing
*
@@ -1106,22 +1120,26 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
static void uvc_video_copy_data_work(struct work_struct *work)
{
struct uvc_urb *uvc_urb = container_of(work, struct uvc_urb, work);
- unsigned int i;
+ struct uvc_streaming *stream = uvc_urb->stream;
+ struct uvc_video_queue *queue = &stream->queue;
int ret;
- for (i = 0; i < uvc_urb->async_operations; i++) {
- struct uvc_copy_op *op = &uvc_urb->copy_operations[i];
-
- memcpy(op->dst, op->src, op->len);
+ uvc_video_copy_packets(uvc_urb);
- /* Release reference taken on this buffer. */
- uvc_queue_buffer_release(op->buf);
+ /*
+ * Prevent resubmitting URBs when shutting down to ensure that no new
+ * work item will be scheduled after uvc_stop_streaming() flushes the
+ * work queue.
+ */
+ spin_lock_irq(&queue->irqlock);
+ if (!(queue->flags & UVC_QUEUE_STOPPING)) {
+ ret = usb_submit_urb(uvc_urb->urb, GFP_ATOMIC);
+ if (ret < 0)
+ uvc_printk(KERN_ERR,
+ "Failed to resubmit video URB (%d).\n",
+ ret);
}
-
- ret = usb_submit_urb(uvc_urb->urb, GFP_KERNEL);
- if (ret < 0)
- uvc_printk(KERN_ERR, "Failed to resubmit video URB (%d).\n",
- ret);
+ spin_unlock_irq(&queue->irqlock);
}
static void uvc_video_decode_data(struct uvc_urb *uvc_urb,
@@ -1524,6 +1542,10 @@ static void uvc_video_complete(struct urb *urb)
buf = uvc_queue_get_current_buffer(queue);
+ /*
+ * Process the URB headers, and optionally queue expensive memcpy tasks
+ * to be deferred to a work queue.
+ */
if (vb2_qmeta) {
spin_lock_irqsave(&qmeta->irqlock, flags);
if (!list_empty(&qmeta->irqqueue))
@@ -1551,7 +1573,21 @@ static void uvc_video_complete(struct urb *urb)
return;
}
- queue_work(stream->async_wq, &uvc_urb->work);
+ /*
+ * When the stream is stopped, all URBs are freed as part of the call to
+ * uvc_stop_streaming() and must not be handled asynchronously. In that
+ * event we can safely complete the packet work directly in this
+ * context, without resubmitting the URB.
+ */
+ spin_lock_irqsave(&queue->irqlock, flags);
+ if (!(queue->flags & UVC_QUEUE_STOPPING)) {
+ /* Handle any heavy lifting required */
+ INIT_WORK(&uvc_urb->work, uvc_video_copy_data_work);
+ queue_work(stream->async_wq, &uvc_urb->work);
+ } else {
+ uvc_video_copy_packets(uvc_urb);
+ }
+ spin_unlock_irqrestore(&queue->irqlock, flags);
}
/*
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 6ab972c643e3..f1b6a6000532 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -173,9 +173,9 @@
#define DRIVER_VERSION "1.1.1"
/* Number of isochronous URBs. */
-#define UVC_URBS 5
+#define UVC_URBS 50
/* Maximum number of packets per URB. */
-#define UVC_MAX_PACKETS 32
+#define UVC_MAX_PACKETS 48
/* Maximum status buffer size in bytes of interrupt URB. */
#define UVC_MAX_STATUS_SIZE 16
@@ -421,6 +421,7 @@ struct uvc_buffer {
#define UVC_QUEUE_DISCONNECTED (1 << 0)
#define UVC_QUEUE_DROP_CORRUPTED (1 << 1)
+#define UVC_QUEUE_STOPPING (1 << 2)
struct uvc_video_queue {
struct vb2_queue queue;
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index b2ef8e60ea7d..69aeb465b0b1 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/version.h>
+#include <linux/v4l2-subdev.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
@@ -1217,6 +1218,9 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_BGRX32: descr = "32-bit XBGR 8-8-8-8"; break;
case V4L2_PIX_FMT_RGBA32: descr = "32-bit RGBA 8-8-8-8"; break;
case V4L2_PIX_FMT_RGBX32: descr = "32-bit RGBX 8-8-8-8"; break;
+ case V4L2_PIX_FMT_XBGR30: descr = "32-bit XBGR 2-10-10-10"; break;
+ case V4L2_PIX_FMT_XBGR40: descr = "40-bit XBGR 4-12-12-12"; break;
+ case V4L2_PIX_FMT_BGR48: descr = "48-bit BGR 16-16-16"; break;
case V4L2_PIX_FMT_GREY: descr = "8-bit Greyscale"; break;
case V4L2_PIX_FMT_Y4: descr = "4-bit Greyscale"; break;
case V4L2_PIX_FMT_Y6: descr = "6-bit Greyscale"; break;
@@ -1245,6 +1249,9 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_YUV411P: descr = "Planar YUV 4:1:1"; break;
case V4L2_PIX_FMT_Y41P: descr = "YUV 4:1:1 (Packed)"; break;
case V4L2_PIX_FMT_YUV444: descr = "16-bit A/XYUV 4-4-4-4"; break;
+ case V4L2_PIX_FMT_XVUY32: descr = "32-bit packed XVUY 8-8-8-8"; break;
+ case V4L2_PIX_FMT_AVUY32: descr = "32-bit packed AVUY 8-8-8-8"; break;
+ case V4L2_PIX_FMT_VUY24: descr = "24-bit packed VUY 8-8-8"; break;
case V4L2_PIX_FMT_YUV555: descr = "16-bit A/XYUV 1-5-5-5"; break;
case V4L2_PIX_FMT_YUV565: descr = "16-bit YUV 5-6-5"; break;
case V4L2_PIX_FMT_YUV32: descr = "32-bit A/XYUV 8-8-8-8"; break;
@@ -1257,24 +1264,41 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_HI240: descr = "8-bit Dithered RGB (BTTV)"; break;
case V4L2_PIX_FMT_HM12: descr = "YUV 4:2:0 (16x16 Macroblocks)"; break;
case V4L2_PIX_FMT_M420: descr = "YUV 4:2:0 (M420)"; break;
+ case V4L2_PIX_FMT_XVUY10: descr = "XVUY 2-10-10-10"; break;
case V4L2_PIX_FMT_NV12: descr = "Y/CbCr 4:2:0"; break;
case V4L2_PIX_FMT_NV21: descr = "Y/CrCb 4:2:0"; break;
case V4L2_PIX_FMT_NV16: descr = "Y/CbCr 4:2:2"; break;
case V4L2_PIX_FMT_NV61: descr = "Y/CrCb 4:2:2"; break;
case V4L2_PIX_FMT_NV24: descr = "Y/CbCr 4:4:4"; break;
case V4L2_PIX_FMT_NV42: descr = "Y/CrCb 4:4:4"; break;
+ case V4L2_PIX_FMT_XV15: descr = "Y/CrCb 4:2:0 10-bit"; break;
+ case V4L2_PIX_FMT_XV20: descr = "Y/CrCb 4:2:2 10-bit"; break;
case V4L2_PIX_FMT_NV12M: descr = "Y/CbCr 4:2:0 (N-C)"; break;
case V4L2_PIX_FMT_NV21M: descr = "Y/CrCb 4:2:0 (N-C)"; break;
case V4L2_PIX_FMT_NV16M: descr = "Y/CbCr 4:2:2 (N-C)"; break;
case V4L2_PIX_FMT_NV61M: descr = "Y/CrCb 4:2:2 (N-C)"; break;
case V4L2_PIX_FMT_NV12MT: descr = "Y/CbCr 4:2:0 (64x32 MB, N-C)"; break;
case V4L2_PIX_FMT_NV12MT_16X16: descr = "Y/CbCr 4:2:0 (16x16 MB, N-C)"; break;
+ case V4L2_PIX_FMT_XV20M: descr = "Y/CrCb 4:2:2 10-bit (N-C)"; break;
+ case V4L2_PIX_FMT_XV15M: descr = "Y/CrCb 4:2:0 10-bit (N-C)"; break;
case V4L2_PIX_FMT_YUV420M: descr = "Planar YUV 4:2:0 (N-C)"; break;
case V4L2_PIX_FMT_YVU420M: descr = "Planar YVU 4:2:0 (N-C)"; break;
case V4L2_PIX_FMT_YUV422M: descr = "Planar YUV 4:2:2 (N-C)"; break;
case V4L2_PIX_FMT_YVU422M: descr = "Planar YVU 4:2:2 (N-C)"; break;
case V4L2_PIX_FMT_YUV444M: descr = "Planar YUV 4:4:4 (N-C)"; break;
case V4L2_PIX_FMT_YVU444M: descr = "Planar YVU 4:4:4 (N-C)"; break;
+ case V4L2_PIX_FMT_X012: descr = "Y/CbCr 4:2:0, 4-12-12-12"; break;
+ case V4L2_PIX_FMT_X012M: descr = "Y/CbCr 4:2:0, 4-12-12-12 (N-C)"; break;
+ case V4L2_PIX_FMT_X212: descr = "Y/CbCr 4:2:2, 4-12-12-12"; break;
+ case V4L2_PIX_FMT_X212M: descr = "Y/CbCr 4:2:2, 4-12-12-12 (N-C)"; break;
+ case V4L2_PIX_FMT_X412: descr = "Y/CbCr 4:4:4, 4-12-12-12"; break;
+ case V4L2_PIX_FMT_X412M: descr = "Y/CbCr 4:4:4, 4-12-12-12 (N-C)"; break;
+ case V4L2_PIX_FMT_X016: descr = "Y/CbCr 4:2:0, 16-16-16"; break;
+ case V4L2_PIX_FMT_X016M: descr = "Y/CbCr 4:2:0, 16-16-16 (N-C)"; break;
+ case V4L2_PIX_FMT_X216: descr = "Y/CbCr 4:2:2, 16-16-16"; break;
+ case V4L2_PIX_FMT_X216M: descr = "Y/CbCr 4:2:2, 16-16-16 (N-C)"; break;
+ case V4L2_PIX_FMT_X416: descr = "Y/CbCr 4:4:4, 16-16-16"; break;
+ case V4L2_PIX_FMT_X416M: descr = "Y/CbCr 4:4:4, 16-16-16 (N-C)"; break;
case V4L2_PIX_FMT_SBGGR8: descr = "8-bit Bayer BGBG/GRGR"; break;
case V4L2_PIX_FMT_SGBRG8: descr = "8-bit Bayer GBGB/RGRG"; break;
case V4L2_PIX_FMT_SGRBG8: descr = "8-bit Bayer GRGR/BGBG"; break;
@@ -3023,6 +3047,23 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
}
break;
}
+
+ case VIDIOC_SUBDEV_G_ROUTING:
+ case VIDIOC_SUBDEV_S_ROUTING: {
+ struct v4l2_subdev_routing *route = parg;
+
+ if (route->num_routes > 0) {
+ if (route->num_routes > 256)
+ return -EINVAL;
+
+ *user_ptr = (void __user *)route->routes;
+ *kernel_ptr = (void *)&route->routes;
+ *array_size = sizeof(struct v4l2_plane)
+ * route->num_routes;
+ ret = 1;
+ }
+ break;
+ }
}
return ret;
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index a376b351135f..9987164793f8 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -606,6 +606,33 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_SUBDEV_S_DV_TIMINGS:
return v4l2_subdev_call(sd, video, s_dv_timings, arg);
+ case VIDIOC_SUBDEV_G_ROUTING:
+ return v4l2_subdev_call(sd, pad, get_routing, arg);
+
+ case VIDIOC_SUBDEV_S_ROUTING: {
+ struct v4l2_subdev_routing *route = arg;
+ unsigned int i;
+
+ if (route->num_routes > sd->entity.num_pads)
+ return -EINVAL;
+
+ for (i = 0; i < route->num_routes; ++i) {
+ unsigned int sink = route->routes[i].sink;
+ unsigned int source = route->routes[i].source;
+ struct media_pad *pads = sd->entity.pads;
+
+ if (sink >= sd->entity.num_pads ||
+ source >= sd->entity.num_pads)
+ return -EINVAL;
+
+ if (!(pads[sink].flags & MEDIA_PAD_FL_SINK) ||
+ !(pads[source].flags & MEDIA_PAD_FL_SOURCE))
+ return -EINVAL;
+ }
+
+ return v4l2_subdev_call(sd, pad, set_routing, route);
+ }
+
case VIDIOC_SUBDEV_G_STD:
return v4l2_subdev_call(sd, video, g_std, arg);
@@ -628,6 +655,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_SUBDEV_QUERYSTD:
return v4l2_subdev_call(sd, video, querystd, arg);
#endif
+
default:
return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
}
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 99e151475d8f..396a9dec0bab 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -453,6 +453,27 @@ config XILINX_SDFEC
If unsure, say N.
+config XILINX_FLEX_PM
+ tristate "Xilinx Flexnoc Performance Monitor"
+ help
+	  This option enables support for the Xilinx Flexnoc Performance
+	  Monitor driver. It monitors the read and write transactions and
+	  has counters for the LPD and FPD domains.
+
+	  If unsure, say N.
+
+config XILINX_TRAFGEN
+ tristate "Xilinx Traffic Generator"
+ help
+ This option enables support for the Xilinx Traffic Generator driver.
+ It is designed to generate AXI4 traffic which can be used to stress
+ different modules/interconnect connected in the system. Different
+ configurable options which are provided through sysfs entries allow
+	  the user to generate a wide variety of traffic based on their
+	  requirements.
+
+	  If unsure, say N.
+
config MISC_RTSX
tristate
default MISC_RTSX_PCI || MISC_RTSX_USB
@@ -465,6 +486,7 @@ config PVPANIC
a paravirtualized device provided by QEMU; it lets a virtual machine
(guest) communicate panic events to the host.
+source "drivers/misc/jesd204b/Kconfig"
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 9abf2923d831..fe6b135b11db 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -58,3 +58,6 @@ obj-$(CONFIG_PVPANIC) += pvpanic.o
obj-$(CONFIG_HABANA_AI) += habanalabs/
obj-$(CONFIG_UACCE) += uacce/
obj-$(CONFIG_XILINX_SDFEC) += xilinx_sdfec.o
+obj-$(CONFIG_XILINX_FLEX_PM) += xilinx_flex_pm.o
+obj-$(CONFIG_XILINX_TRAFGEN) += xilinx_trafgen.o
+obj-$(CONFIG_XILINX_JESD204B) += jesd204b/
diff --git a/drivers/misc/jesd204b/Kconfig b/drivers/misc/jesd204b/Kconfig
new file mode 100644
index 000000000000..aff08cfe8f82
--- /dev/null
+++ b/drivers/misc/jesd204b/Kconfig
@@ -0,0 +1,28 @@
+#
+# Jesd204b support
+#
+
+config XILINX_JESD204B
+ tristate "Xilinx JESD204B"
+ help
+ This option enables support for the Xilinx JESD204B driver.
+ It is designed to allow user to access JESD204B IP registers
+ with sysfs entries. JESD204B is the protocol used by High-Speed
+ data converters to transfer data to FPGA/ASIC.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called jesd204b.
+
+config XILINX_JESD204B_PHY
+ tristate "JESD Phy Driver"
+ depends on XILINX_JESD204B
+ help
+	  This is the JESD204B PHY interface. It enables support for the
+	  Xilinx JESD204B PHY controller.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called jesd204b_phy.
diff --git a/drivers/misc/jesd204b/Makefile b/drivers/misc/jesd204b/Makefile
new file mode 100644
index 000000000000..7723fcb002c2
--- /dev/null
+++ b/drivers/misc/jesd204b/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_XILINX_JESD204B_PHY) += jesd204b_phy.o
+jesd204b_phy-y += jesd_phy.o gtx7s_cpll_bands.o \
+ gtx7s_qpll_bands.o
+obj-$(CONFIG_XILINX_JESD204B) += jesd204b.o
+jesd204b-y += xilinx_jesd204b.o
diff --git a/drivers/misc/jesd204b/gtx7s_cpll_bands.c b/drivers/misc/jesd204b/gtx7s_cpll_bands.c
new file mode 100644
index 000000000000..a9610f7ade67
--- /dev/null
+++ b/drivers/misc/jesd204b/gtx7s_cpll_bands.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2014 - 2015 Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/types.h>
+#include "s7_gtxe2_drp.h"
+#include "gtx7s_cpll_bands.h"
+
+static const u32 gtx7s_cpll_channel_address_lut
+ [GTX7S_CPLL_NUM_CHANNEL_DRP_REGS] = {
+ RXCDR_CFG0_ADDR,
+ RXCDR_CFG1_ADDR,
+ RXCDR_CFG2_ADDR,
+ RXCDR_CFG3_ADDR,
+ RXCDR_CFG4_ADDR,
+ RXOUT_DIV_ADDR,
+ TXOUT_DIV_ADDR,
+ RX_DFE_LPM_CFG_ADDR
+};
+
+static const u32 gtx7s_cpll_channel_offset_lut
+ [GTX7S_CPLL_NUM_CHANNEL_DRP_REGS] = {
+ RXCDR_CFG0_OFFSET,
+ RXCDR_CFG1_OFFSET,
+ RXCDR_CFG2_OFFSET,
+ RXCDR_CFG3_OFFSET,
+ RXCDR_CFG4_OFFSET,
+ RXOUT_DIV_OFFSET,
+ TXOUT_DIV_OFFSET,
+ RX_DFE_LPM_CFG_OFFSET
+};
+
+static const u32 gtx7s_cpll_channel_mask_lut
+ [GTX7S_CPLL_NUM_CHANNEL_DRP_REGS] = {
+ RXCDR_CFG0_MASK,
+ RXCDR_CFG1_MASK,
+ RXCDR_CFG2_MASK,
+ RXCDR_CFG3_MASK,
+ RXCDR_CFG4_MASK,
+ RXOUT_DIV_MASK,
+ TXOUT_DIV_MASK,
+ RX_DFE_LPM_CFG_MASK
+};
+
+/* Note bands run vertically from 1 to 4 */
+static const u16 gtx7s_cpll_channel_param_lut[GTX7S_CPLL_NUM_CHANNEL_DRP_REGS]
+ [GTX7S_CPLL_NUM_LINE_RATE_BANDS] = {
+ {0x20, 0x20, 0x20, 0x20 },/* RXCDR_CFG0 */
+ {0x1010, 0x1020, 0x1040, 0x1040 },/* RXCDR_CFG1 */
+ {0x23ff, 0x23ff, 0x23ff, 0x23ff },/* RXCDR_CFG2 */
+ {0x0, 0x0, 0x0, 0x0 },/* RXCDR_CFG3 */
+ {0x3, 0x3, 0x3, 0x3 },/* RXCDR_CFG4 */
+ {0x3, 0x2, 0x1, 0x1 },/* RXOUT_DIV */
+ {0x3, 0x2, 0x1, 0x1 },/* TXOUT_DIV */
+ {0x904, 0x904, 0x904, 0x104 } /* RX_DFE_LPM_CFG */
+};
+
+u32 get_gtx7s_cpll_address_lut(u32 lut_address)
+{
+ return gtx7s_cpll_channel_address_lut[lut_address];
+}
+
+u32 get_gtx7s_cpll_offset_lut(u32 lut_address)
+{
+ return gtx7s_cpll_channel_offset_lut[lut_address];
+}
+
+u32 get_gtx7s_cpll_mask_lut(u32 lut_address)
+{
+ return gtx7s_cpll_channel_mask_lut[lut_address];
+}
+
+u16 get_gtx7s_cpll_param_lut(u32 param_address, u32 band_address)
+{
+ return gtx7s_cpll_channel_param_lut[param_address][band_address];
+}
diff --git a/drivers/misc/jesd204b/gtx7s_cpll_bands.h b/drivers/misc/jesd204b/gtx7s_cpll_bands.h
new file mode 100644
index 000000000000..f53f20de2cda
--- /dev/null
+++ b/drivers/misc/jesd204b/gtx7s_cpll_bands.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2014 - 2015 Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/types.h>
+
+#ifndef GTX7S_CPLL_BANDS_H_
+#define GTX7S_CPLL_BANDS_H_
+
+#define GTX7S_CPLL_NUM_CHANNEL_DRP_REGS 8
+#define GTX7S_CPLL_NUM_LINE_RATE_BANDS 4
+
+u32 get_gtx7s_cpll_address_lut(u32);
+u32 get_gtx7s_cpll_offset_lut(u32);
+u32 get_gtx7s_cpll_mask_lut(u32);
+u16 get_gtx7s_cpll_param_lut(u32, u32);
+
+#endif /* GTX7S_CPLL_BANDS_H_ */
diff --git a/drivers/misc/jesd204b/gtx7s_qpll_bands.c b/drivers/misc/jesd204b/gtx7s_qpll_bands.c
new file mode 100644
index 000000000000..71e70a611bb7
--- /dev/null
+++ b/drivers/misc/jesd204b/gtx7s_qpll_bands.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2014 - 2015 Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/types.h>
+#include "s7_gtxe2_drp.h"
+#include "gtx7s_qpll_bands.h"
+
+static const u32 gtx7s_qpll_channel_address_lut
+ [GTX7S_QPLL_NUM_CHANNEL_DRP_REGS] = {
+ RXCDR_CFG0_ADDR,
+ RXCDR_CFG1_ADDR,
+ RXCDR_CFG2_ADDR,
+ RXCDR_CFG3_ADDR,
+ RXCDR_CFG4_ADDR,
+ RXOUT_DIV_ADDR,
+ TXOUT_DIV_ADDR,
+ RX_DFE_LPM_CFG_ADDR,
+ QPLL_CFG0_ADDR,
+ QPLL_CFG1_ADDR
+};
+
+static const u32 gtx7s_qpll_channel_offset_lut
+ [GTX7S_QPLL_NUM_CHANNEL_DRP_REGS] = {
+ RXCDR_CFG0_OFFSET,
+ RXCDR_CFG1_OFFSET,
+ RXCDR_CFG2_OFFSET,
+ RXCDR_CFG3_OFFSET,
+ RXCDR_CFG4_OFFSET,
+ RXOUT_DIV_OFFSET,
+ TXOUT_DIV_OFFSET,
+ RX_DFE_LPM_CFG_OFFSET,
+ QPLL_CFG0_OFFSET,
+ QPLL_CFG1_OFFSET
+};
+
+static const u32 gtx7s_qpll_channel_mask_lut
+ [GTX7S_QPLL_NUM_CHANNEL_DRP_REGS] = {
+ RXCDR_CFG0_MASK,
+ RXCDR_CFG1_MASK,
+ RXCDR_CFG2_MASK,
+ RXCDR_CFG3_MASK,
+ RXCDR_CFG4_MASK,
+ RXOUT_DIV_MASK,
+ TXOUT_DIV_MASK,
+ RX_DFE_LPM_CFG_MASK,
+ QPLL_CFG0_MASK,
+ QPLL_CFG1_MASK
+};
+
+/* Note bands run vertically from 1 to 10 */
+static const u16 gtx7s_qpll_channel_param_lut[GTX7S_QPLL_NUM_CHANNEL_DRP_REGS]
+ [GTX7S_QPLL_NUM_LINE_RATE_BANDS] = {
+{0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20},/* RXCDR_CFG0 */
+{0x1008, 0x1010, 0x1020, 0x1010, 0x1020, 0x1040, 0x1020, 0x1040, 0x1040, 0x1040},/* RXCDR_CFG1 */
+{0x23ff, 0x23ff, 0x23ff, 0x23ff, 0x23ff, 0x23ff, 0x23ff, 0x23ff, 0x23ff, 0x23ff},/* RXCDR_CFG2 */
+{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, /* RXCDR_CFG3 */
+{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},/* RXCDR_CFG4 */
+{0x3e8, 0x4, 0x2, 0x3, 0x2, 0x1, 0x2, 0x1, 0x1, 0x1},/* RXOUT_DIV */
+{0x3e8, 0x4, 0x2, 0x3, 0x2, 0x1, 0x2, 0x1, 0x1, 0x1},/* TXOUT_DIV */
+{0x904, 0x904, 0x904, 0x904, 0x904, 0x904, 0x904, 0x904, 0x104, 0x104},/* RX_DFE_LPM_CFG */
+{0x1c1, 0x1c1, 0x1c1, 0x181, 0x1c1, 0x1c1, 0x181, 0x1c1, 0x1c1, 0x181},/* QPLL_CFG0 */
+{0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68} /* QPLL_CFG1 */
+};
+
+u32 get_gtx7s_qpll_address_lut(u32 lut_address)
+{
+ return gtx7s_qpll_channel_address_lut[lut_address];
+}
+
+u32 get_gtx7s_qpll_offset_lut(u32 lut_address)
+{
+ return gtx7s_qpll_channel_offset_lut[lut_address];
+}
+
+u32 get_gtx7s_qpll_mask_lut(u32 lut_address)
+{
+ return gtx7s_qpll_channel_mask_lut[lut_address];
+}
+
+u16 get_gtx7s_qpll_param_lut(u32 param_address, u32 band_address)
+{
+ return gtx7s_qpll_channel_param_lut[param_address][band_address];
+}
diff --git a/drivers/misc/jesd204b/gtx7s_qpll_bands.h b/drivers/misc/jesd204b/gtx7s_qpll_bands.h
new file mode 100644
index 000000000000..8b9f6c24efb4
--- /dev/null
+++ b/drivers/misc/jesd204b/gtx7s_qpll_bands.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2014 - 2015 Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/types.h>
+
+#ifndef GTX7S_QPLL_BANDS_H_
+#define GTX7S_QPLL_BANDS_H_
+
+#define GTX7S_QPLL_NUM_CHANNEL_DRP_REGS 10
+#define GTX7S_QPLL_NUM_LINE_RATE_BANDS 10
+
+u32 get_gtx7s_qpll_address_lut(u32);
+u32 get_gtx7s_qpll_offset_lut(u32);
+u32 get_gtx7s_qpll_mask_lut(u32);
+u16 get_gtx7s_qpll_param_lut(u32, u32);
+
+#endif /* GTX7S_QPLL_BANDS_H_ */
diff --git a/drivers/misc/jesd204b/jesd_phy.c b/drivers/misc/jesd204b/jesd_phy.c
new file mode 100644
index 000000000000..c35d9433d0fc
--- /dev/null
+++ b/drivers/misc/jesd204b/jesd_phy.c
@@ -0,0 +1,384 @@
+/*
+ * Jesd204b phy support
+ *
+ * Copyright (C) 2014 - 2015 Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include "jesd_phy.h"
+#include "gtx7s_cpll_bands.h"
+#include "gtx7s_qpll_bands.h"
+
+#define PLATFORM_JESD204_PHY_ADDR 0x41E10000
+#define JESD_PHY_LOOP_OFF 0
+#define JESD_PHY_LOOP_PCS 1
+#define JESD_PHY_LOOP_PMA 2
+#define JESD_PHY_LOOP_MAX 2
+
+static inline void jesd204b_phy_write(struct jesd204b_phy_state *st,
+ unsigned reg, unsigned val)
+{
+ iowrite32(val, st->phy + reg);
+}
+
+static inline unsigned int jesd204b_phy_read(struct jesd204b_phy_state *st,
+ unsigned reg)
+{
+ return ioread32(st->phy + reg);
+}
+
+#define NUM_GT_CHANNELS 8
+
+#define QPLL 0x3 /* QPLL (7 series) QPLL1 (UltraScale) */
+#define QPLL0 0x2 /* (UltraScale Only) */
+#define CPLL 0x0
+
+#define DRPREAD BIT(30)
+#define DRPWRITE BIT(31)
+
+#define NR_COMMON_DRP_INTERFACES 0x008
+#define NR_TRANS_DRP_INTERFACES 0x00C
+
+#define CHANNEL_DRP_BASE 0x200
+#define CHANNEL_DRP_ADDR 0x204
+#define CHANNEL_DRP_DREAD 0x20C
+#define CHANNEL_DRP_DWRITE 0x208
+#define CHANNEL_DRP_STAT 0x214
+
+#define CHANNEL_XCVR_SEL 0x400
+#define CHANNEL_XCVR_TXPLL 0x40C
+#define CHANNEL_XCVR_RXPLL 0x410
+#define CHANNEL_XCVR_LOOPB 0x41C
+
+/*
+ * Issue a DRP read transaction for @addr and return the read-data
+ * register contents.
+ * NOTE(review): unlike the write path there is no busy/STAT poll here —
+ * presumably read data is valid by the time DREAD is sampled; confirm
+ * against the JESD204 PHY IP documentation.
+ */
+static u32 read_channel_drp_reg(struct jesd204b_phy_state *st, u32 addr)
+{
+	u32 temp;
+
+	jesd204b_phy_write(st, CHANNEL_DRP_ADDR, (DRPREAD | addr));
+	temp = jesd204b_phy_read(st, CHANNEL_DRP_DREAD);
+	return temp;
+}
+
+/*
+ * Issue a DRP write of @data to @addr and poll the status register until
+ * the interface goes idle, giving up after roughly 10 ms.
+ *
+ * Fix: the original checked "if (!loop)" after a "while (loop--)" loop;
+ * on timeout the post-decrement wraps the u32 counter to 0xFFFFFFFF so
+ * the timeout was never reported, while a success on the final poll was
+ * misreported as a timeout.  Detect the timeout inside the poll loop
+ * instead.
+ */
+static void write_channel_drp_reg(struct jesd204b_phy_state *st, u32 addr,
+				  u32 data)
+{
+	u32 loop = 10;
+
+	jesd204b_phy_write(st, CHANNEL_DRP_DWRITE, data);
+	jesd204b_phy_write(st, CHANNEL_DRP_ADDR, (DRPWRITE | addr));
+
+	while (jesd204b_phy_read(st, CHANNEL_DRP_STAT)) {
+		if (!loop--) {
+			dev_err(st->dev, "DRP wait timeout\n");
+			return;
+		}
+		msleep(1);
+	}
+}
+
+/*
+ * Read back the TX/RX PLL selection of every DRP interface.
+ *
+ * NOTE(review): the values read from CHANNEL_XCVR_TXPLL/RXPLL are stored
+ * into the local 'pll' and overwritten each iteration — nothing is
+ * returned or cached.  The reads appear to exist only for their hardware
+ * side effect (or as a debugging aid); confirm whether the result was
+ * ever meant to be propagated to st->pll.
+ */
+static void read_plls(struct jesd204b_phy_state *st)
+{
+	int i;
+	int pll = st->pll;
+	u32 no_of_common_drp_interfaces = 1;
+
+	/* CPLL is per-transceiver; other PLL types use the common count. */
+	if (st->pll == CPLL)
+		no_of_common_drp_interfaces = jesd204b_phy_read(
+				st, NR_TRANS_DRP_INTERFACES);
+	else
+		no_of_common_drp_interfaces = jesd204b_phy_read(
+				st, NR_COMMON_DRP_INTERFACES);
+
+	for (i = 0; i < no_of_common_drp_interfaces; i++) {
+		jesd204b_phy_write(st, CHANNEL_XCVR_SEL, i);
+		pll = jesd204b_phy_read(st, CHANNEL_XCVR_TXPLL);
+		pll = jesd204b_phy_read(st, CHANNEL_XCVR_RXPLL);
+	}
+}
+
+/*
+ * Program the TX and RX PLL selection of every DRP interface to @pll.
+ * CPLL configurations are counted per transceiver, any other PLL type
+ * uses the common interface count.
+ */
+static void configure_plls(struct jesd204b_phy_state *st, u32 pll)
+{
+	u32 count_reg = (pll == CPLL) ? NR_TRANS_DRP_INTERFACES :
+					NR_COMMON_DRP_INTERFACES;
+	u32 nr_interfaces = jesd204b_phy_read(st, count_reg);
+	int idx;
+
+	for (idx = 0; idx < nr_interfaces; idx++) {
+		jesd204b_phy_write(st, CHANNEL_XCVR_SEL, idx);
+		jesd204b_phy_write(st, CHANNEL_XCVR_TXPLL, pll);
+		jesd204b_phy_write(st, CHANNEL_XCVR_RXPLL, pll);
+	}
+}
+
+/*
+ * Program the line-rate dependent channel DRP registers of every
+ * transceiver DRP interface.  @setting is the line-rate band index
+ * (the column of the *_param_lut tables).
+ *
+ * NOTE(review): any pll value other than QPLL (e.g. QPLL0) falls into
+ * the CPLL branch — confirm that is intended for UltraScale parts.
+ */
+static void configure_channel_drp(struct jesd204b_phy_state *st, u32 setting)
+{
+	u32 i, j, addr, temp, no_of_common_drp_interfaces;
+	u32 no_channel_drp_reg = GTX7S_QPLL_NUM_CHANNEL_DRP_REGS;
+
+	no_of_common_drp_interfaces = jesd204b_phy_read(
+			st, NR_TRANS_DRP_INTERFACES);
+
+	if (st->pll == CPLL)
+		no_channel_drp_reg = GTX7S_CPLL_NUM_CHANNEL_DRP_REGS;
+	for (i = 0; i < no_of_common_drp_interfaces; i++) {
+		jesd204b_phy_write(st, CHANNEL_DRP_BASE, i);
+		for (j = 0; j < no_channel_drp_reg; j++) {
+			/* Get the register address */
+			if (st->pll == QPLL) {
+				addr = get_gtx7s_qpll_address_lut(j);
+
+				/* Read the register */
+				temp = read_channel_drp_reg(st, addr);
+
+				/*
+				 * Read-modify-write: clear the field with
+				 * the LUT mask, then OR in the band value
+				 * shifted to the field offset.
+				 */
+				temp &= (0xFFFF ^ (get_gtx7s_qpll_mask_lut(j)));
+				temp |= ((get_gtx7s_qpll_param_lut(j, setting)
+					<< get_gtx7s_qpll_offset_lut(j))
+					& get_gtx7s_qpll_mask_lut(j));
+			} else {
+				addr = get_gtx7s_cpll_address_lut(j);
+
+				temp = read_channel_drp_reg(st, addr);
+
+				temp &= (0xFFFF ^ (get_gtx7s_cpll_mask_lut(j)));
+				temp |= ((get_gtx7s_cpll_param_lut(j, setting)
+					<< get_gtx7s_cpll_offset_lut(j))
+					& get_gtx7s_cpll_mask_lut(j));
+			}
+			write_channel_drp_reg(st, addr, temp);
+		}
+	}
+}
+
+/*
+ * Reprogram the channel DRP registers for line-rate band @band.
+ * Callers are expected to have selected the correct PLL beforehand
+ * (see configure_plls()).
+ */
+void jesd204_phy_set_speed(struct jesd204b_phy_state *st, u32 band)
+{
+	/* make sure we have the correct PLL's selected. */
+	configure_channel_drp(st, band);
+}
+
+/* Thin initialization wrapper: apply the DRP settings for @line_rate. */
+static void jesd204_phy_init(struct jesd204b_phy_state *st, int line_rate)
+{
+	jesd204_phy_set_speed(st, line_rate);
+}
+
+/*
+ * Apply loopback mode @loopval (JESD_PHY_LOOP_OFF/PCS/PMA) to every
+ * channel.  Returns 0 on success, -EINVAL for out-of-range values.
+ */
+int jesd204_phy_set_loop(struct jesd204b_phy_state *st, u32 loopval)
+{
+	u32 nr_channels;
+	int ch;
+
+	nr_channels = jesd204b_phy_read(st, NR_COMMON_DRP_INTERFACES);
+
+	if (loopval > JESD_PHY_LOOP_MAX)
+		return -EINVAL;
+
+	for (ch = 0; ch < nr_channels; ch++) {
+		jesd204b_phy_write(st, CHANNEL_XCVR_SEL, ch);
+		jesd204b_phy_write(st, CHANNEL_XCVR_LOOPB, loopval);
+	}
+
+	return 0;
+}
+
+/* sysfs show: report the currently selected PLL type ("cpll"/"qpll"). */
+static ssize_t jesd204b_pll_read(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct jesd204b_phy_state *state = dev_get_drvdata(dev);
+
+	read_plls(state);
+
+	return sprintf(buf, "%s\n", (state->pll == CPLL) ? "cpll" : "qpll");
+}
+
+/*
+ * sysfs store: select and program the PLL type
+ * (0 = CPLL, 2 = QPLL0 (UltraScale only), 3 = QPLL / QPLL1).
+ *
+ * Fixes two bugs: the kstrtouint() result check was inverted
+ * ("if (!ret) return 0;" returned early on a successful parse and fell
+ * through with an unparsed value on failure), and both error paths
+ * returned 0, which makes userspace retry the write indefinitely.
+ */
+static ssize_t jesd204b_configure_pll(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t count)
+{
+	struct jesd204b_phy_state *st = dev_get_drvdata(dev);
+	unsigned int val;
+	int ret;
+
+	ret = kstrtouint(buf, 0, &val);
+	if (ret)
+		return ret;
+
+	if (val > QPLL) {
+		dev_err(dev, "Setting the pll to %d valid values\n"
+			"00 = CPLL\n"
+			"10 = QPLL0 (UltraScale Only)\n"
+			"11 = QPLL (7 series) QPLL1 (UltraScale)\n", val);
+		return -EINVAL;
+	}
+	st->pll = val;
+	configure_plls(st, val);
+
+	return count;
+}
+
+static DEVICE_ATTR(configure_pll, S_IWUSR | S_IRUSR, jesd204b_pll_read,
+ jesd204b_configure_pll);
+
+/* sysfs show: report the currently selected line-rate band in hex. */
+static ssize_t jesd204b_linerate_read(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct jesd204b_phy_state *state = dev_get_drvdata(dev);
+	u32 band = state->band;
+
+	return sprintf(buf, "0x%X\n", band);
+}
+
+/*
+ * sysfs store: select a line-rate band, set the matching reference clock
+ * rate and reprogram the channel DRP registers.  Bands without an entry
+ * in the switch keep the current clock rate (low frequencies are not
+ * supported by the QPLL).
+ */
+static ssize_t jesd204b_linerate_write(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf, size_t count)
+{
+	struct jesd204b_phy_state *st = dev_get_drvdata(dev);
+	int ret;
+	/* Low frequencies are not supported by qpll */
+
+	ret = kstrtouint(buf, 0, &st->band);
+	if (ret)
+		return ret;
+
+	dev_info(dev, "Setting the line rate to band to %d\n", st->band);
+	/* QPLL - freq options in phy
+	 * 62.5
+	 * 78.125
+	 * 94.697
+	 * 97.656
+	 * 125.000
+	 * 156.25
+	 * 187.5
+	 * 189.394
+	 * 195.313
+	 * 234.375
+	 * 250.000
+	 * 284.091
+	 * 292.969
+	 */
+	switch (st->band) {
+	case 2:
+		clk_set_rate(st->clk, 62500000);	/* 2.5G */
+		break;
+	case 4:
+		clk_set_rate(st->clk, 97656000);	/* 3.9G */
+		break;
+	case 6:
+		clk_set_rate(st->clk, 125000000);	/* 5G */
+		break;
+	case 7:
+		clk_set_rate(st->clk, 156250000);	/* 6.25G */
+		break;
+	case 8:
+		clk_set_rate(st->clk, 195313000);	/* 7.812G */
+		break;
+	case 9:
+		clk_set_rate(st->clk, 250000000);	/* 10G */
+		break;
+	default:
+		break;
+	}
+
+	jesd204_phy_init(st, st->band);
+
+	return count;
+}
+
+static DEVICE_ATTR(line_rate_band, S_IWUSR | S_IRUSR, jesd204b_linerate_read,
+ jesd204b_linerate_write);
+
+/* Match table for of_platform binding */
+static const struct of_device_id jesd204b_phy_of_match[] = {
+ { .compatible = "xlnx,jesd204-phy-2.0", },
+ { /* end of list */ },
+};
+
+/*
+ * Probe: map the PHY registers, parse the required DT properties, set
+ * the reference clock rate and create the sysfs control files.
+ *
+ * Fix: propagate the real error from devm_clk_get() instead of mapping
+ * every failure (including hard errors such as -ENOENT) to
+ * -EPROBE_DEFER; devm_clk_get() already returns -EPROBE_DEFER itself
+ * when deferral is appropriate.
+ */
+static int jesd204b_phy_probe(struct platform_device *pdev)
+{
+	struct jesd204b_phy_state *st;
+	struct resource *mem; /* IO mem resources */
+	int ret;
+	u32 ref_clk;
+
+	st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
+	if (!st)
+		return -ENOMEM;
+
+	st->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(st->clk))
+		return PTR_ERR(st->clk);
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	st->phy = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(st->phy)) {
+		dev_err(&pdev->dev, "Failed ioremap\n");
+		return PTR_ERR(st->phy);
+	}
+	st->dev = &pdev->dev;
+	platform_set_drvdata(pdev, st);
+
+	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,lanes",
+				   &st->lanes);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to read required dt property\n");
+		return ret;
+	}
+	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,pll-selection",
+				   &st->pll);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to read required dt property\n");
+		return ret;
+	}
+	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,gt-refclk-freq",
+				   &ref_clk);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to read required dt property\n");
+		return ret;
+	}
+
+	clk_set_rate(st->clk, (unsigned long)ref_clk);
+	/* NOTE(review): device_create_file() results are ignored; a failure
+	 * only loses the sysfs knob, the device still works.
+	 */
+	device_create_file(&pdev->dev, &dev_attr_configure_pll);
+	device_create_file(&pdev->dev, &dev_attr_line_rate_band);
+
+	ret = clk_prepare_enable(st->clk);
+	if (ret) {
+		dev_err(&pdev->dev, "Unable to enable clock.\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Remove: disable the reference clock and tear down the sysfs files.
+ *
+ * Fix: the clock was obtained with devm_clk_get(), so the original
+ * clk_put() released a reference the driver never took (the devm core
+ * puts it automatically on driver detach) — drop it.
+ */
+static int jesd204b_phy_remove(struct platform_device *pdev)
+{
+	struct jesd204b_phy_state *st = platform_get_drvdata(pdev);
+
+	clk_disable_unprepare(st->clk);
+	device_remove_file(&pdev->dev, &dev_attr_configure_pll);
+	device_remove_file(&pdev->dev, &dev_attr_line_rate_band);
+	return 0;
+}
+
+static struct platform_driver jesd204b_driver = {
+ .driver = {
+ .name = "jesd204b_phy",
+ .of_match_table = jesd204b_phy_of_match,
+ },
+ .probe = jesd204b_phy_probe,
+ .remove = jesd204b_phy_remove,
+};
+
+module_platform_driver(jesd204b_driver);
+
+MODULE_AUTHOR("Shubhrajyoti Datta <shubhraj@xilinx.com>");
+MODULE_DESCRIPTION("AXI-JESD204B Phy Interface Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/jesd204b/jesd_phy.h b/drivers/misc/jesd204b/jesd_phy.h
new file mode 100644
index 000000000000..c15328532c3f
--- /dev/null
+++ b/drivers/misc/jesd204b/jesd_phy.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2014 - 2015 Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef JESD_PHY_H_
+#define JESD_PHY_H_
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+/**
+ * struct jesd204b_phy_state - per-device state of the JESD204B PHY driver
+ * @dev:	backing device, used for error logging
+ * @phy:	ioremapped PHY register base
+ * @clk:	line-rate reference clock
+ * @vers_id:	IP version id (NOTE(review): not set anywhere visible —
+ *		confirm it is still needed)
+ * @addr:	scratch register address (NOTE(review): unused in jesd_phy.c)
+ * @lanes:	lane count from the "xlnx,lanes" DT property
+ * @band:	currently selected line-rate band
+ * @pll:	selected PLL type (CPLL/QPLL0/QPLL)
+ * @rate:	cached clock rate in Hz
+ */
+struct jesd204b_phy_state {
+	struct device	*dev;
+	void __iomem	*phy;
+	struct clk	*clk;
+	u32		vers_id;
+	u32		addr;
+	u32		lanes;
+	u32		band;
+	u32		pll;
+	unsigned long	rate;
+};
+
+int jesd204_phy_set_loop(struct jesd204b_phy_state *st, u32 loopval);
+
+#endif /* JESD_PHY_H_ */
diff --git a/drivers/misc/jesd204b/s7_gtxe2_drp.h b/drivers/misc/jesd204b/s7_gtxe2_drp.h
new file mode 100644
index 000000000000..f08a211432bd
--- /dev/null
+++ b/drivers/misc/jesd204b/s7_gtxe2_drp.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2014 - 2015 Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#define TXOUT_DIV_ADDR 0x88
+#define TXOUT_DIV_MASK 0x70
+#define TXOUT_DIV_OFFSET 0x4
+#define TXOUT_DIV_WIDTH 0x3
+#define TXOUT_DIV_DEFAULT 0x0
+
+#define RXOUT_DIV_ADDR 0x88
+#define RXOUT_DIV_MASK 0x7
+#define RXOUT_DIV_OFFSET 0x0
+#define RXOUT_DIV_WIDTH 0x3
+#define RXOUT_DIV_DEFAULT 0x0
+
+#define RXCDR_CFG0_ADDR 0xa8
+#define RXCDR_CFG0_MASK 0xffff
+#define RXCDR_CFG0_OFFSET 0x0
+#define RXCDR_CFG0_WIDTH 0x10
+#define RXCDR_CFG0_DEFAULT 0x0
+
+#define RXCDR_CFG1_ADDR 0xa9
+#define RXCDR_CFG1_MASK 0xffff
+#define RXCDR_CFG1_OFFSET 0x0
+#define RXCDR_CFG1_WIDTH 0x10
+#define RXCDR_CFG1_DEFAULT 0x0
+
+#define RXCDR_CFG2_ADDR 0xaa
+#define RXCDR_CFG2_MASK 0xffff
+#define RXCDR_CFG2_OFFSET 0x0
+#define RXCDR_CFG2_WIDTH 0x10
+#define RXCDR_CFG2_DEFAULT 0x0
+
+#define RXCDR_CFG3_ADDR 0xab
+#define RXCDR_CFG3_MASK 0xffff
+#define RXCDR_CFG3_OFFSET 0x0
+#define RXCDR_CFG3_WIDTH 0x10
+#define RXCDR_CFG3_DEFAULT 0x0
+
+#define RXCDR_CFG4_ADDR 0xac
+#define RXCDR_CFG4_MASK 0xff
+#define RXCDR_CFG4_OFFSET 0x0
+#define RXCDR_CFG4_WIDTH 0x8
+#define RXCDR_CFG4_DEFAULT 0x0
+
+#define RX_DFE_LPM_CFG_ADDR 0x29
+#define RX_DFE_LPM_CFG_MASK 0xffff
+#define RX_DFE_LPM_CFG_OFFSET 0x0
+#define RX_DFE_LPM_CFG_WIDTH 0x10
+#define RX_DFE_LPM_CFG_DEFAULT 0x0
+
+#define QPLL_CFG0_ADDR 0x32
+#define QPLL_CFG0_MASK 0xffff
+#define QPLL_CFG0_OFFSET 0x0
+#define QPLL_CFG0_WIDTH 0x10
+#define QPLL_CFG0_DEFAULT 0x0
+
+#define QPLL_CFG1_ADDR 0x33
+#define QPLL_CFG1_MASK 0x7ff
+#define QPLL_CFG1_OFFSET 0x0
+#define QPLL_CFG1_WIDTH 0xb
+#define QPLL_CFG1_DEFAULT 0x0
+
+#define QPLL_REFCLK_DIV_M_ADDR 0x33
+#define QPLL_REFCLK_DIV_M_MASK 0xf800
+#define QPLL_REFCLK_DIV_M_OFFSET 0xb
+#define QPLL_REFCLK_DIV_M_WIDTH 0x5
+#define QPLL_REFCLK_DIV_M_DEFAULT 0x0
+
+#define QPLL_FBDIV_N_ADDR 0x36
+#define QPLL_FBDIV_N_MASK 0x3ff
+#define QPLL_FBDIV_N_OFFSET 0x0
+#define QPLL_FBDIV_N_WIDTH 0xa
+#define QPLL_FBDIV_N_DEFAULT 0x0
+
+#define QPLL_FBDIV_RATIO_ADDR 0x37
+#define QPLL_FBDIV_RATIO_MASK 0x40
+#define QPLL_FBDIV_RATIO_OFFSET 0x6
+#define QPLL_FBDIV_RATIO_WIDTH 0x1
+#define QPLL_FBDIV_RATIO_DEFAULT 0x0
+
+#define CPLL_CFG0_ADDR 0x5c
+#define CPLL_CFG0_MASK 0xff00
+#define CPLL_CFG0_OFFSET 0x8
+#define CPLL_CFG0_WIDTH 0x8
+#define CPLL_CFG0_DEFAULT 0x0
+
+#define CPLL_CFG1_ADDR 0x5d
+#define CPLL_CFG1_MASK 0xffff
+#define CPLL_CFG1_OFFSET 0x0
+#define CPLL_CFG1_WIDTH 0x10
+#define CPLL_CFG1_DEFAULT 0x0
+
+#define CPLL_REFCLK_DIV_M_ADDR 0x5e
+#define CPLL_REFCLK_DIV_M_MASK 0x1f00
+#define CPLL_REFCLK_DIV_M_OFFSET 0x8
+#define CPLL_REFCLK_DIV_M_WIDTH 0x5
+#define CPLL_REFCLK_DIV_M_DEFAULT 0x0
+
+#define CPLL_FB_DIV_45_N1_ADDR 0x5e
+#define CPLL_FB_DIV_45_N1_MASK 0x80
+#define CPLL_FB_DIV_45_N1_OFFSET 0x7
+#define CPLL_FB_DIV_45_N1_WIDTH 0x1
+#define CPLL_FB_DIV_45_N1_DEFAULT 0x0
+
+#define CPLL_FBDIV_N2_ADDR 0x5e
+#define CPLL_FBDIV_N2_MASK 0x7f
+#define CPLL_FBDIV_N2_OFFSET 0x0
+#define CPLL_FBDIV_N2_WIDTH 0x7
+#define CPLL_FBDIV_N2_DEFAULT 0x0
diff --git a/drivers/misc/jesd204b/xilinx_jesd204b.c b/drivers/misc/jesd204b/xilinx_jesd204b.c
new file mode 100644
index 000000000000..304557c8978e
--- /dev/null
+++ b/drivers/misc/jesd204b/xilinx_jesd204b.c
@@ -0,0 +1,399 @@
+/*
+ * Xilinx AXI-JESD204B Interface Module
+ *
+ * Copyright 2014 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ *
+ * http://wiki.analog.com/resources/fpga/xilinx/
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
+#include "xilinx_jesd204b.h"
+
+/**
+ * struct child_clk - clk_hw wrapper exposing the core's output clock
+ * @hw:		common clock framework handle
+ * @st:		owning driver state
+ * @rate:	cached parent rate in Hz
+ * @enabled:	software enable state tracked by the clk_ops below
+ */
+struct child_clk {
+	struct clk_hw	hw;
+	struct jesd204b_state	*st;
+	unsigned long	rate;
+	bool		enabled;
+};
+
+/* Map a clk_hw pointer back to its containing struct child_clk. */
+#define to_clk_priv(_hw) container_of(_hw, struct child_clk, hw)
+
+/* MMIO write of @val to core register @reg. */
+static inline void jesd204b_write(struct jesd204b_state *st,
+				  unsigned int reg, unsigned int val)
+{
+	iowrite32(val, st->regs + reg);
+}
+
+/* MMIO read of core register @reg. */
+static inline unsigned int jesd204b_read(struct jesd204b_state *st,
+					 unsigned int reg)
+{
+	return ioread32(st->regs + reg);
+}
+
+/*
+ * Format the full ILA configuration and test-mode counters of @lane into
+ * @buf for the per-lane sysfs "lane<N>_info" attributes.
+ *
+ * NOTE(review): the accumulated sprintf() output is not checked against
+ * PAGE_SIZE; the fixed field set appears to fit comfortably, but confirm
+ * before adding more lines.
+ */
+static ssize_t jesd204b_laneinfo_read(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf, unsigned int lane)
+{
+	struct jesd204b_state *st = dev_get_drvdata(dev);
+	int ret;
+	unsigned int val1, val2, val3;
+
+	/* Link configuration: device/bank/lane ids, lane count, scrambling,
+	 * octets per frame.
+	 */
+	val1 = jesd204b_read(st, XLNX_JESD204_REG_ID_L(lane));
+	val2 = jesd204b_read(st, XLNX_JESD204_REG_LANE_F(lane));
+	val3 = jesd204b_read(st, XLNX_JESD204_REG_SCR_S_HD_CF(lane));
+	ret = sprintf(buf,
+		      "DID: %d, BID: %d, LID: %d, L: %d, SCR: %d, F: %d\n",
+		      XLNX_JESD204_LANE_DID(val1),
+		      XLNX_JESD204_LANE_BID(val1),
+		      XLNX_JESD204_LANE_LID(val1),
+		      XLNX_JESD204_LANE_L(val1),
+		      XLNX_JESD204_LANE_SCR(val3),
+		      XLNX_JESD204_LANE_F(val2));
+
+	/* Frame/converter geometry. */
+	val1 = jesd204b_read(st, XLNX_JESD204_REG_LANE_K(lane));
+	val2 = jesd204b_read(st, XLNX_JESD204_REG_M_N_ND_CS(lane));
+
+	ret += sprintf(buf + ret,
+		       "K: %d, M: %d, N: %d, CS: %d, S: %d, N': %d, HD: %d\n",
+		       XLNX_JESD204_LANE_K(val1),
+		       XLNX_JESD204_LANE_M(val2),
+		       XLNX_JESD204_LANE_N(val2),
+		       XLNX_JESD204_LANE_CS(val2),
+		       XLNX_JESD204_LANE_S(val3),
+		       XLNX_JESD204_LANE_ND(val2),
+		       XLNX_JESD204_LANE_HD(val3));
+
+	/* Checksum and control-field width. */
+	val1 = jesd204b_read(st, XLNX_JESD204_REG_FCHK(lane));
+	ret += sprintf(buf + ret, "FCHK: 0x%X, CF: %d\n",
+		       XLNX_JESD204_LANE_FCHK(val1),
+		       XLNX_JESD204_LANE_CF(val3));
+
+	/* Phase-adjust handshake and protocol version. */
+	val1 = jesd204b_read(st, XLNX_JESD204_REG_SC2_ADJ_CTRL(lane));
+	val2 = jesd204b_read(st, XLNX_JESD204_REG_LANE_VERSION(lane));
+	ret += sprintf(buf + ret,
+		       "ADJCNT: %d, PHYADJ: %d, ADJDIR: %d, JESDV: %d, SUBCLASS: %d\n",
+		       XLNX_JESD204_LANE_ADJ_CNT(val1),
+		       XLNX_JESD204_LANE_PHASE_ADJ_REQ(val1),
+		       XLNX_JESD204_LANE_ADJ_CNT_DIR(val1),
+		       XLNX_JESD204_LANE_JESDV(val2),
+		       XLNX_JESD204_LANE_SUBCLASS(val2));
+
+	/* Test-mode counters. */
+	ret += sprintf(buf + ret, "MFCNT : 0x%X\n",
+		       jesd204b_read(st, XLNX_JESD204_REG_TM_MFC_CNT(lane)));
+	ret += sprintf(buf + ret, "ILACNT: 0x%X\n",
+		       jesd204b_read(st, XLNX_JESD204_REG_TM_ILA_CNT(lane)));
+	ret += sprintf(buf + ret, "ERRCNT: 0x%X\n",
+		       jesd204b_read(st, XLNX_JESD204_REG_TM_ERR_CNT(lane)));
+	ret += sprintf(buf + ret, "BUFCNT: 0x%X\n",
+		       jesd204b_read(st, XLNX_JESD204_REG_TM_BUF_ADJ(lane)));
+	ret += sprintf(buf + ret, "LECNT: 0x%X\n",
+		       jesd204b_read(st,
+				     XLNX_JESD204_REG_TM_LINK_ERR_CNT(lane)));
+
+	ret += sprintf(buf + ret, "FC: %lu\n", st->rate);
+
+	return ret;
+}
+
+/*
+ * Generate a read-only "lane<N>_info" sysfs attribute that forwards to
+ * jesd204b_laneinfo_read() with the lane index baked in.
+ */
+#define JESD_LANE(_x)						\
+static ssize_t jesd204b_lane##_x##_info_read(struct device *dev,	\
+					     struct device_attribute *attr, \
+					     char *buf)		\
+{								\
+	return jesd204b_laneinfo_read(dev, attr, buf, _x);	\
+}								\
+static DEVICE_ATTR(lane##_x##_info, 0400, jesd204b_lane##_x##_info_read, \
+		   NULL)
+
+/* One attribute per supported lane; probe registers only the ones
+ * present (see the switch on st->lanes).
+ */
+JESD_LANE(0);
+JESD_LANE(1);
+JESD_LANE(2);
+JESD_LANE(3);
+JESD_LANE(4);
+JESD_LANE(5);
+JESD_LANE(6);
+JESD_LANE(7);
+
+/*
+ * Format the per-lane sync error flags (RX only) for the
+ * "lane<N>_syncstat" sysfs attributes.
+ *
+ * Fix: the masked bit values were printed raw with %d, so a set flag
+ * appeared as an arbitrary power of two (e.g. 65536) instead of 1;
+ * normalize to 0/1 with !!.
+ */
+static ssize_t jesd204b_lane_syscstat_read(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf, unsigned int lane)
+{
+	unsigned int stat;
+	struct jesd204b_state *st = dev_get_drvdata(dev);
+
+	stat = jesd204b_read(st, XLNX_JESD204_REG_SYNC_ERR_STAT);
+
+	return sprintf(buf,
+		       "NOT_IN_TAB: %d, DISPARITY: %d, UNEXPECTED_K: %d\n",
+		       !!(stat & XLNX_JESD204_SYNC_ERR_NOT_IN_TAB(lane)),
+		       !!(stat & XLNX_JESD204_SYNC_ERR_DISPARITY(lane)),
+		       !!(stat & XLNX_JESD204_SYNC_ERR_UNEXPECTED_K(lane)));
+}
+
+/*
+ * Generate a read-only "lane<N>_syncstat" sysfs attribute forwarding to
+ * jesd204b_lane_syscstat_read() with the lane index baked in.
+ */
+#define JESD_SYNCSTAT_LANE(_x)					\
+static ssize_t jesd204b_lane##_x##_syncstat_read(struct device *dev, \
+						 struct device_attribute *attr,\
+						 char *buf)	\
+{								\
+	return jesd204b_lane_syscstat_read(dev, attr, buf, _x);	\
+}								\
+static DEVICE_ATTR(lane##_x##_syncstat, 0400,			\
+		   jesd204b_lane##_x##_syncstat_read, NULL)
+
+JESD_SYNCSTAT_LANE(0);
+JESD_SYNCSTAT_LANE(1);
+JESD_SYNCSTAT_LANE(2);
+JESD_SYNCSTAT_LANE(3);
+JESD_SYNCSTAT_LANE(4);
+JESD_SYNCSTAT_LANE(5);
+JESD_SYNCSTAT_LANE(6);
+JESD_SYNCSTAT_LANE(7);
+
+/*
+ * sysfs store for the reg_access debug attribute.
+ * "addr val" writes @val to @addr; "addr" alone only latches the address
+ * so a subsequent read of the attribute returns that register.
+ *
+ * Fix: input that parsed nothing at all used to be silently accepted;
+ * report -EINVAL instead.  The single-value address-select behavior is
+ * preserved.
+ */
+static ssize_t jesd204b_reg_write(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct jesd204b_state *st = dev_get_drvdata(dev);
+	unsigned int val;
+	int ret;
+
+	ret = sscanf(buf, "%i %i", &st->addr, &val);
+	if (ret == 2)
+		jesd204b_write(st, st->addr, val);
+	else if (ret != 1)
+		return -EINVAL;
+
+	return count;
+}
+
+/* sysfs show: read back the register last selected via reg_access. */
+static ssize_t jesd204b_reg_read(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct jesd204b_state *state = dev_get_drvdata(dev);
+	unsigned int val = jesd204b_read(state, state->addr);
+
+	return sprintf(buf, "0x%X\n", val);
+}
+
+static DEVICE_ATTR(reg_access, 0600, jesd204b_reg_read,
+ jesd204b_reg_write);
+
+/* sysfs show: dump the raw link SYNC status register. */
+static ssize_t jesd204b_syncreg_read(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct jesd204b_state *state = dev_get_drvdata(dev);
+	unsigned int sync = jesd204b_read(state, XLNX_JESD204_REG_SYNC_STATUS);
+
+	return sprintf(buf, "0x%X\n", sync);
+}
+
+static DEVICE_ATTR(sync_status, 0400, jesd204b_syncreg_read, NULL);
+
+/* clk_ops .recalc_rate: the child clock follows its parent 1:1. */
+static unsigned long jesd204b_clk_recalc_rate(struct clk_hw *hw,
+					      unsigned long parent_rate)
+{
+	return parent_rate;
+}
+
+/* clk_ops .enable: record software enable state; no hardware access. */
+static int jesd204b_clk_enable(struct clk_hw *hw)
+{
+	to_clk_priv(hw)->enabled = true;
+
+	return 0;
+}
+
+/* clk_ops .disable: record software disable state; no hardware access. */
+static void jesd204b_clk_disable(struct clk_hw *hw)
+{
+	to_clk_priv(hw)->enabled = false;
+}
+
+/* clk_ops .is_enabled: report the software-tracked enable state. */
+static int jesd204b_clk_is_enabled(struct clk_hw *hw)
+{
+	return to_clk_priv(hw)->enabled;
+}
+
+static const struct clk_ops clkout_ops = {
+ .recalc_rate = jesd204b_clk_recalc_rate,
+ .enable = jesd204b_clk_enable,
+ .disable = jesd204b_clk_disable,
+ .is_enabled = jesd204b_clk_is_enabled,
+};
+
+/* Match table for of_platform binding */
+static const struct of_device_id jesd204b_of_match[] = {
+ { .compatible = "xlnx,jesd204-5.1",},
+ { .compatible = "xlnx,jesd204-5.2",},
+ { .compatible = "xlnx,jesd204-6.1",},
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, jesd204b_of_match);
+
+/*
+ * Probe: map the core registers, apply DT configuration (lane count,
+ * ILA/scrambling/SYSREF options), reset the core, expose the per-lane
+ * sysfs files and enable the AXI clock.
+ */
+static int jesd204b_probe(struct platform_device *pdev)
+{
+	struct jesd204b_state *st;
+	struct resource *mem; /* IO mem resources */
+	struct clk *clk;
+	struct child_clk *clk_priv;
+	struct clk_init_data init;
+	unsigned int val;
+	int ret;
+
+	/* NOTE(review): every devm_clk_get() failure is mapped to
+	 * -EPROBE_DEFER, masking hard errors — confirm intent.
+	 */
+	clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(clk))
+		return -EPROBE_DEFER;
+
+	st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
+	if (!st)
+		return -ENOMEM;
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	st->regs = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(st->regs)) {
+		dev_err(&pdev->dev, "Failed ioremap\n");
+		return PTR_ERR(st->regs);
+	}
+
+	st->dev = &pdev->dev;
+
+	platform_set_drvdata(pdev, st);
+
+	st->clk = clk;
+	clk_set_rate(st->clk, 156250000);
+	st->rate = clk_get_rate(clk);
+
+	/* Optional property: absent means receive direction. */
+	of_property_read_u32(pdev->dev.of_node, "xlnx,node-is-transmit",
+			     &st->transmit);
+
+	/* Fall back to the lane count reported by the core itself. */
+	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,lanes",
+				   &st->lanes);
+	if (ret)
+		st->lanes = jesd204b_read(st, XLNX_JESD204_REG_LANES) + 1;
+
+	/* NOTE(review): this reset-completion wait has no timeout — a
+	 * stuck core hangs probe forever; confirm acceptable.
+	 */
+	jesd204b_write(st, XLNX_JESD204_REG_RESET, XLNX_JESD204_RESET);
+	while (!jesd204b_read(st, XLNX_JESD204_REG_RESET))
+		msleep(20);
+
+	jesd204b_write(st, XLNX_JESD204_REG_ILA_CTRL,
+		       (of_property_read_bool(pdev->dev.of_node,
+		       "xlnx,lanesync-enable") ? XLNX_JESD204_ILA_EN : 0));
+
+	jesd204b_write(st, XLNX_JESD204_REG_SCR_CTRL,
+		       (of_property_read_bool(pdev->dev.of_node,
+		       "xlnx,scramble-enable") ? XLNX_JESD204_SCR_EN : 0));
+
+	jesd204b_write(st, XLNX_JESD204_REG_SYSREF_CTRL,
+		       (of_property_read_bool(pdev->dev.of_node,
+		       "xlnx,sysref-always-enable") ?
+		       XLNX_JESD204_ALWAYS_SYSREF_EN : 0));
+
+	/* NOTE(review): device_create_file() results are ignored; a
+	 * failure only loses the debug attribute.
+	 */
+	device_create_file(&pdev->dev, &dev_attr_reg_access);
+
+	device_create_file(&pdev->dev, &dev_attr_sync_status);
+	/* Register lane attributes top-down; each case intentionally
+	 * falls through to the smaller lane counts.
+	 */
+	switch (st->lanes) {
+	case 8:
+		device_create_file(&pdev->dev, &dev_attr_lane4_info);
+		device_create_file(&pdev->dev, &dev_attr_lane5_info);
+		device_create_file(&pdev->dev, &dev_attr_lane6_info);
+		device_create_file(&pdev->dev, &dev_attr_lane7_info);
+		if (!st->transmit) {
+			device_create_file(&pdev->dev,
+					   &dev_attr_lane4_syncstat);
+			device_create_file(&pdev->dev,
+					   &dev_attr_lane5_syncstat);
+			device_create_file(&pdev->dev,
+					   &dev_attr_lane6_syncstat);
+			device_create_file(&pdev->dev,
+					   &dev_attr_lane7_syncstat);
+		}
+		/* fall through */
+	case 4:
+		device_create_file(&pdev->dev, &dev_attr_lane2_info);
+		device_create_file(&pdev->dev, &dev_attr_lane3_info);
+		if (!st->transmit) {
+			device_create_file(&pdev->dev,
+					   &dev_attr_lane2_syncstat);
+			device_create_file(&pdev->dev,
+					   &dev_attr_lane3_syncstat);
+		}
+		/* fall through */
+	case 2:
+		device_create_file(&pdev->dev, &dev_attr_lane1_info);
+		if (!st->transmit)
+			device_create_file(&pdev->dev,
+					   &dev_attr_lane1_syncstat);
+		/* fall through */
+	case 1:
+		device_create_file(&pdev->dev, &dev_attr_lane0_info);
+		if (!st->transmit)
+			device_create_file(&pdev->dev,
+					   &dev_attr_lane0_syncstat);
+		break;
+	default:
+
+		break;
+	}
+
+	clk_priv = devm_kzalloc(&pdev->dev, sizeof(*clk_priv), GFP_KERNEL);
+	if (!clk_priv)
+		return -ENOMEM;
+
+	/* struct child_clk assignments */
+	/* NOTE(review): clk_priv->hw.init points at the stack-local 'init'
+	 * and the clock is never registered here — dangling if registration
+	 * is ever added after probe returns; confirm.
+	 */
+	clk_priv->hw.init = &init;
+	clk_priv->rate = st->rate;
+	clk_priv->st = st;
+
+	ret = clk_prepare_enable(clk);
+	if (ret) {
+		dev_err(&pdev->dev, "Unable to enable clock.\n");
+		return ret;
+	}
+	val = jesd204b_read(st, XLNX_JESD204_REG_VERSION);
+
+	dev_info(&pdev->dev,
+		 "AXI-JESD204B %d.%d Rev %d, at 0x%08llX mapped to 0x%p",
+		 XLNX_JESD204_VERSION_MAJOR(val),
+		 XLNX_JESD204_VERSION_MINOR(val),
+		 XLNX_JESD204_VERSION_REV(val),
+		 (unsigned long long)mem->start, st->regs);
+
+	return 0;
+}
+
+/*
+ * Remove: disable the AXI clock.
+ *
+ * Fix: the clock was obtained with devm_clk_get(), so the original
+ * clk_put() released a reference the driver never took (the devm core
+ * puts it automatically on driver detach) — drop it.
+ */
+static int jesd204b_remove(struct platform_device *pdev)
+{
+	struct jesd204b_state *st = platform_get_drvdata(pdev);
+
+	clk_disable_unprepare(st->clk);
+
+	return 0;
+}
+
+static struct platform_driver jesd204b_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = jesd204b_of_match,
+ },
+ .probe = jesd204b_probe,
+ .remove = jesd204b_remove,
+};
+
+module_platform_driver(jesd204b_driver);
+
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AXI-JESD204B Interface Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/jesd204b/xilinx_jesd204b.h b/drivers/misc/jesd204b/xilinx_jesd204b.h
new file mode 100644
index 000000000000..b9946a723a23
--- /dev/null
+++ b/drivers/misc/jesd204b/xilinx_jesd204b.h
@@ -0,0 +1,135 @@
+/*
+ * Xilinx AXI-JESD204B v5.1 Interface Module
+ *
+ * Copyright 2014 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ *
+ * http://wiki.analog.com/resources/fpga/xilinx/
+ */
+
+#ifndef XILINX_JESD204B_H_
+#define XILINX_JESD204B_H_
+
+/**
+ * struct jesd204b_state - per-device state of the AXI-JESD204B core driver
+ * @dev:	backing device
+ * @regs:	ioremapped core register base
+ * @phy:	PHY register base (NOTE(review): unused in xilinx_jesd204b.c
+ *		as visible here — confirm it is still needed)
+ * @clk:	AXI/reference clock
+ * @lanes:	lane count (DT "xlnx,lanes" or read from the core)
+ * @vers_id:	IP version id
+ * @addr:	register address latched by the reg_access sysfs file
+ * @band:	line-rate band
+ * @transmit:	non-zero when the core is in transmit direction
+ * @pll:	selected PLL type
+ * @rate:	cached clock rate in Hz
+ */
+struct jesd204b_state {
+	struct device	*dev;
+	void __iomem	*regs;
+	void __iomem	*phy;
+	struct clk	*clk;
+	u32		lanes;
+	u32		vers_id;
+	u32		addr;
+	u32		band;
+	u32		transmit;
+	u32		pll;
+	unsigned long	rate;
+};
+
+#define XLNX_JESD204_REG_VERSION 0x000
+#define XLNX_JESD204_VERSION_MAJOR(x) (((x) >> 24) & 0xFF)
+#define XLNX_JESD204_VERSION_MINOR(x) (((x) >> 16) & 0xFF)
+#define XLNX_JESD204_VERSION_REV(x) (((x) >> 8) & 0xFF)
+
+#define XLNX_JESD204_REG_RESET 0x004
+#define XLNX_JESD204_RESET (1 << 0)
+
+#define XLNX_JESD204_REG_ILA_CTRL 0x008
+#define XLNX_JESD204_ILA_EN (1 << 0)
+
+#define XLNX_JESD204_REG_SCR_CTRL 0x00C
+#define XLNX_JESD204_SCR_EN (1 << 0)
+
+#define XLNX_JESD204_REG_SYSREF_CTRL 0x010
+#define XLNX_JESD204_ALWAYS_SYSREF_EN (1 << 0)
+
+#define XLNX_JESD204_REG_ILA_MFC 0x014
+#define XLNX_JESD204_ILA_MFC(x) (((x) - 1) & 0xFF)
+ /* TX only 4..256 */
+
+#define XLNX_JESD204_REG_TEST_MODE_SEL 0x018
+#define XLNX_JESD204_TEST_MODE_OFF 0 /* Normal operation */
+#define XLNX_JESD204_TEST_MODE_K28_5 1 /* Send/Receive /K28.5/
+ * indefinitely
+ */
+#define XLNX_JESD204_TEST_MODE_ILA 2 /* Synchronize as normal then
+ * send/receive repeated ILA
+ * sequences
+ */
+#define XLNX_JESD204_TEST_MODE_D21_5 3 /* Send/Receive /D21.5/
+ * indefinitely
+ */
+#define XLNX_JESD204_TEST_MODE_RPAT 5 /* Send/Receive modified
+ * random pattern (RPAT)
+ */
+#define XLNX_JESD204_TEST_MODE_JSPAT 7 /* Send/Receive a scrambled
+ * jitter pattern (JSPAT)
+ */
+
+#define XLNX_JESD204_REG_SYNC_STATUS 0x038 /* Link SYNC status */
+#define XLNX_JESD204_REG_SYNC_ERR_STAT 0x01C /* RX only */
+#define XLNX_JESD204_SYNC_ERR_NOT_IN_TAB(lane) (1 << (0 + (lane) * 3))
+#define XLNX_JESD204_SYNC_ERR_DISPARITY(lane) (1 << (1 + (lane) * 3))
+#define XLNX_JESD204_SYNC_ERR_UNEXPECTED_K(lane) (1 << (2 + (lane) * 3))
+
+#define XLNX_JESD204_REG_OCTETS_PER_FRAME 0x020
+#define XLNX_JESD204_OCTETS_PER_FRAME(x) (((x) - 1) & 0xFF) /* 1..256 */
+
+#define XLNX_JESD204_REG_FRAMES_PER_MFRAME 0x024
+#define XLNX_JESD204_FRAMES_PER_MFRAME(x) (((x) - 1) & 0x1F) /* 1..32 */
+
+#define XLNX_JESD204_REG_LANES 0x028
+#define XLNX_JESD204_LANES(x) (((x) - 1) & 0x1F) /* 1..32 */
+
+#define XLNX_JESD204_REG_SUBCLASS 0x02C
+
+#define XLNX_JESD204_REG_RX_BUF_DELAY 0x030 /* RX only */
+#define XLNX_JESD204_RX_BUF_DELAY(x) ((x) & 0x1FFF)
+
+#define XLNX_JESD204_REG_RX_LINK_CTRL 0x034 /* RX only */
+#define XLNX_JESD204_LINK_TEST_EN (1 << 0)
+#define XLNX_JESD204_SYNC_ERR_REP_DIS (1 << 8)
+
+/* Per LANE Registers */
+#define XLNX_JESD204_REG_LANE_VERSION(l) (0x800 + ((l) * 0x40))
+#define XLNX_JESD204_LANE_SUBCLASS(x) (((x) >> 0) & 0x7)
+#define XLNX_JESD204_LANE_JESDV(x) (((x) >> 8) & 0x7)
+
+#define XLNX_JESD204_REG_LANE_F(l) (0x804 + ((l) * 0x40))
+#define XLNX_JESD204_LANE_F(x) ((((x) >> 0) & 0xFF) + 1)
+
+#define XLNX_JESD204_REG_LANE_K(l) (0x808 + ((l) * 0x40))
+#define XLNX_JESD204_LANE_K(x) ((((x) >> 0) & 0x1F) + 1)
+
+#define XLNX_JESD204_REG_ID_L(l) (0x80C + ((l) * 0x40))
+#define XLNX_JESD204_LANE_DID(x) (((x) >> 0) & 0xFF)
+#define XLNX_JESD204_LANE_BID(x) (((x) >> 8) & 0x1F)
+#define XLNX_JESD204_LANE_LID(x) (((x) >> 16) & 0x1F)
+#define XLNX_JESD204_LANE_L(x) ((((x) >> 24) & 0x1F) + 1)
+
+#define XLNX_JESD204_REG_M_N_ND_CS(l) (0x810 + ((l) * 0x40))
+#define XLNX_JESD204_LANE_M(x) ((((x) >> 0) & 0xFF) + 1)
+#define XLNX_JESD204_LANE_N(x) ((((x) >> 8) & 0x1F) + 1)
+#define XLNX_JESD204_LANE_ND(x) ((((x) >> 16) & 0x1F) + 1)
+#define XLNX_JESD204_LANE_CS(x) (((x) >> 24) & 0x3)
+
+#define XLNX_JESD204_REG_SCR_S_HD_CF(l) (0x814 + ((l) * 0x40))
+#define XLNX_JESD204_LANE_SCR(x) (((x) >> 0) & 0x1)
+#define XLNX_JESD204_LANE_S(x) ((((x) >> 8) & 0x1F) + 1)
+#define XLNX_JESD204_LANE_HD(x) (((x) >> 16) & 0x1)
+#define XLNX_JESD204_LANE_CF(x) (((x) >> 24) & 0x1F)
+
+#define XLNX_JESD204_REG_FCHK(l) (0x818 + ((l) * 0x40))
+#define XLNX_JESD204_LANE_FCHK(x) (((x) >> 16) & 0xFF)
+
+#define XLNX_JESD204_REG_SC2_ADJ_CTRL(l) (0x81C + ((l) * 0x40))
+#define XLNX_JESD204_LANE_ADJ_CNT(x) (((x) >> 0) & 0xF)
+#define XLNX_JESD204_LANE_PHASE_ADJ_REQ(x) (((x) >> 8) & 0x1)
+#define XLNX_JESD204_LANE_ADJ_CNT_DIR(x) (((x) >> 16) & 0x1)
+
+#define XLNX_JESD204_REG_TM_ERR_CNT(l) (0x820 + ((l) * 0x40))
+#define XLNX_JESD204_REG_TM_LINK_ERR_CNT(l) (0x824 + ((l) * 0x40))
+#define XLNX_JESD204_REG_TM_ILA_CNT(l) (0x828 + ((l) * 0x40))
+#define XLNX_JESD204_REG_TM_MFC_CNT(l) (0x82C + ((l) * 0x40))
+#define XLNX_JESD204_REG_TM_BUF_ADJ(l) (0x830 + ((l) * 0x40))
+
+#endif /* XILINX_JESD204B_H_ */
diff --git a/drivers/misc/xilinx_flex_pm.c b/drivers/misc/xilinx_flex_pm.c
new file mode 100644
index 000000000000..30dd61be1e0a
--- /dev/null
+++ b/drivers/misc/xilinx_flex_pm.c
@@ -0,0 +1,657 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Flex Noc Performance Monitor driver.
+ * Copyright (c) 2019 Xilinx Inc.
+ */
+
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define to_xflex_dev_info(n) ((struct xflex_dev_info *)dev_get_drvdata(n))
+
+#define FPM_LAR_OFFSET 0xFB0
+#define FPM_UNLOCK 0xC5ACCE55
+
+#define FPM_RD_REQ_OFFSET 0x1000
+#define FPM_RD_RES_OFFSET 0x2000
+#define FPM_WR_REQ_OFFSET 0x3000
+#define FPM_WR_RES_OFFSET 0x4000
+
+#define FPM_PORT_SEL_OFFSET 0x134
+#define FPM_MAIN_CTRL_OFFSET 0x008
+#define FPM_SRC_SEL_OFFSET 0x138
+#define FPM_STATPERIOD 0x24
+#define FPM_CFGCTRL 0x0C
+#define FPM_LPD 0x4210002
+#define FPM_FPD 0x420c003
+
+#define FPM_VAL 0x300
+#define FPM_SRC 0x200
+#define FPM_WRRSP_L 0x70000
+#define FPM_WRREQ_L 0x60000
+#define FPM_RDRSP_L 0x50000
+#define FPM_RDREQ_L 0x40000
+#define FPM_PROBE_SHIFT 16
+#define FPM_COUNTER_OFFSET 0x14
+#define FPM_GLOBALEN BIT(0)
+#define FPM_STATEN BIT(3)
+#define FPM_STATCOND_DUMP BIT(5)
+#define FPM_NUM_COUNTERS 4
+#define FPM_MAINCTL_DIS 0
+
+#define FPM_SRC_OFF 0x0
+#define FPM_SRC_CYCLE 0x1
+#define FPM_SRC_IDLE 0x2
+#define FPM_SRC_XFER 0x3
+#define FPM_SRC_BUSY 0x4
+#define FPM_SRC_WAIT 0x5
+#define FPM_SRC_PACKET 0x6
+
+/* Port values */
+#define FPM_PORT_LPD_AFIFS_AXI 0x0
+#define FPM_PORT_LPD_OCM 0x1
+#define FPM_PORT_LPD_OCMEXT 0x2
+#define FPM_PORT_PMC_RPU_AXI0 0x3
+
+#define FPM_PORT_FPDAXI 0x1
+#define FPM_PORT_PROTXPPU 0x2
+
+/**
+ * struct xflex_dev_info - Global Driver structure
+ * @dev: Device structure
+ * @baselpd: Iomapped LPD base address
+ * @basefpd: Iomapped FPD base address
+ * @funnel: Iomapped funnel register base address
+ * @counterid_fpd: FPD counter id
+ * @counterid_lpd: LPD counter id
+ * @mutex: avoid parallel access to device
+ */
+struct xflex_dev_info {
+	struct device *dev;
+	void __iomem *baselpd;
+	void __iomem *basefpd;
+	void __iomem *funnel;
+	u32 counterid_fpd;
+	u32 counterid_lpd;
+	struct mutex mutex; /* avoid parallel access to device */
+};
+
+/**
+ * enum xflex_sysfs_cmd_codes - sysfs command codes
+ * @XFLEX_GET_COUNTER_FPD: get the FPD counter value
+ * @XFLEX_SET_COUNTER_FPD: set the FPD counter value
+ * @XFLEX_GET_COUNTER_FPD_RDREQ: get the FPD read request count
+ * @XFLEX_GET_COUNTER_FPD_RDRSP: get the FPD read response count
+ * @XFLEX_GET_COUNTER_FPD_WRREQ: get the FPD write request count
+ * @XFLEX_GET_COUNTER_FPD_WRRSP: get the FPD write response count
+ * @XFLEX_GET_COUNTER_LPD_RDREQ: get the LPD read request count
+ * @XFLEX_GET_COUNTER_LPD_RDRSP: get the LPD read response count
+ * @XFLEX_GET_COUNTER_LPD_WRREQ: get the LPD write request count
+ * @XFLEX_GET_COUNTER_LPD_WRRSP: get the LPD write response count
+ * @XFLEX_SET_COUNTER_LPD: set the LPD counter value
+ * @XFLEX_SET_SRC_COUNTER_LPD: set the LPD source
+ * @XFLEX_SET_SRC_COUNTER_FPD: set the FPD source
+ * @XFLEX_SET_PORT_COUNTER_LPD: set the LPD port
+ * @XFLEX_SET_PORT_COUNTER_FPD: set the FPD port
+ *
+ * Every code is dispatched by xflex_sysfs_cmd(), one code per sysfs
+ * attribute callback.
+ */
+enum xflex_sysfs_cmd_codes {
+	XFLEX_GET_COUNTER_FPD = 0,
+	XFLEX_SET_COUNTER_FPD,
+	XFLEX_GET_COUNTER_FPD_RDREQ,
+	XFLEX_GET_COUNTER_FPD_RDRSP,
+	XFLEX_GET_COUNTER_FPD_WRREQ,
+	XFLEX_GET_COUNTER_FPD_WRRSP,
+	XFLEX_GET_COUNTER_LPD_RDREQ,
+	XFLEX_GET_COUNTER_LPD_RDRSP,
+	XFLEX_GET_COUNTER_LPD_WRREQ,
+	XFLEX_GET_COUNTER_LPD_WRRSP,
+	XFLEX_SET_COUNTER_LPD,
+	XFLEX_SET_SRC_COUNTER_LPD,
+	XFLEX_SET_SRC_COUNTER_FPD,
+	XFLEX_SET_PORT_COUNTER_LPD,
+	XFLEX_SET_PORT_COUNTER_FPD,
+};
+
+/* Mirror one register write into all four probe sections (rd/wr req/resp). */
+static inline void fpm_reg(void __iomem *base, u32 val, u32 offset)
+{
+	static const u32 sections[] = {
+		FPM_RD_REQ_OFFSET,
+		FPM_RD_RES_OFFSET,
+		FPM_WR_REQ_OFFSET,
+		FPM_WR_RES_OFFSET,
+	};
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(sections); i++)
+		writel(val, base + sections[i] + offset);
+}
+
+/*
+ * reset_default - Put one counter of the given domain into a known state
+ * @dev: Device structure
+ * @counter: Counter index to (re)configure
+ * @domain: FPM_LPD or FPM_FPD node id; selects which probe block to touch
+ *
+ * Disables the probe main control, re-enables statistics collection with
+ * conditional dump, points the counter at the default port/source and
+ * restarts the statistics engine.
+ */
+static void reset_default(struct device *dev, u32 counter, u32 domain)
+{
+	struct xflex_dev_info *flexpm = to_xflex_dev_info(dev);
+	void __iomem *base = flexpm->basefpd;
+	u32 offset;
+
+	if (domain == FPM_LPD)
+		base = flexpm->baselpd;
+
+	fpm_reg(base, FPM_MAINCTL_DIS, FPM_MAIN_CTRL_OFFSET);
+	/*
+	 * NOTE(review): the next write is issued twice back to back - this
+	 * looks like a copy/paste duplicate; confirm the hardware does not
+	 * require a double write before removing one of them.
+	 */
+	fpm_reg(base, FPM_STATEN | FPM_STATCOND_DUMP, FPM_MAIN_CTRL_OFFSET);
+	fpm_reg(base, FPM_STATEN | FPM_STATCOND_DUMP, FPM_MAIN_CTRL_OFFSET);
+
+	/* Default selection: OCM port, packet counting */
+	offset = FPM_PORT_SEL_OFFSET + counter * FPM_COUNTER_OFFSET;
+	fpm_reg(base, FPM_PORT_LPD_OCM, offset);
+	offset = FPM_SRC_SEL_OFFSET + counter * FPM_COUNTER_OFFSET;
+	fpm_reg(base, FPM_SRC_PACKET, offset);
+
+	/* Free-running statistics (period 0), then global enable */
+	fpm_reg(base, 0, FPM_STATPERIOD);
+	fpm_reg(base, FPM_GLOBALEN, FPM_CFGCTRL);
+}
+
+/**
+ * xflex_sysfs_cmd - Implements sysfs operations
+ * @dev: Device structure
+ * @buf: Value to write (set commands); unused by the get commands
+ * @cmd: sysfs cmd
+ *
+ * Return: value read from the sysfs cmd on success and negative error code
+ * otherwise. Note the int return cannot distinguish an error from a counter
+ * value with bit 31 set; callers treat any negative value as an error.
+ */
+static int xflex_sysfs_cmd(struct device *dev, const char *buf,
+			   enum xflex_sysfs_cmd_codes cmd)
+{
+	struct xflex_dev_info *flexpm = to_xflex_dev_info(dev);
+	u32 domain, src, offset, reg, val, counter;
+	int ret;
+	const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+	u32 rdval = 0;
+	u32 pm_api_ret[4] = {0, 0, 0, 0};
+
+	/* PTR_ERR(NULL) is 0 (success); map a NULL ops table to a real error */
+	if (IS_ERR_OR_NULL(eemi_ops))
+		return eemi_ops ? PTR_ERR(eemi_ops) : -ENODEV;
+
+	if (!eemi_ops->ioctl)
+		return -ENOTSUPP;
+
+	mutex_lock(&flexpm->mutex);
+
+	switch (cmd) {
+	/*
+	 * Counter reads: build the probe selector (counter id | lane | value
+	 * request), pick the domain, then issue one ioctl at counter_read.
+	 */
+	case XFLEX_GET_COUNTER_LPD_WRRSP:
+		reg = flexpm->counterid_lpd | FPM_WRRSP_L | FPM_VAL;
+		domain = FPM_LPD;
+		goto counter_read;
+
+	case XFLEX_GET_COUNTER_LPD_WRREQ:
+		reg = flexpm->counterid_lpd | FPM_WRREQ_L | FPM_VAL;
+		domain = FPM_LPD;
+		goto counter_read;
+
+	case XFLEX_GET_COUNTER_LPD_RDRSP:
+		reg = flexpm->counterid_lpd | FPM_RDRSP_L | FPM_VAL;
+		domain = FPM_LPD;
+		goto counter_read;
+
+	case XFLEX_GET_COUNTER_LPD_RDREQ:
+		reg = flexpm->counterid_lpd | FPM_RDREQ_L | FPM_VAL;
+		domain = FPM_LPD;
+		goto counter_read;
+
+	case XFLEX_GET_COUNTER_FPD_WRRSP:
+		reg = flexpm->counterid_fpd | FPM_WRRSP_L | FPM_VAL;
+		domain = FPM_FPD;
+		goto counter_read;
+
+	case XFLEX_GET_COUNTER_FPD_WRREQ:
+		reg = flexpm->counterid_fpd | FPM_WRREQ_L | FPM_VAL;
+		domain = FPM_FPD;
+		goto counter_read;
+
+	case XFLEX_GET_COUNTER_FPD_RDRSP:
+		reg = flexpm->counterid_fpd | FPM_RDRSP_L | FPM_VAL;
+		domain = FPM_FPD;
+		goto counter_read;
+
+	case XFLEX_GET_COUNTER_FPD_RDREQ:
+		reg = flexpm->counterid_fpd | FPM_RDREQ_L | FPM_VAL;
+		domain = FPM_FPD;
+		goto counter_read;
+
+	case XFLEX_SET_COUNTER_LPD:
+		ret = kstrtou32(buf, 0, &val);
+		if (ret < 0)
+			goto exit_unlock;
+
+		flexpm->counterid_lpd = val;
+		reset_default(dev, val, FPM_LPD);
+		break;
+
+	case XFLEX_SET_COUNTER_FPD:
+		ret = kstrtou32(buf, 0, &val);
+		if (ret < 0)
+			goto exit_unlock;
+
+		flexpm->counterid_fpd = val;
+		reset_default(dev, val, FPM_FPD);
+		break;
+
+	case XFLEX_SET_PORT_COUNTER_LPD:
+	case XFLEX_SET_PORT_COUNTER_FPD:
+		ret = kstrtou32(buf, 0, &val);
+		if (ret < 0)
+			goto exit_unlock;
+
+		/*
+		 * Register stride is one FPM_COUNTER_OFFSET per counter, as
+		 * in reset_default(). The old code multiplied the counter id
+		 * by FPM_COUNTER_OFFSET twice and so wrote past the intended
+		 * port-select register.
+		 */
+		if (cmd == XFLEX_SET_PORT_COUNTER_LPD)
+			counter = flexpm->counterid_lpd;
+		else
+			counter = flexpm->counterid_fpd;
+		offset = FPM_PORT_SEL_OFFSET + counter * FPM_COUNTER_OFFSET;
+		if (cmd == XFLEX_SET_PORT_COUNTER_LPD)
+			fpm_reg(flexpm->baselpd, val, offset);
+		else
+			fpm_reg(flexpm->basefpd, val, offset);
+		break;
+
+	case XFLEX_SET_SRC_COUNTER_LPD:
+	case XFLEX_SET_SRC_COUNTER_FPD:
+		if (cmd == XFLEX_SET_SRC_COUNTER_LPD) {
+			counter = flexpm->counterid_lpd;
+			domain = FPM_LPD;
+		} else {
+			counter = flexpm->counterid_fpd;
+			domain = FPM_FPD;
+		}
+		ret = kstrtou32(buf, 0, &val);
+		if (ret < 0)
+			goto exit_unlock;
+
+		for (src = 0; src < FPM_NUM_COUNTERS; src++) {
+			/*
+			 * Rebuild reg on every pass. The old code OR-ed each
+			 * iteration's src bits into the previous value, so
+			 * for src > 0 a stale mix of probe-select bits was
+			 * written and the wrong probes were programmed.
+			 */
+			reg = counter | FPM_SRC | (src << FPM_PROBE_SHIFT);
+			ret = eemi_ops->ioctl(domain, IOCTL_PROBE_COUNTER_WRITE,
+					      reg, val, NULL);
+			if (ret < 0) {
+				dev_err(dev, "Counter write error %d\n", ret);
+				goto exit_unlock;
+			}
+		}
+		break;
+
+	default:
+		/* Old code fell through and returned 0 (success) here */
+		dev_err(dev, "Invalid option\n");
+		ret = -EINVAL;
+		goto exit_unlock;
+	}
+
+	mutex_unlock(&flexpm->mutex);
+	return rdval;
+
+counter_read:
+	ret = eemi_ops->ioctl(domain, IOCTL_PROBE_COUNTER_READ,
+			      reg, 0, &pm_api_ret[0]);
+	if (ret < 0) {
+		dev_err(dev, "Counter read error %d\n", ret);
+		goto exit_unlock;
+	}
+	rdval = pm_api_ret[1];
+	mutex_unlock(&flexpm->mutex);
+	return rdval;
+
+exit_unlock:
+	mutex_unlock(&flexpm->mutex);
+	return ret;
+}
+
+/* Sysfs functions */
+
+/* Show the FPD write-request counter; propagate errors instead of an empty read. */
+static ssize_t counterfpd_wrreq_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	int rdval = xflex_sysfs_cmd(dev, buf, XFLEX_GET_COUNTER_FPD_WRREQ);
+
+	if (rdval < 0)
+		return rdval;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", rdval);
+}
+static DEVICE_ATTR_RO(counterfpd_wrreq);
+
+/* Show the FPD write-response counter; propagate errors instead of an empty read. */
+static ssize_t counterfpd_wrrsp_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	int rdval = xflex_sysfs_cmd(dev, buf, XFLEX_GET_COUNTER_FPD_WRRSP);
+
+	if (rdval < 0)
+		return rdval;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", rdval);
+}
+static DEVICE_ATTR_RO(counterfpd_wrrsp);
+
+/* Show the FPD read-request counter; propagate errors instead of an empty read. */
+static ssize_t counterfpd_rdreq_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	int rdval = xflex_sysfs_cmd(dev, buf, XFLEX_GET_COUNTER_FPD_RDREQ);
+
+	if (rdval < 0)
+		return rdval;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", rdval);
+}
+static DEVICE_ATTR_RO(counterfpd_rdreq);
+
+/* Show the FPD read-response counter; propagate errors instead of an empty read. */
+static ssize_t counterfpd_rdrsp_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	int rdval = xflex_sysfs_cmd(dev, buf, XFLEX_GET_COUNTER_FPD_RDRSP);
+
+	if (rdval < 0)
+		return rdval;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", rdval);
+}
+static DEVICE_ATTR_RO(counterfpd_rdrsp);
+
+/* Show the LPD write-request counter; propagate errors instead of an empty read. */
+static ssize_t counterlpd_wrreq_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	int rdval = xflex_sysfs_cmd(dev, buf, XFLEX_GET_COUNTER_LPD_WRREQ);
+
+	if (rdval < 0)
+		return rdval;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", rdval);
+}
+static DEVICE_ATTR_RO(counterlpd_wrreq);
+
+/* Show the LPD write-response counter; propagate errors instead of an empty read. */
+static ssize_t counterlpd_wrrsp_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	int rdval = xflex_sysfs_cmd(dev, buf, XFLEX_GET_COUNTER_LPD_WRRSP);
+
+	if (rdval < 0)
+		return rdval;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", rdval);
+}
+static DEVICE_ATTR_RO(counterlpd_wrrsp);
+
+/* Show the LPD read-request counter; propagate errors instead of an empty read. */
+static ssize_t counterlpd_rdreq_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	int rdval = xflex_sysfs_cmd(dev, buf, XFLEX_GET_COUNTER_LPD_RDREQ);
+
+	if (rdval < 0)
+		return rdval;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", rdval);
+}
+static DEVICE_ATTR_RO(counterlpd_rdreq);
+
+/* Show the LPD read-response counter; propagate errors instead of an empty read. */
+static ssize_t counterlpd_rdrsp_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	int rdval = xflex_sysfs_cmd(dev, buf, XFLEX_GET_COUNTER_LPD_RDRSP);
+
+	if (rdval < 0)
+		return rdval;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", rdval);
+}
+static DEVICE_ATTR_RO(counterlpd_rdrsp);
+
+/* Program the LPD counter source; old code ignored failures and reported success. */
+static ssize_t counterlpdsrc_store(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t size)
+{
+	int ret = xflex_sysfs_cmd(dev, buf, XFLEX_SET_SRC_COUNTER_LPD);
+
+	if (ret < 0)
+		return ret;
+
+	return size;
+}
+static DEVICE_ATTR_WO(counterlpdsrc);
+
+/* Program the FPD counter source; old code ignored failures and reported success. */
+static ssize_t counterfpdsrc_store(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t size)
+{
+	int ret = xflex_sysfs_cmd(dev, buf, XFLEX_SET_SRC_COUNTER_FPD);
+
+	if (ret < 0)
+		return ret;
+
+	return size;
+}
+static DEVICE_ATTR_WO(counterfpdsrc);
+
+/* Select the LPD probe port; old code ignored failures and reported success. */
+static ssize_t counterlpdport_store(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t size)
+{
+	int ret = xflex_sysfs_cmd(dev, buf, XFLEX_SET_PORT_COUNTER_LPD);
+
+	if (ret < 0)
+		return ret;
+
+	return size;
+}
+static DEVICE_ATTR_WO(counterlpdport);
+
+/* Select the FPD probe port; old code ignored failures and reported success. */
+static ssize_t counterfpdport_store(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t size)
+{
+	int ret = xflex_sysfs_cmd(dev, buf, XFLEX_SET_PORT_COUNTER_FPD);
+
+	if (ret < 0)
+		return ret;
+
+	return size;
+}
+static DEVICE_ATTR_WO(counterfpdport);
+
+/* Show the currently selected LPD counter id. */
+static ssize_t counteridlpd_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct xflex_dev_info *flexpm = to_xflex_dev_info(dev);
+
+	/* counterid_lpd is u32: %u, not %d, so large ids don't print negative */
+	return snprintf(buf, PAGE_SIZE, "%08u\n", flexpm->counterid_lpd);
+}
+
+/* Select a new LPD counter id and reprogram that counter to its defaults. */
+static ssize_t counteridlpd_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	struct xflex_dev_info *flexpm = to_xflex_dev_info(dev);
+	u32 id;
+	int ret;
+
+	ret = kstrtou32(buf, 0, &id);
+	if (ret < 0)
+		return ret;
+
+	flexpm->counterid_lpd = id;
+	reset_default(dev, id, FPM_LPD);
+
+	return size;
+}
+static DEVICE_ATTR_RW(counteridlpd);
+
+/* Show the currently selected FPD counter id. */
+static ssize_t counteridfpd_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct xflex_dev_info *flexpm = to_xflex_dev_info(dev);
+
+	/* counterid_fpd is u32: %u, not %d, so large ids don't print negative */
+	return snprintf(buf, PAGE_SIZE, "%08u\n", flexpm->counterid_fpd);
+}
+
+/*
+ * Select a new FPD counter id.
+ *
+ * NOTE(review): unlike counteridlpd_store() this does not call
+ * reset_default() after updating the id - confirm whether the asymmetry
+ * is intentional or an omission.
+ */
+static ssize_t counteridfpd_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	int ret;
+	struct xflex_dev_info *flexpm = to_xflex_dev_info(dev);
+
+	ret = kstrtou32(buf, 0, &flexpm->counterid_fpd);
+	if (ret < 0)
+		return ret;
+
+	return size;
+}
+static DEVICE_ATTR_RW(counteridfpd);
+
+/* All sysfs attributes exposed by the driver, registered as one group */
+static struct attribute *xflex_attrs[] = {
+	/* Source/port selection (write-only) */
+	&dev_attr_counterlpdsrc.attr,
+	&dev_attr_counterlpdport.attr,
+	&dev_attr_counterfpdsrc.attr,
+	&dev_attr_counterfpdport.attr,
+
+	/* LPD counter values (read-only) */
+	&dev_attr_counterlpd_rdreq.attr,
+	&dev_attr_counterlpd_wrreq.attr,
+	&dev_attr_counterlpd_rdrsp.attr,
+	&dev_attr_counterlpd_wrrsp.attr,
+
+	/* FPD counter values (read-only) */
+	&dev_attr_counterfpd_rdreq.attr,
+	&dev_attr_counterfpd_wrreq.attr,
+	&dev_attr_counterfpd_rdrsp.attr,
+	&dev_attr_counterfpd_wrrsp.attr,
+
+	/* Active counter id selection (read/write) */
+	&dev_attr_counteridlpd.attr,
+	&dev_attr_counteridfpd.attr,
+	NULL,
+};
+
+ATTRIBUTE_GROUPS(xflex);
+
+/**
+ * xflex_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This is the driver probe routine. It maps the three register regions,
+ * unlocks the funnel/LPD lock-access registers and creates the sysfs
+ * entries for the device.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int xflex_probe(struct platform_device *pdev)
+{
+	struct xflex_dev_info *flexpm;
+	struct resource *res;
+	int err;
+	struct device *dev = &pdev->dev;
+
+	flexpm = devm_kzalloc(dev, sizeof(*flexpm), GFP_KERNEL);
+	if (!flexpm)
+		return -ENOMEM;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "baselpd");
+	flexpm->baselpd = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(flexpm->baselpd))
+		return PTR_ERR(flexpm->baselpd);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "basefpd");
+	flexpm->basefpd = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(flexpm->basefpd))
+		return PTR_ERR(flexpm->basefpd);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "funnel");
+	flexpm->funnel = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(flexpm->funnel))
+		return PTR_ERR(flexpm->funnel);
+
+	mutex_init(&flexpm->mutex);
+	writel(FPM_UNLOCK, flexpm->funnel + FPM_LAR_OFFSET);
+	writel(FPM_UNLOCK, flexpm->baselpd + FPM_LAR_OFFSET);
+
+	/*
+	 * Publish drvdata before the attributes become visible: the sysfs
+	 * callbacks dereference it, so setting it after group creation (as
+	 * the old code did) left a window where a concurrent read/write
+	 * could observe NULL drvdata.
+	 */
+	dev_set_drvdata(dev, flexpm);
+
+	/* Create sysfs file entries for the device */
+	err = sysfs_create_groups(&dev->kobj, xflex_groups);
+	if (err < 0) {
+		dev_err(dev, "unable to create sysfs entries\n");
+		return err;
+	}
+
+	return 0;
+}
+
+/**
+ * xflex_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Tears down the sysfs entries created at probe time; all other
+ * resources are device-managed.
+ *
+ * Return: 0 always
+ */
+static int xflex_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+
+	sysfs_remove_groups(&dev->kobj, xflex_groups);
+
+	return 0;
+}
+
+/* Device-tree match table */
+static const struct of_device_id xflex_of_match[] = {
+	{ .compatible = "xlnx,flexnoc-pm-2.7", },
+	{ /* end of table */ }
+};
+MODULE_DEVICE_TABLE(of, xflex_of_match);
+
+/* Platform driver glue */
+static struct platform_driver xflex_driver = {
+	.driver = {
+		.name = "xilinx-flex",
+		.of_match_table = xflex_of_match,
+	},
+	.probe = xflex_probe,
+	.remove = xflex_remove,
+};
+
+module_platform_driver(xflex_driver);
+
+MODULE_AUTHOR("Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx Flexnoc performance monitor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/xilinx_trafgen.c b/drivers/misc/xilinx_trafgen.c
new file mode 100644
index 000000000000..89187c0c50f0
--- /dev/null
+++ b/drivers/misc/xilinx_trafgen.c
@@ -0,0 +1,1654 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx AXI Traffic Generator driver.
+ *
+ * Copyright (c) 2013 - 2019 Xilinx Inc.
+ *
+ * Description:
+ * This driver is developed for AXI Traffic Generator IP, which is
+ * designed to generate AXI4 traffic which can be used to stress
+ * different modules/interconnect connected in the system. Different
+ * configurable options which are provided through sysfs entries
+ * allow the user to generate a wide variety of traffic based on
+ * their requirements.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Hw specific definitions */
+
+/* Internal RAM Offsets */
+#define XTG_PARAM_RAM_OFFSET 0x1000 /* Parameter RAM offset */
+#define XTG_COMMAND_RAM_OFFSET 0x8000 /* Command RAM offset */
+#define XTG_COMMAND_RAM_MSB_OFFSET 0xa000 /**< Command RAM MSB Offset */
+#define XTG_MASTER_RAM_INIT_OFFSET 0x10000 /* Master RAM initial offset(v1.0) */
+#define XTG_MASTER_RAM_OFFSET 0xc000 /* Master RAM offset */
+#define XTG_WRITE_COMMAND_RAM_OFFSET 0x9000 /* Write Command RAM offset */
+
+/* Register Offsets */
+#define XTG_MCNTL_OFFSET 0x00 /* Master control */
+#define XTG_SCNTL_OFFSET 0x04 /* Slave control */
+#define XTG_ERR_STS_OFFSET 0x08 /* Error status */
+#define XTG_ERR_EN_OFFSET 0x0C /* Error enable */
+#define XTG_MSTERR_INTR_OFFSET 0x10 /* Master error interrupt enable */
+#define XTG_CFG_STS_OFFSET 0x14 /* Config status */
+#define XTG_STREAM_CNTL_OFFSET 0x30 /* Streaming Control */
+#define XTG_STREAM_CFG_OFFSET 0x34 /* Streaming Control */
+#define XTG_STREAM_TL_OFFSET 0x38 /* Streaming Transfer Length */
+#define XTG_STREAM_TKTS1_OFFSET 0x40 /* Streaming tkeep tstrb set1*/
+#define XTG_STREAM_TKTS2_OFFSET 0x44 /* Streaming tkeep tstrb set2*/
+#define XTG_STREAM_TKTS3_OFFSET 0x48 /* Streaming tkeep tstrb set3*/
+#define XTG_STREAM_TKTS4_OFFSET 0x4C /* Streaming tkeep tstrb set4*/
+#define XTG_STATIC_CNTL_OFFSET 0x60 /* Static Control */
+#define XTG_STATIC_LEN_OFFSET 0x64 /* Static Length */
+
+/* Register Bitmasks/shifts */
+
+/* Master logic enable */
+#define XTG_MCNTL_MSTEN_MASK 0x00100000
+/* Loop enable */
+#define XTG_MCNTL_LOOPEN_MASK 0x00080000
+/* Slave error interrupt enable */
+#define XTG_SCNTL_ERREN_MASK 0x00008000
+/* Master complete interrupt enable */
+#define XTG_ERR_EN_MSTIRQEN_MASK 0x80000000
+/* Master error interrupt enable */
+#define XTG_MSTERR_INTR_MINTREN_MASK 0x00008000
+/* Master complete done status */
+#define XTG_ERR_STS_MSTDONE_MASK 0x80000000
+/* Error mask for error status/enable registers */
+#define XTG_ERR_ALL_ERRS_MASK 0x801F0003
+/* Core Revision shift */
+#define XTG_MCNTL_REV_SHIFT 24
+
+/* Axi Traffic Generator Command RAM Entry field mask/shifts */
+
+/* Command RAM entry masks */
+#define XTG_LEN_MASK 0xFF /* Driven to a*_len line */
+#define XTG_LOCK_MASK 0x1 /* Driven to a*_lock line */
+#define XTG_BURST_MASK 0x3 /* Driven to a*_burst line */
+#define XTG_SIZE_MASK 0x7 /* Driven to a*_size line */
+#define XTG_ID_MASK 0x1F /* Driven to a*_id line */
+#define XTG_PROT_MASK 0x7 /* Driven to a*_prot line */
+#define XTG_LAST_ADDR_MASK 0x7 /* Last address */
+#define XTG_VALID_CMD_MASK 0x1 /* Valid Command */
+#define XTG_MSTRAM_INDEX_MASK 0x1FFF /* Master RAM Index */
+#define XTG_OTHER_DEPEND_MASK 0x1FF /* Other depend Command no */
+#define XTG_MY_DEPEND_MASK 0x1FF /* My depend command no */
+#define XTG_QOS_MASK 0xF /* Driven to a*_qos line */
+#define XTG_USER_MASK 0xFF /* Driven to a*_user line */
+#define XTG_CACHE_MASK 0xF /* Driven to a*_cache line */
+#define XTG_EXPECTED_RESP_MASK 0x7 /* Expected response */
+
+/* Command RAM entry shift values */
+#define XTG_LEN_SHIFT 0 /* Driven to a*_len line */
+#define XTG_LOCK_SHIFT 8 /* Driven to a*_lock line */
+#define XTG_BURST_SHIFT 10 /* Driven to a*_burst line */
+#define XTG_SIZE_SHIFT 12 /* Driven to a*_size line */
+#define XTG_ID_SHIFT 15 /* Driven to a*_id line */
+#define XTG_PROT_SHIFT 21 /* Driven to a*_prot line */
+#define XTG_LAST_ADDR_SHIFT 28 /* Last address */
+#define XTG_VALID_CMD_SHIFT 31 /* Valid Command */
+#define XTG_MSTRAM_INDEX_SHIFT 0 /* Master RAM Index */
+#define XTG_OTHER_DEPEND_SHIFT 13 /* Other depend cmd num */
+#define XTG_MY_DEPEND_SHIFT 22 /* My depend cmd num */
+#define XTG_QOS_SHIFT 16 /* Driven to a*_qos line */
+#define XTG_USER_SHIFT 5 /* Driven to a*_user line */
+#define XTG_CACHE_SHIFT 4 /* Driven to a*_cache line */
+#define XTG_EXPECTED_RESP_SHIFT 0 /* Expected response */
+
+/* Axi Traffic Generator Parameter RAM Entry field mask/shifts */
+
+/* Parameter RAM Entry field shift values */
+#define XTG_PARAM_ADDRMODE_SHIFT 24 /* Address mode */
+#define XTG_PARAM_INTERVALMODE_SHIFT 26 /* Interval mode */
+#define XTG_PARAM_IDMODE_SHIFT 28 /* Id mode */
+#define XTG_PARAM_OP_SHIFT 29 /* Opcode */
+
+/* PARAM RAM Opcode shift values */
+#define XTG_PARAM_COUNT_SHIFT 0 /* Repeat/Delay count */
+#define XTG_PARAM_DELAYRANGE_SHIFT 0 /* Delay range */
+#define XTG_PARAM_DELAY_SHIFT 8 /* FIXED RPT delay count */
+#define XTG_PARAM_ADDRRANGE_SHIFT 20 /* Address range */
+
+/* Parameter RAM Entry field mask values */
+#define XTG_PARAM_ADDRMODE_MASK 0x3 /* Address mode */
+#define XTG_PARAM_INTERVALMODE_MASK 0x3 /* Interval mode */
+#define XTG_PARAM_IDMODE_MASK 0x1 /* Id mode */
+#define XTG_PARAM_OP_MASK 0x7 /* Opcode */
+
+/* PARAM RAM Opcode mask values */
+#define XTG_PARAM_COUNT_MASK 0xFFFFFF/* Repeat/Delay count */
+#define XTG_PARAM_DELAYRANGE_MASK 0xFF /* Delay range */
+#define XTG_PARAM_DELAY_MASK 0xFFF /* FIXED RPT delay count */
+#define XTG_PARAM_ADDRRANGE_MASK 0xF /* Address range */
+
+/* PARAM RAM Opcode values */
+#define XTG_PARAM_OP_NOP 0x0 /* NOP mode */
+#define XTG_PARAM_OP_RPT 0x1 /* Repeat mode */
+#define XTG_PARAM_OP_DELAY 0x2 /* Delay mode */
+#define XTG_PARAM_OP_FIXEDRPT 0x3 /* Fixed repeat delay */
+
+/* Axi Traffic Generator Static Mode masks */
+#define XTG_STATIC_CNTL_TD_MASK 0x00000002 /* Transfer Done Mask */
+#define XTG_STATIC_CNTL_STEN_MASK 0x00000001 /* Static Enable Mask */
+#define XTG_STATIC_CNTL_RESET_MASK 0x00000000 /* Static Reset Mask */
+
+/* Axi Traffic Generator Stream Mode mask/shifts */
+#define XTG_STREAM_CNTL_STEN_MASK 0x00000001 /* Stream Enable Mask */
+#define XTG_STREAM_TL_TCNT_MASK 0xFFFF0000 /* Transfer Count Mask */
+#define XTG_STREAM_TL_TLEN_MASK 0x0000FFFF /* Transfer Length Mask */
+#define XTG_STREAM_TL_TCNT_SHIFT 16 /* Transfer Count Shift */
+
+/* Driver Specific Definitions */
+
+#define MAX_NUM_ENTRIES 256 /* Number of command entries per region */
+
+#define VALID_SIG 0xa5a5a5a5 /* Valid unique identifier */
+
+/* Internal RAM Sizes */
+#define XTG_PRM_RAM_BLOCK_SIZE 0x400 /* PRAM Block size (1KB) */
+#define XTG_CMD_RAM_BLOCK_SIZE 0x1000 /* CRAM Block size (4KB) */
+#define XTG_EXTCMD_RAM_BLOCK_SIZE 0x400 /**< Extended CMDRAM Block Size (1KB) */
+#define XTG_PARAM_RAM_SIZE 0x800 /* Parameter RAM (2KB) */
+#define XTG_COMMAND_RAM_SIZE 0x2000 /* Command RAM (8KB) */
+#define XTG_EXTCMD_RAM_SIZE 0x800 /* Command RAM (2KB) */
+#define XTG_MASTER_RAM_SIZE 0x2000 /* Master RAM (8KB) */
+
+/* RAM Access Flags */
+#define XTG_READ_RAM 0x0 /* Read RAM flag */
+#define XTG_WRITE_RAM 0x1 /* Write RAM flag */
+#define XTG_WRITE_RAM_ZERO 0x2 /* Write Zero flag */
+
+/* Bytes per entry */
+#define XTG_CRAM_BYTES_PER_ENTRY 16 /* CRAM bytes per entry */
+#define XTG_PRAM_BYTES_PER_ENTRY 4 /* PRAM bytes per entry */
+
+/* Interrupt Definitions */
+#define XTG_MASTER_CMP_INTR 0x1 /* Master complete intr flag */
+#define XTG_MASTER_ERR_INTR 0x2 /* Master error intr flag */
+#define XTG_SLAVE_ERR_INTR 0x4 /* Slave error intr flag */
+
+/*
+ * Version value of the trafgen core.
+ * For the initial IP release the version(v1.0) value is 0x47
+ * From the v2.0 IP and onwards the value starts from 0x20.
+ * For eg:
+ * v2.1 -> 0x21
+ * v2.2 -> 0x22 ... so on.
+ *
+ */
+#define XTG_INIT_VERSION 0x47 /* Trafgen initial version(v1.0) */
+
+/* Macro */
+#define to_xtg_dev_info(n) ((struct xtg_dev_info *)dev_get_drvdata(n))
+
+#define CMD_WDS 0x4 /* No of words in command ram per command */
+#define EXT_WDS 0x1 /* No of words in extended ram per command */
+#define MSB_INDEX 0x4
+/**
+ * struct xtg_cram - Command RAM structure
+ * @addr: Address Driven to a*_addr line
+ * @valid_cmd: Valid Command
+ * @last_addr: Last address
+ * @prot: Driven to a*_prot line
+ * @id: Driven to a*_id line
+ * @size: Driven to a*_size line
+ * @burst: Driven to a*_burst line
+ * @lock: Driven to a*_lock line
+ * @length: Driven to a*_len line
+ * @my_dpnd: My Depend command number
+ * @other_dpnd: Other depend command number
+ * @mram_idx: Master RAM index
+ * @qos: Driven to a*_qos line
+ * @user: Driven to a*_user line
+ * @cache: Driven to a*_cache line
+ * @expected_resp: Expected response
+ * @index: Command Index
+ * @is_write_block: Write/Read block
+ * @is_valid_req: Unique signature
+ *
+ * FIXME: This structure is shared with the user application and
+ * hence need to be synchronized. We know these kind of structures
+ * should not be defined in the driver and this need to be fixed
+ * if found a proper placeholder (in uapi/).
+ *
+ * NOTE(review): because the layout is shared with user space, do not
+ * reorder or resize fields. is_valid_req presumably carries VALID_SIG
+ * (0xa5a5a5a5) - verify against the user application.
+ */
+struct xtg_cram {
+	phys_addr_t addr;
+	u32 valid_cmd;
+	u32 last_addr;
+	u32 prot;
+	u32 id;
+	u32 size;
+	u32 burst;
+	u32 lock;
+	u32 length;
+	u32 my_dpnd;
+	u32 other_dpnd;
+	u32 mram_idx;
+	u32 qos;
+	u32 user;
+	u32 cache;
+	u32 expected_resp;
+	u16 index;
+	bool is_write_block;
+	u32 is_valid_req;
+};
+
+/**
+ * struct xtg_pram - Parameter RAM structure
+ * @op_cntl0: Control field 0
+ * @op_cntl1: Control field 1
+ * @op_cntl2: Control field 2
+ * @addr_mode: Address mode
+ * @interval_mode: Interval mode
+ * @id_mode: Id mode
+ * @opcode: Opcode
+ * @index: Command Index
+ * @is_write_block: Write/Read block
+ * @is_valid_req: Unique signature
+ *
+ * FIXME: This structure is shared with the user application and
+ * hence need to be synchronized. We know these kind of structures
+ * should not be defined in the driver and this need to be fixed
+ * if found a proper placeholder (in uapi/).
+ *
+ * NOTE(review): layout is shared with user space - do not reorder or
+ * resize fields. is_valid_req presumably carries VALID_SIG
+ * (0xa5a5a5a5) - verify against the user application.
+ */
+struct xtg_pram {
+	u32 op_cntl0;
+	u32 op_cntl1;
+	u32 op_cntl2;
+	u32 addr_mode;
+	u32 interval_mode;
+	u32 id_mode;
+	u32 opcode;
+	u16 index;
+	bool is_write_block;
+	u32 is_valid_req;
+};
+
+/**
+ * struct xtg_dev_info - Global Driver structure
+ * @regs: Iomapped base address
+ * @dev: Device structure
+ * @phys_base_addr: Physical base address
+ * @last_rd_valid_idx: Last Read Valid Command Index
+ * @last_wr_valid_idx: Last Write Valid Command Index
+ * @id: Device instance id
+ * @xtg_mram_offset: MasterRam offset
+ * @clk: Input clock
+ *
+ * One instance per device; stored as drvdata and retrieved via the
+ * to_xtg_dev_info() macro.
+ */
+struct xtg_dev_info {
+	void __iomem *regs;
+	struct device *dev;
+	phys_addr_t phys_base_addr;
+	s16 last_rd_valid_idx;
+	s16 last_wr_valid_idx;
+	u32 id;
+	u32 xtg_mram_offset;
+	struct clk *clk;
+};
+
+/**
+ * enum xtg_sysfs_ioctl_opcode - Ioctl opcodes
+ * @XTG_GET_MASTER_CMP_STS: get master complete status
+ * @XTG_GET_SLV_CTRL_REG: get slave control reg status
+ * @XTG_GET_ERR_STS: get error status
+ * @XTG_GET_CFG_STS: get config status
+ * @XTG_GET_LAST_VALID_INDEX: get last valid index
+ * @XTG_GET_DEVICE_ID: get device id
+ * @XTG_GET_RESOURCE: get resource
+ * @XTG_GET_STATIC_ENABLE: get static mode traffic generation state
+ * @XTG_GET_STATIC_BURSTLEN: get static mode burst length
+ * @XTG_GET_STATIC_TRANSFERDONE: get static transfer done
+ * @XTG_GET_STREAM_ENABLE: get stream mode traffic generation state
+ * @XTG_GET_STREAM_TRANSFERLEN: get streaming mode transfer length
+ * @XTG_GET_STREAM_TRANSFERCNT: get streaming mode transfer count
+ * @XTG_GET_MASTER_LOOP_EN: get master loop enable status
+ * @XTG_GET_STREAM_TKTS1: get stream tstrb and tkeep set1 values
+ * @XTG_GET_STREAM_TKTS2: get stream tstrb and tkeep set2 values
+ * @XTG_GET_STREAM_TKTS3: get stream tstrb and tkeep set3 values
+ * @XTG_GET_STREAM_TKTS4: get stream tstrb and tkeep set4 values
+ * @XTG_GET_STREAM_CFG: get stream configuration values
+ * @XTG_START_MASTER_LOGIC: start master logic
+ * @XTG_SET_SLV_CTRL_REG: set slave control
+ * @XTG_CLEAR_ERRORS: clear errors
+ * @XTG_ENABLE_ERRORS: enable errors
+ * @XTG_ENABLE_INTRS: enable interrupts
+ * @XTG_CLEAR_MRAM: clear master ram
+ * @XTG_CLEAR_CRAM: clear command ram
+ * @XTG_CLEAR_PRAM: clear parameter ram
+ * @XTG_SET_STATIC_ENABLE: enable static mode traffic generation
+ * @XTG_SET_STATIC_DISABLE: disable static mode traffic generation
+ * @XTG_SET_STATIC_BURSTLEN: set static mode burst length
+ * @XTG_SET_STATIC_TRANSFERDONE: set static transfer done
+ * @XTG_SET_STREAM_ENABLE: enable streaming mode traffic generation
+ * @XTG_SET_STREAM_DISABLE: disable streaming mode traffic generation
+ * @XTG_SET_STREAM_TRANSFERLEN: set streaming mode transfer length
+ * @XTG_SET_STREAM_TRANSFERCNT: set streaming mode transfer count
+ * @XTG_SET_STREAM_TKTS1: set stream tstrb and tkeep set1 values
+ * @XTG_SET_STREAM_TKTS2: set stream tstrb and tkeep set2 values
+ * @XTG_SET_STREAM_TKTS3: set stream tstrb and tkeep set3 values
+ * @XTG_SET_STREAM_TKTS4: set stream tstrb and tkeep set4 values
+ * @XTG_SET_STREAM_CFG: set stream configuration values
+ * @XTG_MASTER_LOOP_EN: enable master loop
+ */
+enum xtg_sysfs_ioctl_opcode {
+	XTG_GET_MASTER_CMP_STS,
+	XTG_GET_SLV_CTRL_REG,
+	XTG_GET_ERR_STS,
+	XTG_GET_CFG_STS,
+	XTG_GET_LAST_VALID_INDEX,
+	XTG_GET_DEVICE_ID,
+	XTG_GET_RESOURCE,
+	XTG_GET_STATIC_ENABLE,
+	XTG_GET_STATIC_BURSTLEN,
+	XTG_GET_STATIC_TRANSFERDONE,
+	XTG_GET_STREAM_ENABLE,
+	XTG_GET_STREAM_TRANSFERLEN,
+	XTG_GET_MASTER_LOOP_EN,
+	XTG_GET_STREAM_TKTS1,
+	XTG_GET_STREAM_TKTS2,
+	XTG_GET_STREAM_TKTS3,
+	XTG_GET_STREAM_TKTS4,
+	XTG_GET_STREAM_CFG,
+	XTG_GET_STREAM_TRANSFERCNT,
+	XTG_START_MASTER_LOGIC,
+	XTG_SET_SLV_CTRL_REG,
+	XTG_CLEAR_ERRORS,
+	XTG_ENABLE_ERRORS,
+	XTG_ENABLE_INTRS,
+	XTG_CLEAR_MRAM,
+	XTG_CLEAR_CRAM,
+	XTG_CLEAR_PRAM,
+	XTG_SET_STATIC_ENABLE,
+	XTG_SET_STATIC_DISABLE,
+	XTG_SET_STATIC_BURSTLEN,
+	XTG_SET_STATIC_TRANSFERDONE,
+	XTG_SET_STREAM_ENABLE,
+	XTG_SET_STREAM_DISABLE,
+	XTG_SET_STREAM_TRANSFERLEN,
+	XTG_SET_STREAM_TRANSFERCNT,
+	XTG_SET_STREAM_TKTS1,
+	XTG_SET_STREAM_TKTS2,
+	XTG_SET_STREAM_TKTS3,
+	XTG_SET_STREAM_TKTS4,
+	XTG_SET_STREAM_CFG,
+	XTG_MASTER_LOOP_EN
+};
+
+/**
+ * xtg_access_rams - Write/Read Master/Command/Parameter RAM
+ * @tg: Pointer to xtg_dev_info structure
+ * @where: Offset from base, in bytes
+ * @count: Number of bytes to write/read (processed in 4-byte words)
+ * @flags: Read/Write/Write Zero
+ * @data: Data pointer (unused for XTG_WRITE_RAM_ZERO)
+ */
+static void xtg_access_rams(struct xtg_dev_info *tg, int where,
+ int count, int flags, u32 *data)
+{
+ u32 index;
+
+ switch (flags) {
+ case XTG_WRITE_RAM_ZERO:
+ memset_io(tg->regs + where, 0, count);
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ /*
+  * On 64-bit targets the address MSBs live in a separate RAM
+  * block; clear the mirrored MSB word as well.
+  */
+ writel(0x0, tg->regs + where +
+ (XTG_COMMAND_RAM_MSB_OFFSET - XTG_COMMAND_RAM_OFFSET) +
+ XTG_EXTCMD_RAM_BLOCK_SIZE - XTG_CMD_RAM_BLOCK_SIZE);
+#endif
+ break;
+ case XTG_WRITE_RAM:
+ for (index = 0; count > 0; index++, count -= 4)
+ writel(data[index], tg->regs + where + index * 4);
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ /*
+ * This additional logic is required only for command ram.
+ * when writing to READ Command RAM write higher address to READ addr
+ * RAM
+ */
+ /*
+  * NOTE(review): "(where - XTG_COMMAND_RAM_OFFSET) / 4" divides a
+  * byte offset by 4 before adding it to a byte-addressed base —
+  * confirm this scaling matches the MSB RAM layout.
+  */
+ if (where >= XTG_COMMAND_RAM_OFFSET &&
+ where < XTG_WRITE_COMMAND_RAM_OFFSET)
+ writel(data[MSB_INDEX], tg->regs + XTG_COMMAND_RAM_OFFSET +
+ (where - XTG_COMMAND_RAM_OFFSET) / 4 +
+ (XTG_COMMAND_RAM_MSB_OFFSET - XTG_COMMAND_RAM_OFFSET));
+ /*
+ * Writing to WRITE Command RAM write higher address to WRITE addr RAM
+ */
+ if (where >= XTG_WRITE_COMMAND_RAM_OFFSET &&
+ where < XTG_COMMAND_RAM_MSB_OFFSET)
+ writel(data[MSB_INDEX], tg->regs +
+ XTG_WRITE_COMMAND_RAM_OFFSET +
+ (where - XTG_WRITE_COMMAND_RAM_OFFSET) / 4 +
+ (XTG_COMMAND_RAM_MSB_OFFSET - XTG_COMMAND_RAM_OFFSET) +
+ XTG_EXTCMD_RAM_BLOCK_SIZE - XTG_CMD_RAM_BLOCK_SIZE);
+#endif
+ break;
+ case XTG_READ_RAM:
+ for (index = 0; count > 0; index++, count -= 4)
+ data[index] = readl(tg->regs + where + index * 4);
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ /* Mirror of the write path: fetch the MSB word for command RAM */
+ if (where >= XTG_COMMAND_RAM_OFFSET &&
+ where < XTG_WRITE_COMMAND_RAM_OFFSET)
+ data[MSB_INDEX] = readl(tg->regs + XTG_COMMAND_RAM_OFFSET +
+ (where - XTG_COMMAND_RAM_OFFSET) / 4 +
+ (XTG_COMMAND_RAM_MSB_OFFSET - XTG_COMMAND_RAM_OFFSET));
+
+ if (where >= XTG_WRITE_COMMAND_RAM_OFFSET &&
+ where < XTG_COMMAND_RAM_MSB_OFFSET)
+ data[MSB_INDEX] = readl(tg->regs +
+ XTG_WRITE_COMMAND_RAM_OFFSET +
+ (where - XTG_WRITE_COMMAND_RAM_OFFSET) / 4 +
+ (XTG_COMMAND_RAM_MSB_OFFSET - XTG_COMMAND_RAM_OFFSET) +
+ XTG_EXTCMD_RAM_BLOCK_SIZE - XTG_CMD_RAM_BLOCK_SIZE);
+#endif
+ break;
+ }
+}
+
+/**
+ * xtg_prepare_cmd_words - Prepares all four Command RAM words
+ * @tg: Pointer to xtg_dev_info structure (currently unused here)
+ * @cmdp: Pointer to xtg_cram structure
+ * @cmd_words: Pointer to Command Words that needs to be prepared
+ *
+ * Packs the user-supplied command fields into the word layout expected
+ * by the command RAM; each field is masked then shifted into place.
+ */
+static void xtg_prepare_cmd_words(struct xtg_dev_info *tg,
+ const struct xtg_cram *cmdp, u32 *cmd_words)
+{
+ /* Command Word 0: low 32 bits of the target address */
+ cmd_words[0] = lower_32_bits(cmdp->addr);
+
+ /* Command Word 4 */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ cmd_words[MSB_INDEX] = upper_32_bits(cmdp->addr);
+#endif
+
+ /* Command Word 1 */
+ cmd_words[1] = 0;
+ cmd_words[1] |= (cmdp->length & XTG_LEN_MASK) << XTG_LEN_SHIFT;
+ cmd_words[1] |= (cmdp->lock & XTG_LOCK_MASK) << XTG_LOCK_SHIFT;
+ cmd_words[1] |= (cmdp->burst & XTG_BURST_MASK) << XTG_BURST_SHIFT;
+ cmd_words[1] |= (cmdp->size & XTG_SIZE_MASK) << XTG_SIZE_SHIFT;
+ cmd_words[1] |= (cmdp->id & XTG_ID_MASK) << XTG_ID_SHIFT;
+ cmd_words[1] |= (cmdp->prot & XTG_PROT_MASK) << XTG_PROT_SHIFT;
+ cmd_words[1] |= (cmdp->last_addr & XTG_LAST_ADDR_MASK) <<
+ XTG_LAST_ADDR_SHIFT;
+ cmd_words[1] |= (cmdp->valid_cmd & XTG_VALID_CMD_MASK) <<
+ XTG_VALID_CMD_SHIFT;
+
+ /* Command Word 2 */
+ cmd_words[2] = 0;
+ cmd_words[2] |= (cmdp->mram_idx & XTG_MSTRAM_INDEX_MASK) <<
+ XTG_MSTRAM_INDEX_SHIFT;
+ cmd_words[2] |= (cmdp->other_dpnd & XTG_OTHER_DEPEND_MASK) <<
+ XTG_OTHER_DEPEND_SHIFT;
+ cmd_words[2] |= (cmdp->my_dpnd & XTG_MY_DEPEND_MASK) <<
+ XTG_MY_DEPEND_SHIFT;
+
+ /* Command Word 3 */
+ cmd_words[3] = 0;
+ cmd_words[3] |= (cmdp->qos & XTG_QOS_MASK) << XTG_QOS_SHIFT;
+ cmd_words[3] |= (cmdp->user & XTG_USER_MASK) << XTG_USER_SHIFT;
+ cmd_words[3] |= (cmdp->cache & XTG_CACHE_MASK) << XTG_CACHE_SHIFT;
+ cmd_words[3] |= (cmdp->expected_resp & XTG_EXPECTED_RESP_MASK) <<
+ XTG_EXPECTED_RESP_SHIFT;
+}
+
+/**
+ * xtg_prepare_param_word - Prepares Parameter RAM word
+ * @tg: Pointer to xtg_dev_info structure (currently unused here)
+ * @cmdp: Pointer to xtg_pram structure
+ * @param_word: Pointer to Param Word that needs to be prepared
+ *
+ * Encodes the opcode-specific control fields; an XTG_PARAM_OP_NOP
+ * request yields an all-zero word.
+ */
+static void xtg_prepare_param_word(struct xtg_dev_info *tg,
+ const struct xtg_pram *cmdp, u32 *param_word)
+{
+ *param_word = 0;
+ *param_word |= (cmdp->opcode & XTG_PARAM_OP_MASK) << XTG_PARAM_OP_SHIFT;
+ *param_word |= (cmdp->addr_mode & XTG_PARAM_ADDRMODE_MASK) <<
+ XTG_PARAM_ADDRMODE_SHIFT;
+ *param_word |= (cmdp->id_mode & XTG_PARAM_IDMODE_MASK) <<
+ XTG_PARAM_IDMODE_SHIFT;
+ *param_word |= (cmdp->interval_mode & XTG_PARAM_INTERVALMODE_MASK) <<
+ XTG_PARAM_INTERVALMODE_SHIFT;
+
+ switch (cmdp->opcode) {
+ case XTG_PARAM_OP_RPT:
+ case XTG_PARAM_OP_DELAY:
+ *param_word |= (cmdp->op_cntl0 & XTG_PARAM_COUNT_MASK) <<
+ XTG_PARAM_COUNT_SHIFT;
+ break;
+
+ case XTG_PARAM_OP_FIXEDRPT:
+ *param_word |= (cmdp->op_cntl0 & XTG_PARAM_ADDRRANGE_MASK) <<
+ XTG_PARAM_ADDRRANGE_SHIFT;
+ *param_word |= (cmdp->op_cntl1 & XTG_PARAM_DELAY_MASK) <<
+ XTG_PARAM_DELAY_SHIFT;
+ *param_word |= (cmdp->op_cntl2 & XTG_PARAM_DELAYRANGE_MASK) <<
+ XTG_PARAM_DELAYRANGE_SHIFT;
+ break;
+
+ case XTG_PARAM_OP_NOP:
+ /* NOP discards the mode bits set above as well */
+ *param_word = 0;
+ break;
+ }
+}
+
+/**
+ * xtg_sysfs_ioctl - Implements sysfs operations
+ * @dev: Device structure
+ * @buf: Value to write (parsed only for write opcodes)
+ * @opcode: Ioctl opcode
+ *
+ * Return: value read for get opcodes, 0 for set opcodes, or a negative
+ * errno if parsing @buf fails.
+ */
+static ssize_t xtg_sysfs_ioctl(struct device *dev, const char *buf,
+ enum xtg_sysfs_ioctl_opcode opcode)
+{
+ struct xtg_dev_info *tg = to_xtg_dev_info(dev);
+ unsigned long wrval = 0;
+ ssize_t status, rdval = 0;
+
+ /* Every opcode after the last getter carries a user-supplied value */
+ if (opcode > XTG_GET_STREAM_TRANSFERCNT) {
+ status = kstrtoul(buf, 0, &wrval);
+ if (status < 0)
+ return status;
+ }
+
+ switch (opcode) {
+ case XTG_GET_MASTER_CMP_STS:
+ rdval = (readl(tg->regs + XTG_MCNTL_OFFSET) &
+ XTG_MCNTL_MSTEN_MASK) ? 1 : 0;
+ break;
+
+ case XTG_GET_MASTER_LOOP_EN:
+ rdval = (readl(tg->regs + XTG_MCNTL_OFFSET) &
+ XTG_MCNTL_LOOPEN_MASK) ? 1 : 0;
+ break;
+
+ case XTG_GET_SLV_CTRL_REG:
+ rdval = readl(tg->regs + XTG_SCNTL_OFFSET);
+ break;
+
+ case XTG_GET_ERR_STS:
+ rdval = readl(tg->regs + XTG_ERR_STS_OFFSET) &
+ XTG_ERR_ALL_ERRS_MASK;
+ break;
+
+ case XTG_GET_CFG_STS:
+ rdval = readl(tg->regs + XTG_CFG_STS_OFFSET);
+ break;
+
+ case XTG_GET_LAST_VALID_INDEX:
+ /* Write index in the upper half-word, read index in the lower */
+ rdval = (((tg->last_wr_valid_idx << 16) & 0xffff0000) |
+ (tg->last_rd_valid_idx & 0xffff));
+ break;
+
+ case XTG_GET_DEVICE_ID:
+ rdval = tg->id;
+ break;
+
+ case XTG_GET_RESOURCE:
+ rdval = (unsigned long)tg->regs;
+ break;
+
+ case XTG_GET_STATIC_ENABLE:
+ rdval = readl(tg->regs + XTG_STATIC_CNTL_OFFSET);
+ break;
+
+ case XTG_GET_STATIC_BURSTLEN:
+ rdval = readl(tg->regs + XTG_STATIC_LEN_OFFSET);
+ break;
+
+ case XTG_GET_STATIC_TRANSFERDONE:
+ rdval = (readl(tg->regs + XTG_STATIC_CNTL_OFFSET) &
+ XTG_STATIC_CNTL_TD_MASK);
+ break;
+
+ case XTG_GET_STREAM_ENABLE:
+ rdval = readl(tg->regs + XTG_STREAM_CNTL_OFFSET);
+ break;
+
+ case XTG_GET_STREAM_TRANSFERLEN:
+ rdval = (readl(tg->regs + XTG_STREAM_TL_OFFSET) &
+ XTG_STREAM_TL_TLEN_MASK);
+ break;
+
+ case XTG_GET_STREAM_TRANSFERCNT:
+ rdval = ((readl(tg->regs + XTG_STREAM_TL_OFFSET) &
+ XTG_STREAM_TL_TCNT_MASK) >>
+ XTG_STREAM_TL_TCNT_SHIFT);
+ break;
+
+ case XTG_GET_STREAM_TKTS1:
+ rdval = readl(tg->regs + XTG_STREAM_TKTS1_OFFSET);
+ break;
+ case XTG_GET_STREAM_TKTS2:
+ rdval = readl(tg->regs + XTG_STREAM_TKTS2_OFFSET);
+ break;
+ case XTG_GET_STREAM_TKTS3:
+ rdval = readl(tg->regs + XTG_STREAM_TKTS3_OFFSET);
+ break;
+ case XTG_GET_STREAM_TKTS4:
+ rdval = readl(tg->regs + XTG_STREAM_TKTS4_OFFSET);
+ break;
+
+ case XTG_GET_STREAM_CFG:
+ rdval = (readl(tg->regs + XTG_STREAM_CFG_OFFSET));
+ break;
+
+ case XTG_START_MASTER_LOGIC:
+ /* Start only; writing 0 is a no-op (master stops by itself) */
+ if (wrval)
+ writel(readl(tg->regs + XTG_MCNTL_OFFSET) |
+ XTG_MCNTL_MSTEN_MASK,
+ tg->regs + XTG_MCNTL_OFFSET);
+ break;
+
+ case XTG_MASTER_LOOP_EN:
+ if (wrval)
+ writel(readl(tg->regs + XTG_MCNTL_OFFSET) |
+ XTG_MCNTL_LOOPEN_MASK,
+ tg->regs + XTG_MCNTL_OFFSET);
+ else
+ writel(readl(tg->regs + XTG_MCNTL_OFFSET) &
+ ~XTG_MCNTL_LOOPEN_MASK,
+ tg->regs + XTG_MCNTL_OFFSET);
+ break;
+
+ case XTG_SET_SLV_CTRL_REG:
+ writel(wrval, tg->regs + XTG_SCNTL_OFFSET);
+ break;
+
+ case XTG_ENABLE_ERRORS:
+ wrval &= XTG_ERR_ALL_ERRS_MASK;
+ writel(wrval, tg->regs + XTG_ERR_EN_OFFSET);
+ break;
+
+ case XTG_CLEAR_ERRORS:
+ wrval &= XTG_ERR_ALL_ERRS_MASK;
+ writel(readl(tg->regs + XTG_ERR_STS_OFFSET) | wrval,
+ tg->regs + XTG_ERR_STS_OFFSET);
+ break;
+
+ case XTG_ENABLE_INTRS:
+ if (wrval & XTG_MASTER_CMP_INTR) {
+ pr_info("Enabling Master Complete Interrupt\n");
+ writel(readl(tg->regs + XTG_ERR_EN_OFFSET) |
+ XTG_ERR_EN_MSTIRQEN_MASK,
+ tg->regs + XTG_ERR_EN_OFFSET);
+ }
+ if (wrval & XTG_MASTER_ERR_INTR) {
+ pr_info("Enabling Interrupt on Master Errors\n");
+ writel(readl(tg->regs + XTG_MSTERR_INTR_OFFSET) |
+ XTG_MSTERR_INTR_MINTREN_MASK,
+ tg->regs + XTG_MSTERR_INTR_OFFSET);
+ }
+ if (wrval & XTG_SLAVE_ERR_INTR) {
+ pr_info("Enabling Interrupt on Slave Errors\n");
+ writel(readl(tg->regs + XTG_SCNTL_OFFSET) |
+ XTG_SCNTL_ERREN_MASK,
+ tg->regs + XTG_SCNTL_OFFSET);
+ }
+ break;
+
+ case XTG_CLEAR_MRAM:
+ xtg_access_rams(tg, tg->xtg_mram_offset,
+ XTG_MASTER_RAM_SIZE,
+ XTG_WRITE_RAM_ZERO, NULL);
+ break;
+
+ case XTG_CLEAR_CRAM:
+ xtg_access_rams(tg, XTG_COMMAND_RAM_OFFSET,
+ XTG_COMMAND_RAM_SIZE,
+ XTG_WRITE_RAM_ZERO, NULL);
+ break;
+
+ case XTG_CLEAR_PRAM:
+ xtg_access_rams(tg, XTG_PARAM_RAM_OFFSET,
+ XTG_PARAM_RAM_SIZE,
+ XTG_WRITE_RAM_ZERO, NULL);
+ break;
+
+ case XTG_SET_STATIC_ENABLE:
+ if (wrval) {
+ wrval &= XTG_STATIC_CNTL_STEN_MASK;
+ writel(readl(tg->regs + XTG_STATIC_CNTL_OFFSET) | wrval,
+ tg->regs + XTG_STATIC_CNTL_OFFSET);
+ } else {
+ writel(readl(tg->regs + XTG_STATIC_CNTL_OFFSET) &
+ ~XTG_STATIC_CNTL_STEN_MASK,
+ tg->regs + XTG_STATIC_CNTL_OFFSET);
+ }
+ break;
+
+ case XTG_SET_STATIC_BURSTLEN:
+ writel(wrval, tg->regs + XTG_STATIC_LEN_OFFSET);
+ break;
+
+ case XTG_SET_STATIC_TRANSFERDONE:
+ wrval |= XTG_STATIC_CNTL_TD_MASK;
+ writel(readl(tg->regs + XTG_STATIC_CNTL_OFFSET) | wrval,
+ tg->regs + XTG_STATIC_CNTL_OFFSET);
+ break;
+
+ case XTG_SET_STREAM_ENABLE:
+ if (wrval) {
+ rdval = readl(tg->regs + XTG_STREAM_CNTL_OFFSET);
+ /* was a stray comma operator; make it a statement */
+ rdval |= XTG_STREAM_CNTL_STEN_MASK;
+ writel(rdval,
+ tg->regs + XTG_STREAM_CNTL_OFFSET);
+ } else {
+ writel(readl(tg->regs + XTG_STREAM_CNTL_OFFSET) &
+ ~XTG_STREAM_CNTL_STEN_MASK,
+ tg->regs + XTG_STREAM_CNTL_OFFSET);
+ }
+ break;
+
+ case XTG_SET_STREAM_TRANSFERLEN:
+ wrval &= XTG_STREAM_TL_TLEN_MASK;
+ rdval = readl(tg->regs + XTG_STREAM_TL_OFFSET);
+ rdval &= ~XTG_STREAM_TL_TLEN_MASK;
+ writel(rdval | wrval,
+ tg->regs + XTG_STREAM_TL_OFFSET);
+ break;
+
+ case XTG_SET_STREAM_TRANSFERCNT:
+ wrval = ((wrval << XTG_STREAM_TL_TCNT_SHIFT) &
+ XTG_STREAM_TL_TCNT_MASK);
+ rdval = readl(tg->regs + XTG_STREAM_TL_OFFSET);
+ rdval = rdval & ~XTG_STREAM_TL_TCNT_MASK;
+ writel(rdval | wrval,
+ tg->regs + XTG_STREAM_TL_OFFSET);
+ break;
+
+ case XTG_SET_STREAM_TKTS1:
+ writel(wrval, tg->regs + XTG_STREAM_TKTS1_OFFSET);
+ break;
+ case XTG_SET_STREAM_TKTS2:
+ writel(wrval, tg->regs + XTG_STREAM_TKTS2_OFFSET);
+ break;
+ case XTG_SET_STREAM_TKTS3:
+ writel(wrval, tg->regs + XTG_STREAM_TKTS3_OFFSET);
+ break;
+ case XTG_SET_STREAM_TKTS4:
+ writel(wrval, tg->regs + XTG_STREAM_TKTS4_OFFSET);
+ break;
+
+ case XTG_SET_STREAM_CFG:
+ writel(wrval, tg->regs + XTG_STREAM_CFG_OFFSET);
+ break;
+
+ default:
+ break;
+ }
+
+ return rdval;
+}
+
+/*
+ * Sysfs functions.
+ *
+ * Each show/store pair below is a thin wrapper that forwards to
+ * xtg_sysfs_ioctl() with the matching opcode; store callbacks pass the
+ * raw user string through (xtg_sysfs_ioctl() parses it) and always
+ * report the whole write as consumed.
+ */
+
+static ssize_t id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_DEVICE_ID);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+static DEVICE_ATTR_RO(id);
+
+static ssize_t resource_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_RESOURCE);
+
+ return snprintf(buf, PAGE_SIZE, "0x%08zx\n", rdval);
+}
+static DEVICE_ATTR_RO(resource);
+
+static ssize_t master_start_stop_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_MASTER_CMP_STS);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+
+static ssize_t master_start_stop_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_START_MASTER_LOGIC);
+
+ return size;
+}
+static DEVICE_ATTR_RW(master_start_stop);
+
+static ssize_t config_slave_status_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_SLV_CTRL_REG);
+
+ return snprintf(buf, PAGE_SIZE, "0x%08zx\n", rdval);
+}
+
+static ssize_t config_slave_status_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_SLV_CTRL_REG);
+
+ return size;
+}
+static DEVICE_ATTR_RW(config_slave_status);
+
+/* Reading shows latched errors; writing a mask clears those errors */
+static ssize_t err_sts_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_ERR_STS);
+
+ return snprintf(buf, PAGE_SIZE, "0x%08zx\n", rdval);
+}
+
+static ssize_t err_sts_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_CLEAR_ERRORS);
+
+ return size;
+}
+static DEVICE_ATTR_RW(err_sts);
+
+/* Write-only: bit mask selecting which error classes to enable */
+static ssize_t err_en_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_ENABLE_ERRORS);
+
+ return size;
+}
+static DEVICE_ATTR_WO(err_en);
+
+/* Write-only: bit mask selecting which interrupt sources to enable */
+static ssize_t intr_en_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_ENABLE_INTRS);
+
+ return size;
+}
+static DEVICE_ATTR_WO(intr_en);
+
+static ssize_t last_valid_index_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_LAST_VALID_INDEX);
+
+ return snprintf(buf, PAGE_SIZE, "0x%08zx\n", rdval);
+}
+static DEVICE_ATTR_RO(last_valid_index);
+
+static ssize_t config_sts_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_CFG_STS);
+
+ return snprintf(buf, PAGE_SIZE, "0x%08zx\n", rdval);
+}
+static DEVICE_ATTR_RO(config_sts);
+
+/* Any write zeroes the master RAM */
+static ssize_t mram_clear_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_CLEAR_MRAM);
+
+ return size;
+}
+static DEVICE_ATTR_WO(mram_clear);
+
+/* Any write zeroes the command RAM */
+static ssize_t cram_clear_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_CLEAR_CRAM);
+
+ return size;
+}
+static DEVICE_ATTR_WO(cram_clear);
+
+/* Any write zeroes the parameter RAM */
+static ssize_t pram_clear_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ /* Bug fix: was XTG_CLEAR_CRAM (copy/paste), which cleared the
+  * command RAM instead of the parameter RAM this attribute names.
+  */
+ xtg_sysfs_ioctl(dev, buf, XTG_CLEAR_PRAM);
+
+ return size;
+}
+
+/* Static-mode traffic controls */
+static ssize_t static_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STATIC_ENABLE);
+
+ return snprintf(buf, PAGE_SIZE, "0x%08zx\n", rdval);
+}
+
+static ssize_t static_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_STATIC_ENABLE);
+
+ return size;
+}
+static DEVICE_ATTR_RW(static_enable);
+
+static ssize_t static_burstlen_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STATIC_BURSTLEN);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+
+static ssize_t static_burstlen_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_STATIC_BURSTLEN);
+
+ return size;
+}
+static DEVICE_ATTR_RW(static_burstlen);
+
+/* Streaming-mode configuration register */
+static ssize_t stream_cfg_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STREAM_CFG);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+
+static ssize_t stream_cfg_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_STREAM_CFG);
+
+ return size;
+}
+static DEVICE_ATTR_RW(stream_cfg);
+
+/* Streaming tstrb/tkeep register sets 4..1 (one RW attribute each) */
+static ssize_t stream_tkts4_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STREAM_TKTS4);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+
+static ssize_t stream_tkts4_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_STREAM_TKTS4);
+
+ return size;
+}
+static DEVICE_ATTR_RW(stream_tkts4);
+
+static ssize_t stream_tkts3_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STREAM_TKTS3);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+
+static ssize_t stream_tkts3_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_STREAM_TKTS3);
+
+ return size;
+}
+static DEVICE_ATTR_RW(stream_tkts3);
+
+static ssize_t stream_tkts2_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STREAM_TKTS2);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+
+static ssize_t stream_tkts2_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_STREAM_TKTS2);
+
+ return size;
+}
+static DEVICE_ATTR_RW(stream_tkts2);
+
+static ssize_t stream_tkts1_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STREAM_TKTS1);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+
+static ssize_t stream_tkts1_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_STREAM_TKTS1);
+
+ return size;
+}
+static DEVICE_ATTR_RW(stream_tkts1);
+
+static ssize_t static_transferdone_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STATIC_TRANSFERDONE);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+
+static ssize_t static_transferdone_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_STATIC_TRANSFERDONE);
+
+ return size;
+}
+static DEVICE_ATTR_RW(static_transferdone);
+
+static ssize_t reset_static_transferdone_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STATIC_TRANSFERDONE);
+
+ /*
+  * NOTE(review): the ioctl returns the register masked with
+  * XTG_STATIC_CNTL_TD_MASK, yet it is compared with == against
+  * XTG_STATIC_CNTL_RESET_MASK — confirm the two masks are meant to
+  * coincide here (an & test would be the usual form).
+  */
+ if (rdval == XTG_STATIC_CNTL_RESET_MASK)
+ rdval = 1;
+ else
+ rdval = 0;
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+static DEVICE_ATTR_RO(reset_static_transferdone);
+
+/* Streaming enable / transfer-length / transfer-count / loop controls */
+static ssize_t stream_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STREAM_ENABLE);
+
+ return snprintf(buf, PAGE_SIZE, "0x%08zx\n", rdval);
+}
+
+static ssize_t stream_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_STREAM_ENABLE);
+
+ return size;
+}
+static DEVICE_ATTR_RW(stream_enable);
+
+static ssize_t stream_transferlen_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STREAM_TRANSFERLEN);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+
+static ssize_t stream_transferlen_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_STREAM_TRANSFERLEN);
+
+ return size;
+}
+static DEVICE_ATTR_RW(stream_transferlen);
+
+static ssize_t stream_transfercnt_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_STREAM_TRANSFERCNT);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+
+static ssize_t stream_transfercnt_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_SET_STREAM_TRANSFERCNT);
+
+ return size;
+}
+static DEVICE_ATTR_RW(stream_transfercnt);
+
+static ssize_t loop_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rdval = xtg_sysfs_ioctl(dev, buf, XTG_GET_MASTER_LOOP_EN);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", rdval);
+}
+
+static ssize_t loop_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ xtg_sysfs_ioctl(dev, buf, XTG_MASTER_LOOP_EN);
+
+ return size;
+}
+static DEVICE_ATTR_RW(loop_enable);
+
+/* Parameter RAM is write-only through sysfs; reads are stubbed out */
+static ssize_t xtg_pram_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ pr_info("No read access to Parameter RAM\n");
+
+ return 0;
+}
+
+/*
+ * Write either raw words at @off, or — when @count matches
+ * sizeof(struct xtg_pram) and the request carries VALID_SIG — a single
+ * encoded parameter entry placed by index/block.
+ */
+static ssize_t xtg_pram_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct xtg_dev_info *tg =
+ to_xtg_dev_info(container_of(kobj, struct device, kobj));
+ u32 *data = (u32 *)buf;
+
+ /*
+  * NOTE(review): only @off is range-checked; with off > 0 the capped
+  * @count can still make off + count exceed the PRAM — confirm the
+  * sysfs core's size clamp (bin_attr.size) prevents that.  Also,
+  * -ENOMEM for a range error is unusual (-EINVAL is conventional).
+  */
+ if (off >= XTG_PARAM_RAM_SIZE) {
+ pr_err("Requested Write len exceeds 2K PRAM size\n");
+ return -ENOMEM;
+ }
+
+ if (count >= XTG_PARAM_RAM_SIZE)
+ count = XTG_PARAM_RAM_SIZE;
+
+ /* Program each command */
+ if (count == sizeof(struct xtg_pram)) {
+ struct xtg_pram *cmdp = (struct xtg_pram *)buf;
+ u32 param_word;
+
+ if (!cmdp)
+ return -EINVAL;
+
+ if (cmdp->is_valid_req == VALID_SIG) {
+ /* Prepare parameter word */
+ xtg_prepare_param_word(tg, cmdp, &param_word);
+
+ count = XTG_PRAM_BYTES_PER_ENTRY;
+ data = &param_word;
+
+ /* Maximum command entries are 256 */
+ if (cmdp->index > MAX_NUM_ENTRIES)
+ return -EINVAL;
+
+ /* Calculate the block index */
+ if (cmdp->is_write_block)
+ off = XTG_PRM_RAM_BLOCK_SIZE +
+ cmdp->index * count;
+ else
+ off = cmdp->index * count;
+ }
+ }
+
+ off += XTG_PARAM_RAM_OFFSET;
+ xtg_access_rams(tg, off, count, XTG_WRITE_RAM, data);
+
+ return count;
+}
+
+static int xtg_pram_mmap(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ struct vm_area_struct *vma)
+{
+ struct xtg_dev_info *tg =
+ to_xtg_dev_info(container_of(kobj, struct device, kobj));
+ int ret;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_flags |= VM_IO;
+
+ /*
+  * NOTE(review): maps a fixed XTG_PARAM_RAM_SIZE regardless of the
+  * requested vma length — confirm userspace always mmaps exactly
+  * this size.
+  */
+ ret = remap_pfn_range(vma, vma->vm_start, (tg->phys_base_addr +
+ XTG_PARAM_RAM_OFFSET) >> PAGE_SHIFT,
+ XTG_PARAM_RAM_SIZE, vma->vm_page_prot);
+ return ret;
+}
+
+/* "parameter_ram": binary sysfs node (mode 0644, but reads are stubs) */
+static struct bin_attribute xtg_pram_attr = {
+ .attr = {
+ .name = "parameter_ram",
+ .mode = 0644,
+ },
+ .size = XTG_PARAM_RAM_SIZE,
+ .read = xtg_pram_read,
+ .write = xtg_pram_write,
+ .mmap = xtg_pram_mmap,
+};
+
+/* Raw read of the command RAM at @off (no local bounds clamp —
+ * presumably the sysfs core limits off/count to bin_attr.size; confirm).
+ */
+static ssize_t xtg_cram_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct xtg_dev_info *tg =
+ to_xtg_dev_info(container_of(kobj, struct device, kobj));
+
+ off += XTG_COMMAND_RAM_OFFSET;
+ xtg_access_rams(tg, off, count, XTG_READ_RAM, (u32 *)buf);
+
+ return count;
+}
+
+/*
+ * Write either raw words at @off, or — when @count matches
+ * sizeof(struct xtg_cram) and the request carries VALID_SIG — a single
+ * encoded command entry placed by index into the read or write block.
+ */
+static ssize_t xtg_cram_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct xtg_dev_info *tg =
+ to_xtg_dev_info(container_of(kobj, struct device, kobj));
+ u32 *data = (u32 *)buf;
+
+ if (off >= XTG_COMMAND_RAM_SIZE) {
+ pr_err("Requested Write len exceeds 8K CRAM size\n");
+ return -ENOMEM;
+ }
+
+ /* Program each command */
+ if (count == sizeof(struct xtg_cram)) {
+ struct xtg_cram *cmdp = (struct xtg_cram *)buf;
+ u32 cmd_words[CMD_WDS + EXT_WDS];
+
+ if (!cmdp)
+ return -EINVAL;
+
+ if (cmdp->is_valid_req == VALID_SIG) {
+ /* Prepare command words */
+ xtg_prepare_cmd_words(tg, cmdp, cmd_words);
+ count = XTG_CRAM_BYTES_PER_ENTRY;
+ data = cmd_words;
+
+ /* Maximum command entries are 256 */
+ if (cmdp->index > MAX_NUM_ENTRIES)
+ return -EINVAL;
+
+ /* Calculate the block index */
+ if (cmdp->is_write_block)
+ off = XTG_CMD_RAM_BLOCK_SIZE +
+ cmdp->index * count;
+ else
+ off = cmdp->index * count;
+
+ /* Store the valid command index */
+ if (cmdp->valid_cmd) {
+ if (cmdp->is_write_block)
+ tg->last_wr_valid_idx =
+ cmdp->index;
+ else
+ tg->last_rd_valid_idx =
+ cmdp->index;
+ }
+ }
+ }
+
+ off += XTG_COMMAND_RAM_OFFSET;
+ xtg_access_rams(tg, off, count, XTG_WRITE_RAM, data);
+
+ return count;
+}
+
+static int xtg_cram_mmap(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ struct vm_area_struct *vma)
+{
+ struct xtg_dev_info *tg =
+ to_xtg_dev_info(container_of(kobj, struct device, kobj));
+ int ret;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_flags |= VM_IO;
+
+ /* Maps the command RAM plus its extended (MSB) block */
+ ret = remap_pfn_range(vma, vma->vm_start, (tg->phys_base_addr +
+ XTG_COMMAND_RAM_OFFSET) >> PAGE_SHIFT,
+ XTG_COMMAND_RAM_SIZE + XTG_EXTCMD_RAM_SIZE,
+ vma->vm_page_prot);
+ return ret;
+}
+
+/* "command_ram": binary sysfs node for the command RAM */
+static struct bin_attribute xtg_cram_attr = {
+ .attr = {
+ .name = "command_ram",
+ .mode = 0644,
+ },
+ .size = XTG_COMMAND_RAM_SIZE,
+ .read = xtg_cram_read,
+ .write = xtg_cram_write,
+ .mmap = xtg_cram_mmap,
+};
+
+/* Raw read of the master RAM; the base offset is revision-dependent
+ * (tg->xtg_mram_offset is fixed up in probe).
+ */
+static ssize_t xtg_mram_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct xtg_dev_info *tg =
+ to_xtg_dev_info(container_of(kobj, struct device, kobj));
+
+ off += tg->xtg_mram_offset;
+ xtg_access_rams(tg, off, count, XTG_READ_RAM, (u32 *)buf);
+
+ return count;
+}
+
+static ssize_t xtg_mram_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct xtg_dev_info *tg =
+ to_xtg_dev_info(container_of(kobj, struct device, kobj));
+
+ if (off >= XTG_MASTER_RAM_SIZE) {
+ pr_err("Requested Write len exceeds 8K MRAM size\n");
+ return -ENOMEM;
+ }
+
+ off += tg->xtg_mram_offset;
+ xtg_access_rams(tg, off, count, XTG_WRITE_RAM, (u32 *)buf);
+
+ return count;
+}
+
+static int xtg_mram_mmap(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ struct vm_area_struct *vma)
+{
+ struct xtg_dev_info *tg =
+ to_xtg_dev_info(container_of(kobj, struct device, kobj));
+ int ret;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_flags |= VM_IO;
+
+ ret = remap_pfn_range(vma, vma->vm_start, (tg->phys_base_addr +
+ tg->xtg_mram_offset) >> PAGE_SHIFT,
+ XTG_MASTER_RAM_SIZE,
+ vma->vm_page_prot);
+ return ret;
+}
+
+/* "master_ram": binary sysfs node for the master RAM */
+static struct bin_attribute xtg_mram_attr = {
+ .attr = {
+ .name = "master_ram",
+ .mode = 0644,
+ },
+ .size = XTG_MASTER_RAM_SIZE,
+ .read = xtg_mram_read,
+ .write = xtg_mram_write,
+ .mmap = xtg_mram_mmap,
+};
+
+/* Binary sysfs nodes registered together with the plain attributes */
+static struct bin_attribute *xtg_bin_attrs[] = {
+ &xtg_mram_attr,
+ &xtg_pram_attr,
+ &xtg_cram_attr,
+ NULL,
+};
+
+static const struct attribute *xtg_attrs[] = {
+ &dev_attr_id.attr,
+ &dev_attr_resource.attr,
+ &dev_attr_master_start_stop.attr,
+ &dev_attr_config_slave_status.attr,
+ &dev_attr_err_en.attr,
+ &dev_attr_err_sts.attr,
+ &dev_attr_intr_en.attr,
+ &dev_attr_last_valid_index.attr,
+ &dev_attr_config_sts.attr,
+ &dev_attr_mram_clear.attr,
+ &dev_attr_cram_clear.attr,
+ &dev_attr_pram_clear.attr,
+ &dev_attr_static_enable.attr,
+ &dev_attr_static_burstlen.attr,
+ &dev_attr_static_transferdone.attr,
+ &dev_attr_stream_transfercnt.attr,
+ &dev_attr_stream_transferlen.attr,
+ &dev_attr_stream_tkts1.attr,
+ &dev_attr_stream_tkts2.attr,
+ &dev_attr_stream_tkts3.attr,
+ &dev_attr_stream_tkts4.attr,
+ &dev_attr_stream_cfg.attr,
+ &dev_attr_stream_enable.attr,
+ &dev_attr_reset_static_transferdone.attr,
+ &dev_attr_loop_enable.attr,
+ NULL,
+};
+
+static const struct attribute_group xtg_attributes = {
+ /* cast drops the const from xtg_attrs; declaring the array without
+  * const would avoid it — left as-is to keep this change doc-only */
+ .attrs = (struct attribute **)xtg_attrs,
+ .bin_attrs = xtg_bin_attrs,
+};
+
+/**
+ * xtg_cmp_intr_handler - Master Complete Interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the xtg_dev_info structure
+ *
+ * Return: IRQ_HANDLED always
+ */
+static irqreturn_t xtg_cmp_intr_handler(int irq, void *data)
+{
+ struct xtg_dev_info *tg = (struct xtg_dev_info *)data;
+
+ /* Ack the master-done status by writing the bit back (status bits
+  * appear to be write-1-to-clear — same pattern as the error handler).
+  */
+ writel(readl(tg->regs + XTG_ERR_STS_OFFSET) |
+ XTG_ERR_STS_MSTDONE_MASK, tg->regs + XTG_ERR_STS_OFFSET);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * xtg_err_intr_handler - Master/Slave Error Interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the xtg_dev_info structure
+ *
+ * Logs any pending error bits and writes them back to clear them.
+ *
+ * Return: IRQ_HANDLED always
+ */
+static irqreturn_t xtg_err_intr_handler(int irq, void *data)
+{
+ struct xtg_dev_info *tg = (struct xtg_dev_info *)data;
+ u32 value;
+
+ value = readl(tg->regs + XTG_ERR_STS_OFFSET) &
+ XTG_ERR_ALL_ERRS_MASK;
+
+ if (value) {
+ dev_err(tg->dev, "Found errors 0x%08x\n", value);
+ writel(readl(tg->regs + XTG_ERR_STS_OFFSET) | value,
+ tg->regs + XTG_ERR_STS_OFFSET);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * xtg_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This is the driver probe routine. It does all the memory
+ * allocation and creates sysfs entries for the device.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int xtg_probe(struct platform_device *pdev)
+{
+ struct xtg_dev_info *tg;
+ struct device_node *node;
+ struct resource *res;
+ struct device *dev;
+ int err, irq, var;
+
+ tg = devm_kzalloc(&pdev->dev, sizeof(*tg), GFP_KERNEL);
+ if (!tg)
+ return -ENOMEM;
+
+ tg->dev = &pdev->dev;
+ dev = tg->dev;
+ node = pdev->dev.of_node;
+
+ /* Map the registers */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ tg->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tg->regs))
+ return PTR_ERR(tg->regs);
+
+ /* Save physical base address (used later for RAM mmap) */
+ tg->phys_base_addr = res->start;
+
+ /* Get the device instance id */
+ err = of_property_read_u32(node, "xlnx,device-id", &tg->id);
+ if (err < 0) {
+ dev_err(&pdev->dev, "unable to read property");
+ return err;
+ }
+
+ /* Map the error interrupt, if it exists in the device tree. */
+ /*
+  * NOTE(review): irq < 0 also covers -EPROBE_DEFER, which is treated
+  * as "no IRQ" rather than deferring the probe — confirm intentional.
+  */
+ irq = platform_get_irq_byname(pdev, "err-out");
+ if (irq < 0) {
+ dev_dbg(&pdev->dev, "unable to get err irq");
+ } else {
+ err = devm_request_irq(&pdev->dev, irq, xtg_err_intr_handler,
+ 0, dev_name(&pdev->dev), tg);
+ if (err < 0) {
+ dev_err(&pdev->dev, "unable to request irq %d", irq);
+ return err;
+ }
+ }
+
+ /* Map the completion interrupt, if it exists in the device tree. */
+ irq = platform_get_irq_byname(pdev, "irq-out");
+ if (irq < 0) {
+ dev_dbg(&pdev->dev, "unable to get cmp irq");
+ } else {
+ err = devm_request_irq(&pdev->dev, irq, xtg_cmp_intr_handler,
+ 0, dev_name(&pdev->dev), tg);
+ if (err < 0) {
+ dev_err(&pdev->dev, "unable to request irq %d", irq);
+ return err;
+ }
+ }
+
+ /* Clock is optional: -ENOENT means "no clock", anything else fails */
+ tg->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tg->clk)) {
+ if (PTR_ERR(tg->clk) != -ENOENT) {
+ if (PTR_ERR(tg->clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "input clock not found\n");
+ return PTR_ERR(tg->clk);
+ }
+ tg->clk = NULL;
+ }
+
+ err = clk_prepare_enable(tg->clk);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return err;
+ }
+
+ /*
+ * Create sysfs file entries for the device
+ */
+ err = sysfs_create_group(&dev->kobj, &xtg_attributes);
+ if (err < 0) {
+ dev_err(tg->dev, "unable to create sysfs entries\n");
+ clk_disable_unprepare(tg->clk);
+ return err;
+ }
+
+ /*
+ * Initialize the write and read valid index values.
+ * Possible range of values for these variables is <0 255>.
+ */
+ tg->last_wr_valid_idx = -1;
+ tg->last_rd_valid_idx = -1;
+
+ dev_set_drvdata(&pdev->dev, tg);
+
+ /* Update the Proper MasterRam offset */
+ tg->xtg_mram_offset = XTG_MASTER_RAM_OFFSET;
+ var = readl(tg->regs + XTG_MCNTL_OFFSET) >> XTG_MCNTL_REV_SHIFT;
+ if (var == XTG_INIT_VERSION)
+ tg->xtg_mram_offset = XTG_MASTER_RAM_INIT_OFFSET;
+
+ dev_info(&pdev->dev, "Probing xilinx traffic generator success\n");
+
+ return 0;
+}
+
+/**
+ * xtg_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Tears down the sysfs group and releases the device clock; the
+ * remaining resources are devm-managed.
+ *
+ * Return: 0 always
+ */
+static int xtg_remove(struct platform_device *pdev)
+{
+ struct xtg_dev_info *tg = dev_get_drvdata(&pdev->dev);
+
+ sysfs_remove_group(&tg->dev->kobj, &xtg_attributes);
+ clk_disable_unprepare(tg->clk);
+
+ return 0;
+}
+
+/* Device-tree match table: binds to "xlnx,axi-traffic-gen" nodes */
+static const struct of_device_id xtg_of_match[] = {
+ { .compatible = "xlnx,axi-traffic-gen", },
+ { /* end of table */ }
+};
+MODULE_DEVICE_TABLE(of, xtg_of_match);
+
+static struct platform_driver xtg_driver = {
+ .driver = {
+ .name = "xilinx-trafgen",
+ .of_match_table = xtg_of_match,
+ },
+ .probe = xtg_probe,
+ .remove = xtg_remove,
+};
+
+module_platform_driver(xtg_driver);
+
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_DESCRIPTION("Xilinx Traffic Generator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 28091d3f704b..f62e53a4b3dd 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -23,20 +23,32 @@
#include <linux/regmap.h>
#include <linux/of.h>
#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/pinctrl/consumer.h>
#include "cqhci.h"
#include "sdhci-pltfm.h"
#define SDHCI_ARASAN_VENDOR_REGISTER 0x78
+
+#define SDHCI_ARASAN_ITAPDLY_REGISTER 0xF0F8
+#define SDHCI_ARASAN_OTAPDLY_REGISTER 0xF0FC
+
#define SDHCI_ARASAN_CQE_BASE_ADDR 0x200
#define VENDOR_ENHANCED_STROBE BIT(0)
#define PHY_CLK_TOO_SLOW_HZ 400000
+#define SDHCI_ITAPDLY_CHGWIN 0x200
+#define SDHCI_ITAPDLY_ENABLE 0x100
+#define SDHCI_OTAPDLY_ENABLE 0x40
+
/* Default settings for ZynqMP Clock Phases */
#define ZYNQMP_ICLK_PHASE {0, 63, 63, 0, 63, 0, 0, 183, 54, 0, 0}
#define ZYNQMP_OCLK_PHASE {0, 72, 60, 0, 60, 72, 135, 48, 72, 135, 0}
+#define VERSAL_ICLK_PHASE {0, 132, 132, 0, 132, 0, 0, 162, 90, 0, 0}
+#define VERSAL_OCLK_PHASE {0, 60, 48, 0, 48, 72, 90, 36, 60, 90, 0}
+
/*
* On some SoCs the syscon area has a feature where the upper 16-bits of
* each 32-bit register act as a write mask for the lower 16-bits. This allows
@@ -111,6 +123,8 @@ struct sdhci_arasan_zynqmp_clk_data {
* @clk_data: Struct for the Arasan Controller Clock Data.
* @soc_ctl_base: Pointer to regmap for syscon for soc_ctl registers.
* @soc_ctl_map: Map to get offsets into soc_ctl registers.
+ * @pinctrl: Per-device pin control state holder.
+ * @pins_default: Pinctrl state for a device.
*/
struct sdhci_arasan_data {
struct sdhci_host *host;
@@ -123,6 +137,8 @@ struct sdhci_arasan_data {
struct regmap *soc_ctl_base;
const struct sdhci_arasan_soc_ctl_map *soc_ctl_map;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default;
unsigned int quirks; /* Arasan deviations from spec */
/* Controller does not have CD wired and will not function normally without */
@@ -130,6 +146,12 @@ struct sdhci_arasan_data {
/* Controller immediately reports SDHCI_CLOCK_INT_STABLE after enabling the
* internal clock even when the clock isn't stable */
#define SDHCI_ARASAN_QUIRK_CLOCK_UNSTABLE BIT(1)
+/*
+ * Some of the Arasan variations might not have timing requirements
+ * met at 25MHz for Default Speed mode, those controllers work at
+ * 19MHz instead
+ */
+#define SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN BIT(2)
};
struct sdhci_arasan_of_data {
@@ -251,6 +273,16 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
sdhci_arasan->is_phy_on = false;
}
+ if (sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN) {
+ /*
+ * Some of the Arasan variations might not have timing
+ * requirements met at 25MHz for Default Speed mode,
+ * those controllers work at 19MHz instead.
+ */
+ if (clock == DEFAULT_SPEED_MAX_DTR)
+ clock = (DEFAULT_SPEED_MAX_DTR * 19) / 25;
+ }
+
/* Set the Input and Output Clock Phase Delays */
if (clk_data->set_clk_delays)
clk_data->set_clk_delays(host);
@@ -555,6 +587,10 @@ static const struct of_device_id sdhci_arasan_of_match[] = {
.compatible = "xlnx,zynqmp-8.9a",
.data = &sdhci_arasan_zynqmp_data,
},
+ {
+ .compatible = "xlnx,versal-8.9a",
+ .data = &sdhci_arasan_zynqmp_data,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdhci_arasan_of_match);
@@ -623,7 +659,6 @@ static const struct clk_ops arasan_sampleclk_ops = {
* Return: 0 on success and error value on error
*/
static int sdhci_zynqmp_sdcardclk_set_phase(struct clk_hw *hw, int degrees)
-
{
struct sdhci_arasan_clk_data *clk_data =
container_of(hw, struct sdhci_arasan_clk_data, sdcardclk_hw);
@@ -695,7 +730,6 @@ static const struct clk_ops zynqmp_sdcardclk_ops = {
* Return: 0 on success and error value on error
*/
static int sdhci_zynqmp_sampleclk_set_phase(struct clk_hw *hw, int degrees)
-
{
struct sdhci_arasan_clk_data *clk_data =
container_of(hw, struct sdhci_arasan_clk_data, sampleclk_hw);
@@ -757,6 +791,151 @@ static const struct clk_ops zynqmp_sampleclk_ops = {
.set_phase = sdhci_zynqmp_sampleclk_set_phase,
};
+/**
+ * sdhci_versal_sdcardclk_set_phase - Set the SD Output Clock Tap Delays
+ * @hw: Pointer to the hardware clock structure.
+ * @degrees: The clock phase shift between 0 - 359.
+ *
+ * Set the SD Output Clock Tap Delays for the Output path.
+ *
+ * Return: 0 on success and error value on error
+ */
+static int sdhci_versal_sdcardclk_set_phase(struct clk_hw *hw, int degrees)
+{
+	struct sdhci_arasan_clk_data *clk_data =
+		container_of(hw, struct sdhci_arasan_clk_data, sdcardclk_hw);
+	struct sdhci_arasan_data *sdhci_arasan =
+		container_of(clk_data, struct sdhci_arasan_data, clk_data);
+	struct sdhci_host *host = sdhci_arasan->host;
+	u8 tap_delay, tap_max = 0;
+
+	/*
+	 * This is applicable for SDHCI_SPEC_300 and above
+	 * Versal does not set phase for <=25MHz clock.
+	 * If degrees is zero, no need to do anything.
+	 */
+	if (host->version < SDHCI_SPEC_300 ||
+	    host->timing == MMC_TIMING_LEGACY ||
+	    host->timing == MMC_TIMING_UHS_SDR12 || !degrees)
+		return 0;
+
+	switch (host->timing) {
+	case MMC_TIMING_MMC_HS:
+	case MMC_TIMING_SD_HS:
+	case MMC_TIMING_UHS_SDR25:
+	case MMC_TIMING_UHS_DDR50:
+	case MMC_TIMING_MMC_DDR52:
+		/* For 50MHz clock, 30 Taps are available */
+		tap_max = 30;
+		break;
+	case MMC_TIMING_UHS_SDR50:
+		/* For 100MHz clock, 15 Taps are available */
+		tap_max = 15;
+		break;
+	case MMC_TIMING_UHS_SDR104:
+	case MMC_TIMING_MMC_HS200:
+		/* For 200MHz clock, 8 Taps are available */
+		tap_max = 8;
+		break;
+	default:
+		break;
+	}
+
+	tap_delay = (degrees * tap_max) / 360;
+
+	/* Set the Clock Phase */
+	if (tap_delay) {
+		u32 regval;
+
+		regval = sdhci_readl(host, SDHCI_ARASAN_OTAPDLY_REGISTER);
+		regval |= SDHCI_OTAPDLY_ENABLE;
+		sdhci_writel(host, regval, SDHCI_ARASAN_OTAPDLY_REGISTER);
+		regval |= tap_delay;
+		sdhci_writel(host, regval, SDHCI_ARASAN_OTAPDLY_REGISTER);
+	}
+
+	return 0;
+}
+
+static const struct clk_ops versal_sdcardclk_ops = {
+ .recalc_rate = sdhci_arasan_sdcardclk_recalc_rate,
+ .set_phase = sdhci_versal_sdcardclk_set_phase,
+};
+
+/**
+ * sdhci_versal_sampleclk_set_phase - Set the SD Input Clock Tap Delays
+ * @hw: Pointer to the hardware clock structure.
+ * @degrees: The clock phase shift between 0 - 359.
+ *
+ * Set the SD Input Clock Tap Delays for the Input path.
+ *
+ * Return: 0 on success and error value on error
+ */
+static int sdhci_versal_sampleclk_set_phase(struct clk_hw *hw, int degrees)
+{
+	struct sdhci_arasan_clk_data *clk_data =
+		container_of(hw, struct sdhci_arasan_clk_data, sampleclk_hw);
+	struct sdhci_arasan_data *sdhci_arasan =
+		container_of(clk_data, struct sdhci_arasan_data, clk_data);
+	struct sdhci_host *host = sdhci_arasan->host;
+	u8 tap_delay, tap_max = 0;
+
+	/*
+	 * This is applicable for SDHCI_SPEC_300 and above
+	 * Versal does not set phase for <=25MHz clock.
+	 * If degrees is zero, no need to do anything.
+	 */
+	if (host->version < SDHCI_SPEC_300 ||
+	    host->timing == MMC_TIMING_LEGACY ||
+	    host->timing == MMC_TIMING_UHS_SDR12 || !degrees)
+		return 0;
+
+	switch (host->timing) {
+	case MMC_TIMING_MMC_HS:
+	case MMC_TIMING_SD_HS:
+	case MMC_TIMING_UHS_SDR25:
+	case MMC_TIMING_UHS_DDR50:
+	case MMC_TIMING_MMC_DDR52:
+		/* For 50MHz clock, 120 Taps are available */
+		tap_max = 120;
+		break;
+	case MMC_TIMING_UHS_SDR50:
+		/* For 100MHz clock, 60 Taps are available */
+		tap_max = 60;
+		break;
+	case MMC_TIMING_UHS_SDR104:
+	case MMC_TIMING_MMC_HS200:
+		/* For 200MHz clock, 30 Taps are available */
+		tap_max = 30;
+		break;
+	default:
+		break;
+	}
+
+	tap_delay = (degrees * tap_max) / 360;
+
+	/* Set the Clock Phase; ITAP updates must be framed by CHGWIN */
+	if (tap_delay) {
+		u32 regval;
+
+		regval = sdhci_readl(host, SDHCI_ARASAN_ITAPDLY_REGISTER);
+		regval |= SDHCI_ITAPDLY_CHGWIN;
+		sdhci_writel(host, regval, SDHCI_ARASAN_ITAPDLY_REGISTER);
+		regval |= SDHCI_ITAPDLY_ENABLE;
+		sdhci_writel(host, regval, SDHCI_ARASAN_ITAPDLY_REGISTER);
+		regval |= tap_delay;
+		sdhci_writel(host, regval, SDHCI_ARASAN_ITAPDLY_REGISTER);
+		regval &= ~SDHCI_ITAPDLY_CHGWIN;
+		sdhci_writel(host, regval, SDHCI_ARASAN_ITAPDLY_REGISTER);
+	}
+
+	return 0;
+}
+
+static const struct clk_ops versal_sampleclk_ops = {
+ .recalc_rate = sdhci_arasan_sampleclk_recalc_rate,
+ .set_phase = sdhci_versal_sampleclk_set_phase,
+};
static void arasan_zynqmp_dll_reset(struct sdhci_host *host, u32 deviceid)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -790,6 +969,10 @@ static int arasan_zynqmp_execute_tuning(struct mmc_host *mmc, u32 opcode)
NODE_SD_1;
int err;
+ /* ZynqMP SD controller does not perform auto tuning in DDR50 mode */
+ if (mmc->ios.timing == MMC_TIMING_UHS_DDR50)
+ return 0;
+
arasan_zynqmp_dll_reset(host, device_id);
err = sdhci_execute_tuning(mmc, opcode);
@@ -954,6 +1137,16 @@ static void arasan_dt_parse_clk_phases(struct device *dev,
}
}
+ if (of_device_is_compatible(dev->of_node, "xlnx,versal-8.9a")) {
+ iclk_phase = (int [MMC_TIMING_MMC_HS400 + 1]) VERSAL_ICLK_PHASE;
+ oclk_phase = (int [MMC_TIMING_MMC_HS400 + 1]) VERSAL_OCLK_PHASE;
+
+ for (i = 0; i <= MMC_TIMING_MMC_HS400; i++) {
+ clk_data->clk_phase_in[i] = iclk_phase[i];
+ clk_data->clk_phase_out[i] = oclk_phase[i];
+ }
+ }
+
arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_LEGACY,
"clk-phase-legacy");
arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_MMC_HS,
@@ -1014,6 +1207,8 @@ sdhci_arasan_register_sdcardclk(struct sdhci_arasan_data *sdhci_arasan,
sdcardclk_init.flags = CLK_GET_RATE_NOCACHE;
if (of_device_is_compatible(np, "xlnx,zynqmp-8.9a"))
sdcardclk_init.ops = &zynqmp_sdcardclk_ops;
+ else if (of_device_is_compatible(np, "xlnx,versal-8.9a"))
+ sdcardclk_init.ops = &versal_sdcardclk_ops;
else
sdcardclk_init.ops = &arasan_sdcardclk_ops;
@@ -1068,6 +1263,8 @@ sdhci_arasan_register_sampleclk(struct sdhci_arasan_data *sdhci_arasan,
sampleclk_init.flags = CLK_GET_RATE_NOCACHE;
if (of_device_is_compatible(np, "xlnx,zynqmp-8.9a"))
sampleclk_init.ops = &zynqmp_sampleclk_ops;
+ else if (of_device_is_compatible(np, "xlnx,versal-8.9a"))
+ sampleclk_init.ops = &versal_sampleclk_ops;
else
sampleclk_init.ops = &arasan_sampleclk_ops;
@@ -1297,6 +1494,9 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
sdhci_arasan->clk_data.clk_of_data = zynqmp_clk_data;
host->mmc_host_ops.execute_tuning =
arasan_zynqmp_execute_tuning;
+
+ sdhci_arasan->quirks |= SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN;
+ host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
}
arasan_dt_parse_clk_phases(&pdev->dev, &sdhci_arasan->clk_data);
@@ -1308,6 +1508,20 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
goto unreg_clk;
}
+ sdhci_arasan->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (!IS_ERR(sdhci_arasan->pinctrl)) {
+ sdhci_arasan->pins_default =
+ pinctrl_lookup_state(sdhci_arasan->pinctrl,
+ PINCTRL_STATE_DEFAULT);
+ if (IS_ERR(sdhci_arasan->pins_default)) {
+ dev_err(&pdev->dev, "Missing default pinctrl config\n");
+ return IS_ERR(sdhci_arasan->pins_default);
+ }
+
+ pinctrl_select_state(sdhci_arasan->pinctrl,
+ sdhci_arasan->pins_default);
+ }
+
sdhci_arasan->phy = ERR_PTR(-ENODEV);
if (of_device_is_compatible(pdev->dev.of_node,
"arasan,sdhci-5.1")) {
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
index cf426956454c..26c8470803bb 100644
--- a/drivers/mtd/chips/cfi_probe.c
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -198,6 +198,9 @@ static int __xipram cfi_chip_setup(struct map_info *map,
__u32 base = 0;
int num_erase_regions = cfi_read_query(map, base + (0x10 + 28)*ofs_factor);
int i;
+ int extendedId1 = 0;
+ int extendedId2 = 0;
+ int extendedId3 = 0;
int addr_unlock1 = 0x555, addr_unlock2 = 0x2AA;
xip_enable(base, map, cfi);
@@ -222,6 +225,38 @@ static int __xipram cfi_chip_setup(struct map_info *map,
for (i=0; i<(sizeof(struct cfi_ident) + num_erase_regions * 4); i++)
((unsigned char *)cfi->cfiq)[i] = cfi_read_query(map,base + (0x10 + i)*ofs_factor);
+ /* Note we put the device back into Read Mode BEFORE going into Auto
+ * Select Mode, as some devices support nesting of modes, others
+ * don't. This way should always work.
+ * On cmdset 0001 the writes of 0xaa and 0x55 are not needed, and
+ * so should be treated as nops or illegal (and so put the device
+ * back into Read Mode, which is a nop in this case).
+ */
+ cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xaa, 0x555, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL);
+ cfi->mfr = cfi_read_query16(map, base);
+ cfi->id = cfi_read_query16(map, base + ofs_factor);
+
+ /* Get device ID cycle 1,2,3 for Numonyx/ST devices */
+ if ((cfi->mfr == CFI_MFR_INTEL || cfi->mfr == CFI_MFR_ST)
+ && ((cfi->id & 0xff) == 0x7e)
+ && (le16_to_cpu(cfi->cfiq->P_ID) == 0x0002)) {
+ extendedId1 = cfi_read_query16(map, base + 0x1 * ofs_factor);
+ extendedId2 = cfi_read_query16(map, base + 0xe * ofs_factor);
+ extendedId3 = cfi_read_query16(map, base + 0xf * ofs_factor);
+ }
+
+ /* Get AMD/Spansion extended JEDEC ID */
+ if (cfi->mfr == CFI_MFR_AMD && (cfi->id & 0xff) == 0x7e)
+ cfi->id = cfi_read_query(map, base + 0xe * ofs_factor) << 8 |
+ cfi_read_query(map, base + 0xf * ofs_factor);
+
+ /* Put it back into Read Mode */
+ cfi_qry_mode_off(base, map, cfi);
+ xip_allowed(base, map);
+
/* Do any necessary byteswapping */
cfi->cfiq->P_ID = le16_to_cpu(cfi->cfiq->P_ID);
@@ -231,6 +266,16 @@ static int __xipram cfi_chip_setup(struct map_info *map,
cfi->cfiq->InterfaceDesc = le16_to_cpu(cfi->cfiq->InterfaceDesc);
cfi->cfiq->MaxBufWriteSize = le16_to_cpu(cfi->cfiq->MaxBufWriteSize);
+ /* If the device is a M29EW used in 8-bit mode, adjust buffer size */
+ if ((cfi->cfiq->MaxBufWriteSize > 0x8) && (cfi->mfr == CFI_MFR_INTEL ||
+ cfi->mfr == CFI_MFR_ST) && (extendedId1 == 0x7E) &&
+ (extendedId2 == 0x22 || extendedId2 == 0x23 || extendedId2 == 0x28) &&
+ (extendedId3 == 0x01)) {
+ cfi->cfiq->MaxBufWriteSize = 0x8;
+ pr_warn("Adjusted buffer size on Numonyx flash M29EW family");
+ pr_warn("in 8 bit mode\n");
+ }
+
#ifdef DEBUG_CFI
/* Dump the information therein */
print_cfi_ident(cfi->cfiq);
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index a80a46bb5b8b..e7ff8e90b5f9 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -551,4 +551,18 @@ config MTD_NAND_DISKONCHIP_BBTWRITE
load time (assuming you build diskonchip as a module) with the module
parameter "inftl_bbt_write=1".
+config MTD_NAND_ARASAN
+ tristate "Support for Arasan Nand Flash controller"
+ depends on HAS_IOMEM && HAS_DMA
+ help
+ Enables the driver for the Arasan Nand Flash controller on
+ Zynq Ultrascale+ MPSoC.
+
+config MTD_NAND_PL353
+ tristate "ARM PL353 NAND Flash driver"
+ depends on MTD_RAW_NAND && ARM
+ depends on PL353_SMC
+ help
+ Enables support for PrimeCell Static Memory Controller PL353
+
endif # MTD_RAW_NAND
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index 2d136b158fb7..de4c0b145650 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -58,6 +58,8 @@ obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o
obj-$(CONFIG_MTD_NAND_STM32_FMC2) += stm32_fmc2_nand.o
obj-$(CONFIG_MTD_NAND_MESON) += meson_nand.o
obj-$(CONFIG_MTD_NAND_CADENCE) += cadence-nand-controller.o
+obj-$(CONFIG_MTD_NAND_ARASAN) += arasan_nand.o
+obj-$(CONFIG_MTD_NAND_PL353) += pl353_nand.o
nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_onfi.o
diff --git a/drivers/mtd/nand/raw/arasan_nand.c b/drivers/mtd/nand/raw/arasan_nand.c
new file mode 100644
index 000000000000..d7b5a2879a99
--- /dev/null
+++ b/drivers/mtd/nand/raw/arasan_nand.c
@@ -0,0 +1,1527 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Arasan NAND Flash Controller Driver
+ *
+ * Copyright (C) 2014 - 2017 Xilinx, Inc.
+ * Author: Punnaiah Choudary Kalluri <punnaia@xilinx.com>
+ * Author: Naga Sureshkumar Relli <nagasure@xilinx.com>
+ *
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mtd/nand_bch.h>
+#include "internals.h"
+#include <linux/pm_runtime.h>
+
+#define EVENT_TIMEOUT_MSEC 1000
+#define ANFC_PM_TIMEOUT 1000 /* ms */
+
+#define PKT_OFST 0x00
+#define PKT_CNT_SHIFT 12
+
+#define MEM_ADDR1_OFST 0x04
+#define MEM_ADDR2_OFST 0x08
+#define PG_ADDR_SHIFT 16
+#define BCH_MODE_SHIFT 25
+#define MEM_ADDR_MASK GENMASK(7, 0)
+#define BCH_MODE_MASK GENMASK(27, 25)
+#define CS_MASK GENMASK(31, 30)
+#define CS_SHIFT 30
+
+#define CMD_OFST 0x0C
+#define ECC_ENABLE BIT(31)
+#define DMA_EN_MASK GENMASK(27, 26)
+#define DMA_ENABLE 0x2
+#define DMA_EN_SHIFT 26
+#define REG_PAGE_SIZE_SHIFT 23
+
+#define PROG_OFST 0x10
+#define PROG_PGRD BIT(0)
+#define PROG_ERASE BIT(2)
+#define PROG_STATUS BIT(3)
+#define PROG_PGPROG BIT(4)
+#define PROG_RDID BIT(6)
+#define PROG_RDPARAM BIT(7)
+#define PROG_RST BIT(8)
+#define PROG_GET_FEATURE BIT(9)
+#define PROG_SET_FEATURE BIT(10)
+
+#define INTR_STS_EN_OFST 0x14
+#define INTR_SIG_EN_OFST 0x18
+#define XFER_COMPLETE BIT(2)
+#define READ_READY BIT(1)
+#define WRITE_READY BIT(0)
+#define MBIT_ERROR BIT(3)
+#define EVENT_MASK (XFER_COMPLETE | READ_READY | WRITE_READY | MBIT_ERROR)
+
+#define INTR_STS_OFST 0x1C
+#define READY_STS_OFST 0x20
+#define DMA_ADDR1_OFST 0x24
+#define FLASH_STS_OFST 0x28
+#define DATA_PORT_OFST 0x30
+#define ECC_OFST 0x34
+#define BCH_EN_SHIFT 27
+#define ECC_SIZE_SHIFT 16
+
+#define ECC_ERR_CNT_OFST 0x38
+#define PAGE_ERR_CNT_MASK GENMASK(16, 8)
+#define PKT_ERR_CNT_MASK GENMASK(7, 0)
+
+#define ECC_SPR_CMD_OFST 0x3C
+#define CMD2_SHIFT 8
+#define ADDR_CYCLES_SHIFT 28
+
+#define ECC_ERR_CNT_1BIT_OFST 0x40
+#define ECC_ERR_CNT_2BIT_OFST 0x44
+#define DMA_ADDR0_OFST 0x50
+#define DATA_INTERFACE_OFST 0x6C
+#define ANFC_MAX_CHUNK_SIZE 0x4000
+#define ANFC_MAX_ADDR_CYCLES 7
+
+#define REG_PAGE_SIZE_512 0
+#define REG_PAGE_SIZE_1K 5
+#define REG_PAGE_SIZE_2K 1
+#define REG_PAGE_SIZE_4K 2
+#define REG_PAGE_SIZE_8K 3
+#define REG_PAGE_SIZE_16K 4
+
+#define TEMP_BUF_SIZE 1024
+#define NVDDR_MODE_PACKET_SIZE 8
+#define SDR_MODE_PACKET_SIZE 4
+
+#define ONFI_DATA_INTERFACE_NVDDR BIT(4)
+#define NVDDR_MODE BIT(9)
+#define NVDDR_TIMING_MODE_SHIFT 3
+#define SDR_MODE_DEFLT_FREQ 80000000
+#define COL_ROW_ADDR(pos, val) (((val) & 0xFF) << (8 * (pos)))
+
+/*
+ * Arasan NAND controller can't detect errors beyond 24-bit in BCH
+ * For an erased page we observed that multibit error count as 16
+ * with 24-bit ECC. so if the count is equal to or greater than 16
+ * then we can say that its an uncorrectable ECC error.
+ */
+#define MULTI_BIT_ERR_CNT 16
+
+struct anfc_op {
+ u32 cmds[4];
+ u32 len;
+ u32 col;
+ u32 row;
+ unsigned int data_instr_idx;
+ const struct nand_op_instr *data_instr;
+};
+
+/**
+ * struct anfc_nand_chip - Defines the nand chip related information
+ * @node: Used to store NAND chips into a list.
+ * @chip: NAND chip information structure.
+ * @strength: Bch or Hamming mode enable/disable.
+ * @ecc_strength: Ecc strength 4.8/12/16.
+ * @eccval: Ecc config value.
+ * @raddr_cycles: Row address cycle information.
+ * @caddr_cycles: Column address cycle information.
+ * @pktsize: Packet size for read / write operation.
+ * @csnum: chipselect number to be used.
+ * @spktsize: Packet size in ddr mode for status operation.
+ * @inftimeval: Data interface and timing mode information
+ */
+struct anfc_nand_chip {
+ struct list_head node;
+ struct nand_chip chip;
+ bool strength;
+ u32 ecc_strength;
+ u32 eccval;
+ u16 raddr_cycles;
+ u16 caddr_cycles;
+ u32 pktsize;
+ int csnum;
+ u32 spktsize;
+ u32 inftimeval;
+};
+
+/**
+ * struct anfc_nand_controller - Defines the Arasan NAND flash controller
+ * driver instance
+ * @controller: base controller structure.
+ * @chips: list of all nand chips attached to the ctrler.
+ * @dev: Pointer to the device structure.
+ * @base: Virtual address of the NAND flash device.
+ * @curr_cmd: Current command issued.
+ * @clk_sys: Pointer to the system clock.
+ * @clk_flash: Pointer to the flash clock.
+ * @dma: Dma enable/disable.
+ * @buf: Buffer used for read/write byte operations.
+ * @irq: irq number
+ * @bufshift: Variable used for indexing buffer operation
+ * @csnum: Chip select number currently inuse.
+ * @event: Completion event for nand status events.
+ * @status: Status of the flash device.
+ * @prog: Used to initiate controller operations.
+ * @chip_active: Used to check the chip select state, active or not.
+ */
+struct anfc_nand_controller {
+ struct nand_controller controller;
+ struct list_head chips;
+ struct device *dev;
+ void __iomem *base;
+ int curr_cmd;
+ struct clk *clk_sys;
+ struct clk *clk_flash;
+ int irq;
+ int csnum;
+ struct completion event;
+ int status;
+ u32 prog;
+ u8 buf[TEMP_BUF_SIZE];
+ bool chip_active;
+};
+
+/* ECC OOB layout: all ECC bytes sit in one contiguous region at the
+ * end of the OOB area, so only section 0 is valid.
+ * NOTE(review): the first check is redundant — any section >= steps
+ * (steps >= 1) is also nonzero and caught by the second check.
+ */
+static int anfc_ooblayout_ecc(struct mtd_info *mtd, int section,
+			      struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+
+	if (section >= nand->ecc.steps)
+		return -ERANGE;
+
+	if (section)
+		return -ERANGE;
+
+	/* ECC occupies the last ecc.total bytes of the OOB */
+	oobregion->length = nand->ecc.total;
+	oobregion->offset = mtd->oobsize - oobregion->length;
+
+	return 0;
+}
+
+/* Free OOB layout: everything between the 2-byte bad-block marker area
+ * and the trailing ECC region is available to the caller (section 0 only).
+ */
+static int anfc_ooblayout_free(struct mtd_info *mtd, int section,
+			       struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+
+	if (section >= nand->ecc.steps)
+		return -ERANGE;
+
+	if (section)
+		return -ERANGE;
+
+	/* Skip the first 2 bytes (bad-block marker) */
+	oobregion->offset = 2;
+	oobregion->length = mtd->oobsize - nand->ecc.total - 2;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops anfc_ooblayout_ops = {
+ .ecc = anfc_ooblayout_ecc,
+ .free = anfc_ooblayout_free,
+};
+
+/* Generic flash bbt decriptors */
+static u8 bbt_pattern[] = { 'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = { '1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP |
+ NAND_BBM_SECONDPAGE,
+ .offs = 4,
+ .len = 4,
+ .veroffs = 20,
+ .maxblocks = 4,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP |
+ NAND_BBM_SECONDPAGE,
+ .offs = 4,
+ .len = 4,
+ .veroffs = 20,
+ .maxblocks = 4,
+ .pattern = mirror_pattern
+};
+
+static inline struct anfc_nand_chip *to_anfc_nand(struct nand_chip *nand)
+{
+ return container_of(nand, struct anfc_nand_chip, chip);
+}
+
+static inline struct anfc_nand_controller *to_anfc(struct nand_controller *ctrl)
+{
+ return container_of(ctrl, struct anfc_nand_controller, controller);
+}
+
+static u8 anfc_page(u32 pagesize)
+{
+ switch (pagesize) {
+ case 512:
+ return REG_PAGE_SIZE_512;
+ case 1024:
+ return REG_PAGE_SIZE_1K;
+ case 2048:
+ return REG_PAGE_SIZE_2K;
+ case 4096:
+ return REG_PAGE_SIZE_4K;
+ case 8192:
+ return REG_PAGE_SIZE_8K;
+ case 16384:
+ return REG_PAGE_SIZE_16K;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static inline void anfc_enable_intrs(struct anfc_nand_controller *nfc, u32 val)
+{
+ writel(val, nfc->base + INTR_STS_EN_OFST);
+ writel(val, nfc->base + INTR_SIG_EN_OFST);
+}
+
+static inline void anfc_config_ecc(struct anfc_nand_controller *nfc, bool on)
+{
+ u32 val;
+
+ val = readl(nfc->base + CMD_OFST);
+ if (on)
+ val |= ECC_ENABLE;
+ else
+ val &= ~ECC_ENABLE;
+ writel(val, nfc->base + CMD_OFST);
+}
+
+static inline void anfc_config_dma(struct anfc_nand_controller *nfc, int on)
+{
+ u32 val;
+
+ val = readl(nfc->base + CMD_OFST);
+ val &= ~DMA_EN_MASK;
+ if (on)
+ val |= DMA_ENABLE << DMA_EN_SHIFT;
+ writel(val, nfc->base + CMD_OFST);
+}
+
+static inline int anfc_wait_for_event(struct anfc_nand_controller *nfc)
+{
+ return wait_for_completion_timeout(&nfc->event,
+ msecs_to_jiffies(EVENT_TIMEOUT_MSEC));
+}
+
+static inline void anfc_setpktszcnt(struct anfc_nand_controller *nfc,
+ u32 pktsize, u32 pktcount)
+{
+ writel(pktsize | (pktcount << PKT_CNT_SHIFT), nfc->base + PKT_OFST);
+}
+
+static inline void anfc_set_eccsparecmd(struct anfc_nand_controller *nfc,
+ struct anfc_nand_chip *achip, u8 cmd1,
+ u8 cmd2)
+{
+ writel(cmd1 | (cmd2 << CMD2_SHIFT) |
+ (achip->caddr_cycles << ADDR_CYCLES_SHIFT),
+ nfc->base + ECC_SPR_CMD_OFST);
+}
+
+static void anfc_setpagecoladdr(struct anfc_nand_controller *nfc, u32 page,
+ u16 col)
+{
+ u32 val;
+
+ writel(col | (page << PG_ADDR_SHIFT), nfc->base + MEM_ADDR1_OFST);
+
+ val = readl(nfc->base + MEM_ADDR2_OFST);
+ val = (val & ~MEM_ADDR_MASK) |
+ ((page >> PG_ADDR_SHIFT) & MEM_ADDR_MASK);
+ writel(val, nfc->base + MEM_ADDR2_OFST);
+}
+
+static void anfc_prepare_cmd(struct nand_chip *chip, u8 cmd1,
+ u8 cmd2, u8 dmamode,
+ u32 pagesize, u8 addrcycles)
+{
+ u32 regval;
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+
+ regval = cmd1 | (cmd2 << CMD2_SHIFT);
+ if (dmamode)
+ regval |= DMA_ENABLE << DMA_EN_SHIFT;
+ regval |= addrcycles << ADDR_CYCLES_SHIFT;
+ regval |= anfc_page(pagesize) << REG_PAGE_SIZE_SHIFT;
+ writel(regval, nfc->base + CMD_OFST);
+}
+
+/*
+ * anfc_rw_dma_op - Run one DMA-driven page read or program operation.
+ * @mtd:      MTD device.
+ * @buf:      Kernel-virtual data buffer (must be DMA-mappable).
+ * @len:      Total transfer length in bytes.
+ * @do_read:  True for read (device->memory), false for write.
+ * @prog:     PROG register trigger value (PROG_PGRD / PROG_PGPROG).
+ * @pktcount: Number of packets in the transfer.
+ * @pktsize:  Packet size in bytes; 0 means "single packet of @len".
+ *
+ * NOTE(review): a timeout from anfc_wait_for_event() is silently
+ * ignored here; the void return gives no way to report it — confirm
+ * whether callers can tolerate a short/failed transfer.
+ */
+static void anfc_rw_dma_op(struct mtd_info *mtd, u8 *buf, int len,
+			   bool do_read, u32 prog, int pktcount, int pktsize)
+{
+	dma_addr_t paddr;
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+	struct anfc_nand_chip *achip = to_anfc_nand(chip);
+	u32 eccintr = 0, dir;
+
+	if (pktsize == 0)
+		pktsize = len;
+
+	anfc_setpktszcnt(nfc, pktsize, pktcount);
+
+	/* Without BCH, also watch for multi-bit (Hamming) error events */
+	if (!achip->strength)
+		eccintr = MBIT_ERROR;
+
+	dir = do_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+	paddr = dma_map_single(nfc->dev, buf, len, dir);
+	if (dma_mapping_error(nfc->dev, paddr)) {
+		dev_err(nfc->dev, "%s buffer mapping error\n",
+			do_read ? "Read" : "Write");
+		return;
+	}
+	/*
+	 * Use lower/upper_32_bits(): a plain (paddr >> 32) is undefined
+	 * when dma_addr_t is 32 bits wide.
+	 */
+	writel(lower_32_bits(paddr), nfc->base + DMA_ADDR0_OFST);
+	writel(upper_32_bits(paddr), nfc->base + DMA_ADDR1_OFST);
+	anfc_enable_intrs(nfc, (XFER_COMPLETE | eccintr));
+	writel(prog, nfc->base + PROG_OFST);
+	anfc_wait_for_event(nfc);
+	dma_unmap_single(nfc->dev, paddr, len, dir);
+}
+
+/*
+ * anfc_rw_pio_op - Run one PIO (FIFO-driven) read or program operation.
+ * @mtd:      MTD device.
+ * @buf:      Data buffer; drained/filled 32 bits at a time via DATA_PORT.
+ * @len:      Total transfer length in bytes.
+ * @do_read:  True for read, false for write.
+ * @prog:     PROG register trigger value.
+ * @pktcount: Number of packets; one READY interrupt is expected per packet.
+ * @pktsize:  Packet size in bytes; 0 means "single packet of @len".
+ *
+ * The interrupt re-arm ordering below is deliberate: before servicing the
+ * final packet, switch the enabled interrupt to XFER_COMPLETE so the
+ * closing anfc_wait_for_event() waits for transfer completion, not for
+ * another (never-coming) READY event.
+ * NOTE(review): assumes pktsize is a multiple of 4 (FIFO is 32-bit wide)
+ * and ignores anfc_wait_for_event() timeouts — confirm both.
+ */
+static void anfc_rw_pio_op(struct mtd_info *mtd, u8 *buf, int len,
+			   bool do_read, int prog, int pktcount, int pktsize)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+	struct anfc_nand_chip *achip = to_anfc_nand(chip);
+	u32 *bufptr = (u32 *)buf;
+	u32 cnt = 0, intr = 0;
+
+	/* PIO path: make sure the controller's DMA engine is off */
+	anfc_config_dma(nfc, 0);
+
+	if (pktsize == 0)
+		pktsize = len;
+
+	anfc_setpktszcnt(nfc, pktsize, pktcount);
+
+	/* Without BCH, also watch for multi-bit (Hamming) error events */
+	if (!achip->strength)
+		intr = MBIT_ERROR;
+
+	if (do_read)
+		intr |= READ_READY;
+	else
+		intr |= WRITE_READY;
+	anfc_enable_intrs(nfc, intr);
+	writel(prog, nfc->base + PROG_OFST);
+	while (cnt < pktcount) {
+		anfc_wait_for_event(nfc);
+		cnt++;
+		/* Last packet: wait for completion instead of READY next */
+		if (cnt == pktcount)
+			anfc_enable_intrs(nfc, XFER_COMPLETE);
+		if (do_read)
+			ioread32_rep(nfc->base + DATA_PORT_OFST, bufptr,
+				     pktsize / 4);
+		else
+			iowrite32_rep(nfc->base + DATA_PORT_OFST, bufptr,
+				      pktsize / 4);
+		bufptr += (pktsize / 4);
+		if (cnt < pktcount)
+			anfc_enable_intrs(nfc, intr);
+	}
+	/* Wait for the final XFER_COMPLETE */
+	anfc_wait_for_event(nfc);
+}
+
+static void anfc_read_data_op(struct nand_chip *chip, u8 *buf, int len,
+ int pktcount, int pktsize)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (virt_addr_valid(buf))
+ anfc_rw_dma_op(mtd, buf, len, 1, PROG_PGRD, pktcount, pktsize);
+ else
+ anfc_rw_pio_op(mtd, buf, len, 1, PROG_PGRD, pktcount, pktsize);
+}
+
+static void anfc_write_data_op(struct nand_chip *chip, const u8 *buf,
+ int len, int pktcount, int pktsize)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (virt_addr_valid(buf))
+ anfc_rw_dma_op(mtd, (char *)buf, len, 0, PROG_PGPROG, pktcount,
+ pktsize);
+ else
+ anfc_rw_pio_op(mtd, (char *)buf, len, 0, PROG_PGPROG, pktcount,
+ pktsize);
+}
+
+/* Raw/on-die-ECC page read: issue READ, then DMA/PIO the page data. */
+static int anfc_read_page(struct nand_chip *chip, uint8_t *buf,
+			  int oob_required, int page)
+{
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	struct anfc_nand_chip *achip = to_anfc_nand(chip);
+	int ret;
+
+	/*
+	 * nand_read_page_op() returns a negative errno; keep it in an
+	 * int (the original u32 mangled the sign on the error path).
+	 */
+	ret = nand_read_page_op(chip, page, 0, NULL, 0);
+	if (ret)
+		return ret;
+
+	anfc_read_data_op(chip, buf, mtd->writesize,
+			  DIV_ROUND_UP(mtd->writesize, achip->pktsize),
+			  achip->pktsize);
+	if (oob_required)
+		chip->ecc.read_oob(chip, page);
+
+	return 0;
+}
+
+/* Raw/on-die-ECC page program: begin PROG, then DMA/PIO the page data. */
+static int anfc_write_page(struct nand_chip *chip, const uint8_t *buf,
+			   int oob_required, int page)
+{
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	struct anfc_nand_chip *achip = to_anfc_nand(chip);
+	int ret;
+
+	/* int, not u32: nand_prog_page_begin_op() returns -errno */
+	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+	if (ret)
+		return ret;
+
+	anfc_write_data_op(chip, buf, mtd->writesize,
+			   DIV_ROUND_UP(mtd->writesize, achip->pktsize),
+			   achip->pktsize);
+
+	if (oob_required)
+		chip->ecc.write_oob(chip, page);
+
+	return 0;
+}
+
+/*
+ * anfc_read_page_hwecc - Read one page with controller (HW) ECC enabled.
+ *
+ * Reads the page with ECC on, collects the per-page error counters, and
+ * for any reported errors re-checks each ECC chunk against the erased-page
+ * pattern. Chunks that are neither clean nor correctable are now counted
+ * in mtd->ecc_stats.failed (the original silently dropped them).
+ *
+ * Return: max bitflips seen in any chunk, or negative errno.
+ */
+static int anfc_read_page_hwecc(struct nand_chip *chip, u8 *buf,
+				int oob_required, int page)
+{
+	struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+	struct anfc_nand_chip *achip = to_anfc_nand(chip);
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	u8 *ecc_code = chip->ecc.code_buf;
+	u8 *p;
+	int eccsize = chip->ecc.size;
+	int eccbytes = chip->ecc.bytes;
+	int stat = 0, i;
+	int ret;
+	unsigned int max_bitflips = 0;
+	u32 eccsteps = chip->ecc.steps;
+	u32 one_bit_err = 0, multi_bit_err = 0;
+
+	/* int, not u32: nand_read_page_op() returns -errno */
+	ret = nand_read_page_op(chip, page, 0, NULL, 0);
+	if (ret)
+		return ret;
+	anfc_set_eccsparecmd(nfc, achip, NAND_CMD_RNDOUT, NAND_CMD_RNDOUTSTART);
+	anfc_config_ecc(nfc, true);
+	anfc_read_data_op(chip, buf, mtd->writesize,
+			  DIV_ROUND_UP(mtd->writesize, achip->pktsize),
+			  achip->pktsize);
+
+	if (achip->strength) {
+		/*
+		 * In BCH mode Arasan NAND controller can correct ECC upto
+		 * 24-bit Beyond that, it can't even detect errors.
+		 */
+		multi_bit_err = readl(nfc->base + ECC_ERR_CNT_OFST);
+		multi_bit_err = ((multi_bit_err & PAGE_ERR_CNT_MASK) >> 8);
+	} else {
+		/*
+		 * In Hamming mode Arasan NAND controller can correct ECC upto
+		 * 1-bit and can detect upto 2-bit errors.
+		 */
+		one_bit_err = readl(nfc->base + ECC_ERR_CNT_1BIT_OFST);
+		multi_bit_err = readl(nfc->base + ECC_ERR_CNT_2BIT_OFST);
+		/* Clear ecc error count register 1Bit, 2Bit */
+		writel(0x0, nfc->base + ECC_ERR_CNT_1BIT_OFST);
+		writel(0x0, nfc->base + ECC_ERR_CNT_2BIT_OFST);
+	}
+
+	anfc_config_ecc(nfc, false);
+	if (oob_required)
+		chip->ecc.read_oob(chip, page);
+
+	if (multi_bit_err || one_bit_err) {
+		/* Need the stored ECC bytes to distinguish erased pages */
+		if (!oob_required)
+			chip->ecc.read_oob(chip, page);
+
+		mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+					   chip->ecc.total);
+		eccsteps = chip->ecc.steps;
+		p = buf;
+		for (i = 0; eccsteps; eccsteps--, i += eccbytes,
+		     p += eccsize) {
+			stat = nand_check_erased_ecc_chunk(p,
+							   chip->ecc.size,
+							   &ecc_code[i],
+							   eccbytes,
+							   NULL, 0,
+							   chip->ecc.strength);
+			if (stat < 0) {
+				/* Not erased, not correctable: real failure */
+				mtd->ecc_stats.failed++;
+				stat = 0;
+			} else {
+				mtd->ecc_stats.corrected += stat;
+				max_bitflips = max_t(unsigned int, max_bitflips,
+						     stat);
+			}
+		}
+	}
+
+	return max_bitflips;
+}
+
+/*
+ * anfc_write_page_hwecc - Program one page with controller (HW) ECC enabled.
+ *
+ * Programs the spare-area ECC command, turns ECC generation on for the
+ * data transfer, and turns it back off afterwards so subsequent raw
+ * accesses are unaffected.
+ *
+ * Return: 0 on success or negative errno from nand_prog_page_begin_op().
+ */
+static int anfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
+				 int oob_required, int page)
+{
+	int ret;
+	struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+	struct anfc_nand_chip *achip = to_anfc_nand(chip);
+	struct mtd_info *mtd = nand_to_mtd(chip);
+
+	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+	if (ret)
+		return ret;
+
+	/* RNDIN is issued by the controller when it writes the ECC bytes */
+	anfc_set_eccsparecmd(nfc, achip, NAND_CMD_RNDIN, 0);
+	anfc_config_ecc(nfc, true);
+	anfc_write_data_op(chip, buf, mtd->writesize,
+			   DIV_ROUND_UP(mtd->writesize, achip->pktsize),
+			   achip->pktsize);
+
+	if (oob_required)
+		chip->ecc.write_oob(chip, page);
+
+	/* Leave ECC disabled for any following raw operations */
+	anfc_config_ecc(nfc, false);
+
+	return 0;
+}
+
+/*
+ * anfc_ecc_init - Configure the ECC engine for one attached chip.
+ * @mtd:      MTD device.
+ * @ecc:      ECC control structure to fill in.
+ * @ecc_mode: NAND_ECC_ON_DIE to defer to the flash's internal ECC,
+ *            anything else selects the controller's HW ECC.
+ *
+ * For on-die ECC the controller engine is disabled and raw read/write
+ * paths are installed, with BBT descriptors placed so they do not clash
+ * with the flash's own ECC layout. For HW ECC, the strength requested by
+ * the chip (chip->base.eccreq) is mapped to the controller's encoded
+ * strength field and the ECC register value (offset | size | BCH enable)
+ * is precomputed into achip->eccval.
+ *
+ * Return: 0 always.
+ */
+static int anfc_ecc_init(struct mtd_info *mtd,
+			 struct nand_ecc_ctrl *ecc, int ecc_mode)
+{
+	u32 ecc_addr;
+	unsigned int ecc_strength, steps;
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct anfc_nand_chip *achip = to_anfc_nand(chip);
+	struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+
+	if (ecc_mode == NAND_ECC_ON_DIE) {
+		anfc_config_ecc(nfc, 0);
+		ecc->strength = 1;
+		ecc->bytes = 0;
+		ecc->size = mtd->writesize;
+		ecc->read_page = anfc_read_page;
+		ecc->write_page = anfc_write_page;
+		chip->bbt_td = &bbt_main_descr;
+		chip->bbt_md = &bbt_mirror_descr;
+	} else {
+		ecc->mode = NAND_ECC_HW;
+		ecc->read_page = anfc_read_page_hwecc;
+		ecc->write_page = anfc_write_page_hwecc;
+
+		mtd_set_ooblayout(mtd, &anfc_ooblayout_ops);
+		steps = mtd->writesize / chip->base.eccreq.step_size;
+
+		/* Map requested strength to the controller's encoding;
+		 * 0 means Hamming (no BCH). */
+		switch (chip->base.eccreq.strength) {
+		case 12:
+			ecc_strength = 0x1;
+			break;
+		case 8:
+			ecc_strength = 0x2;
+			break;
+		case 4:
+			ecc_strength = 0x3;
+			break;
+		case 24:
+			ecc_strength = 0x4;
+			break;
+		default:
+			ecc_strength = 0x0;
+		}
+		if (!ecc_strength)
+			/* Hamming: 3 ECC bytes per 512-byte step */
+			ecc->total = 3 * steps;
+		else
+			/* BCH: strength * log2(step bits) bits per step,
+			 * rounded up to whole bytes */
+			ecc->total =
+			DIV_ROUND_UP(fls(8 * chip->base.eccreq.step_size) *
+				     chip->base.eccreq.strength * steps, 8);
+		ecc->strength = chip->base.eccreq.strength;
+		ecc->size = chip->base.eccreq.step_size;
+		ecc->bytes = ecc->total / steps;
+		ecc->steps = steps;
+		achip->ecc_strength = ecc_strength;
+		/* bool: any non-zero encoding means BCH is in use */
+		achip->strength = achip->ecc_strength;
+		/* ECC bytes live at the tail of the OOB area */
+		ecc_addr = mtd->writesize + (mtd->oobsize - ecc->total);
+		achip->eccval = ecc_addr | (ecc->total << ECC_SIZE_SHIFT) |
+				(achip->strength << BCH_EN_SHIFT);
+	}
+
+	/* Packet size follows the ECC step size (512 or 1024 bytes) */
+	if (chip->base.eccreq.step_size >= 1024)
+		achip->pktsize = 1024;
+	else
+		achip->pktsize = 512;
+
+	return 0;
+}
+
+/* NAND framework ->exec_op() hooks and related helpers */
+/* NAND framework ->exec_op() hooks and related helpers */
+/*
+ * anfc_parse_instructions - flatten a nand_subop into an anfc_op.
+ * Collects up to two opcodes, splits address cycles into column/row
+ * registers (erase uses row-only addressing), and records the data
+ * instruction and its index for the pattern handlers.
+ */
+static void anfc_parse_instructions(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ struct anfc_op *nfc_op)
+{
+ const struct nand_op_instr *instr = NULL;
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ unsigned int op_id;
+ int i = 0;
+
+ memset(nfc_op, 0, sizeof(struct anfc_op));
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ unsigned int naddrs;
+
+ instr = &subop->instrs[op_id];
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ /* First CMD is the start opcode, second the end opcode */
+ if (op_id)
+ nfc_op->cmds[1] = instr->ctx.cmd.opcode;
+ else
+ nfc_op->cmds[0] = instr->ctx.cmd.opcode;
+ nfc->curr_cmd = nfc_op->cmds[0];
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ i = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop,
+ op_id);
+
+ /*
+ * NOTE(review): loop bound is 'naddrs' while 'i'
+ * starts at the subop address offset; confirm no
+ * cycles are dropped when the offset is non-zero.
+ */
+ for (; i < naddrs; i++) {
+ u8 val = instr->ctx.addr.addrs[i];
+
+ /* Erase is row-only; others: 2 col + row cycles */
+ if (nfc_op->cmds[0] == NAND_CMD_ERASE1) {
+ nfc_op->row |= COL_ROW_ADDR(i, val);
+ } else {
+ if (i < 2)
+ nfc_op->col |= COL_ROW_ADDR(i,
+ val);
+ else
+ nfc_op->row |= COL_ROW_ADDR(i -
+ 2, val);
+ }
+ }
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ nfc_op->data_instr = instr;
+ nfc_op->data_instr_idx = op_id;
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ nfc_op->data_instr = instr;
+ nfc_op->data_instr_idx = op_id;
+ break;
+ case NAND_OP_WAITRDY_INSTR:
+ /* Controller waits for R/B# itself; nothing to record */
+ break;
+ }
+ }
+}
+
+/*
+ * anfc_reset_cmd_type_exec - handle the CMD+WAITRDY pattern.
+ * Only NAND_CMD_RESET is executed here; other opcodes matching this
+ * generic pattern are silently accepted (returns 0) because they have
+ * dedicated patterns of their own.
+ */
+static int anfc_reset_cmd_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct anfc_op nfc_op = {};
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+
+ /*
+ * Do not execute commands other than NAND_CMD_RESET
+ * Other commands have their own patterns
+ * If there is no pattern match, that means controller
+ * is not supporting that pattern.
+ */
+ if (nfc_op.cmds[0] != NAND_CMD_RESET)
+ return 0;
+
+ anfc_prepare_cmd(chip, nfc_op.cmds[0], 0, 0, 0, 0);
+ nfc->prog = PROG_RST;
+ anfc_enable_intrs(nfc, XFER_COMPLETE);
+ writel(nfc->prog, nfc->base + PROG_OFST);
+ anfc_wait_for_event(nfc);
+
+ return 0;
+}
+
+/*
+ * anfc_read_id_type_exec - handle READID (CMD + ADDR + DATA_IN).
+ * Issues the command, then PIO-reads the ID bytes (rounded up to a
+ * 32-bit word) into the bounce buffer and copies the requested length
+ * back to the instruction's buffer.
+ */
+static int anfc_read_id_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_op nfc_op = {};
+ unsigned int op_id, len;
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+ len = nand_subop_get_data_len(subop, op_id);
+ anfc_prepare_cmd(chip, nfc_op.cmds[0], 0, 0, 0, 1);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+ nfc->prog = PROG_RDID;
+ anfc_rw_pio_op(mtd, nfc->buf, roundup(len, 4), 1, PROG_RDID, 1, 0);
+ memcpy(instr->ctx.data.buf.in, nfc->buf, len);
+
+ return 0;
+}
+
+/*
+ * anfc_read_status_exec - handle STATUS (CMD + DATA_IN).
+ * Triggers a status read, waits for completion, then copies the value
+ * latched by the controller in FLASH_STS into the caller's buffer.
+ */
+static int anfc_read_status_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_op nfc_op = {};
+ unsigned int op_id, len;
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+
+ anfc_prepare_cmd(chip, nfc_op.cmds[0], 0, 0, 0, 0);
+ anfc_setpktszcnt(nfc, achip->spktsize / 4, 1);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+ nfc->prog = PROG_STATUS;
+
+ anfc_enable_intrs(nfc, XFER_COMPLETE);
+ writel(nfc->prog, nfc->base + PROG_OFST);
+ anfc_wait_for_event(nfc);
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ len = nand_subop_get_data_len(subop, op_id);
+
+ /*
+ * The Arasan NAND controller will update the status value
+ * returned by the flash device in FLASH_STS register.
+ */
+ nfc->status = readl(nfc->base + FLASH_STS_OFST);
+ memcpy(instr->ctx.data.buf.in, &nfc->status, len);
+
+ return 0;
+}
+
+/*
+ * anfc_erase_zerolenpg_read_type_exec - handle ERASE1/ERASE2 and
+ * zero-length page reads (CMD + ADDR + CMD + WAITRDY).
+ * For erase, the parsed row value is split back into the controller's
+ * col/row register halves and the op runs to completion here; for READ0
+ * the command is only set up and any trailing DATA_IN is serviced.
+ */
+static int anfc_erase_zerolenpg_read_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 dma_mode = 0, write_size = 0, addrcycles = 0, len, op_id;
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+
+ if (nfc_op.cmds[0] == NAND_CMD_ERASE1) {
+ nfc->prog = PROG_ERASE;
+ addrcycles = achip->raddr_cycles;
+ /* Erase cycles were parsed into 'row'; split for the regs */
+ nfc_op.col = nfc_op.row & 0xffff;
+ nfc_op.row = (nfc_op.row >> PG_ADDR_SHIFT) & 0xffff;
+ } else if (nfc_op.cmds[0] == NAND_CMD_READ0) {
+ nfc->prog = PROG_PGRD;
+ addrcycles = achip->raddr_cycles + achip->caddr_cycles;
+ write_size = mtd->writesize;
+ dma_mode = 1;
+ }
+
+ anfc_prepare_cmd(chip, nfc_op.cmds[0], nfc_op.cmds[1], dma_mode,
+ write_size, addrcycles);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ if (nfc_op.cmds[0] == NAND_CMD_ERASE1) {
+ anfc_enable_intrs(nfc, XFER_COMPLETE);
+ writel(nfc->prog, nfc->base + PROG_OFST);
+ anfc_wait_for_event(nfc);
+ }
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ len = nand_subop_get_data_len(subop, op_id);
+ anfc_read_data_op(chip, instr->ctx.data.buf.in, len, 1, 0);
+
+ return 0;
+}
+
+/*
+ * anfc_read_param_get_feature_sp_read_type_exec - handle PARAM,
+ * GET_FEATURES and small-page reads (CMD + ADDR + WAITRDY + DATA_IN).
+ * Selects the matching controller program, issues the command and
+ * PIO-reads the payload via the bounce buffer.
+ */
+static int anfc_read_param_get_feature_sp_read_type_exec(struct nand_chip *chip,
+ const struct nand_subop
+ *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ unsigned int op_id, len;
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ u32 dma_mode = 0, addrcycles = 1, write_size = 0;
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+
+ if (nfc_op.cmds[0] == NAND_CMD_PARAM) {
+ nfc->prog = PROG_RDPARAM;
+ } else if (nfc_op.cmds[0] == NAND_CMD_GET_FEATURES) {
+ nfc->prog = PROG_GET_FEATURE;
+ } else if (nfc_op.cmds[0] == NAND_CMD_READ0) {
+ nfc->prog = PROG_PGRD;
+ addrcycles = achip->raddr_cycles + achip->caddr_cycles;
+ write_size = mtd->writesize;
+ dma_mode = 1;
+ }
+
+ anfc_prepare_cmd(chip, nfc_op.cmds[0], 0, dma_mode, write_size,
+ addrcycles);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ len = nand_subop_get_data_len(subop, op_id);
+ anfc_rw_pio_op(mtd, nfc->buf, roundup(len, 4), 1, nfc->prog, 1, 0);
+ memcpy(instr->ctx.data.buf.in, nfc->buf, len);
+
+ return 0;
+}
+
+/*
+ * anfc_random_datain_type_exec - handle a bare DATA_IN pattern.
+ * Continues an already-prepared page read: PIO-reads 'len' bytes
+ * (word-rounded) through the bounce buffer into the caller's buffer.
+ */
+static int anfc_random_datain_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ unsigned int op_id, len;
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+
+ len = nand_subop_get_data_len(subop, op_id);
+ anfc_rw_pio_op(mtd, nfc->buf, roundup(len, 4), 1, PROG_PGRD, 1, 0);
+ memcpy(instr->ctx.data.buf.in, nfc->buf, len);
+
+ return 0;
+}
+
+/*
+ * anfc_setfeature_type_exec - handle SET_FEATURES
+ * (CMD + ADDR + DATA_OUT + WAITRDY): issue the command with one address
+ * cycle, then write the feature parameter bytes.
+ */
+static int anfc_setfeature_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ unsigned int op_id, len;
+ struct anfc_op nfc_op = {};
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ nfc->prog = PROG_SET_FEATURE;
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+ anfc_prepare_cmd(chip, nfc_op.cmds[0], 0, 0, 0, 1);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ len = nand_subop_get_data_len(subop, op_id);
+ anfc_write_data_op(chip, (char *)instr->ctx.data.buf.out, len, 1, 0);
+
+ return 0;
+}
+
+/*
+ * anfc_change_read_column_type_exec - handle RNDOUT/RNDOUTSTART
+ * (CMD + ADDR + CMD + DATA_IN): reposition the column within the
+ * current page and PIO-read the requested bytes.
+ */
+static int anfc_change_read_column_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ unsigned int op_id, len;
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ nfc->prog = PROG_PGRD;
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+
+ /* Column change only: two address cycles, DMA-capable page read */
+ anfc_prepare_cmd(chip, nfc_op.cmds[0], nfc_op.cmds[1], 1,
+ mtd->writesize, 2);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ len = nand_subop_get_data_len(subop, op_id);
+ anfc_rw_pio_op(mtd, nfc->buf, roundup(len, 4), 1, nfc->prog, 1, 0);
+ memcpy(instr->ctx.data.buf.in, nfc->buf, len);
+
+ return 0;
+}
+
+/*
+ * anfc_page_read_type_exec - handle a full page read
+ * (CMD + ADDR + CMD + WAITRDY + DATA_IN) with all row+column address
+ * cycles, then PIO-read the data through the bounce buffer.
+ */
+static int anfc_page_read_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ unsigned int op_id, len;
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 addrcycles;
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ nfc->prog = PROG_PGRD;
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+
+ addrcycles = achip->raddr_cycles + achip->caddr_cycles;
+
+ anfc_prepare_cmd(chip, nfc_op.cmds[0], nfc_op.cmds[1], 1,
+ mtd->writesize, addrcycles);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ len = nand_subop_get_data_len(subop, op_id);
+
+ anfc_rw_pio_op(mtd, nfc->buf, roundup(len, 4), 1, nfc->prog, 1, 0);
+ memcpy(instr->ctx.data.buf.in, nfc->buf, len);
+
+ return 0;
+}
+
+/*
+ * anfc_zero_len_page_write_type_exec - handle SEQIN + ADDR with no data
+ * (zero-length page write): only sets up the command and addresses.
+ *
+ * NOTE(review): nfc->prog is set to PROG_PGRD in this write-side path —
+ * presumably intentional since no program is launched here, but confirm
+ * against the controller programming model.
+ */
+static int anfc_zero_len_page_write_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 addrcycles;
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ nfc->prog = PROG_PGRD;
+ addrcycles = achip->raddr_cycles + achip->caddr_cycles;
+
+ anfc_prepare_cmd(chip, nfc_op.cmds[0], NAND_CMD_PAGEPROG, 1,
+ mtd->writesize, addrcycles);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ return 0;
+}
+
+/*
+ * anfc_page_write_type_exec - handle a page program
+ * (CMD + ADDR + DATA_OUT + CMD [+ WAITRDY]): set up SEQIN/PAGEPROG with
+ * full addressing, then stream the data out.
+ */
+static int anfc_page_write_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ unsigned int op_id, len;
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 addrcycles;
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ op_id = nfc_op.data_instr_idx;
+ nfc->prog = PROG_PGPROG;
+
+ addrcycles = achip->raddr_cycles + achip->caddr_cycles;
+ anfc_prepare_cmd(chip, nfc_op.cmds[0], nfc_op.cmds[1], 1,
+ mtd->writesize, addrcycles);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+ if (!nfc_op.data_instr)
+ return 0;
+
+ len = nand_subop_get_data_len(subop, op_id);
+ anfc_write_data_op(chip, (char *)instr->ctx.data.buf.out, len, 1, 0);
+
+ return 0;
+}
+
+/*
+ * anfc_page_write_nowait_type_exec - handle a page program without a
+ * trailing WAITRDY (CMD + ADDR + DATA_OUT): the controller appends
+ * NAND_CMD_PAGEPROG itself; the full page is streamed in pktsize chunks.
+ */
+static int anfc_page_write_nowait_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ const struct nand_op_instr *instr;
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+ struct anfc_op nfc_op = {};
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 addrcycles;
+
+ anfc_parse_instructions(chip, subop, &nfc_op);
+ instr = nfc_op.data_instr;
+ nfc->prog = PROG_PGPROG;
+
+ addrcycles = achip->raddr_cycles + achip->caddr_cycles;
+ anfc_prepare_cmd(chip, nfc_op.cmds[0], NAND_CMD_PAGEPROG, 1,
+ mtd->writesize, addrcycles);
+ anfc_setpagecoladdr(nfc, nfc_op.row, nfc_op.col);
+
+ if (!nfc_op.data_instr)
+ return 0;
+
+ anfc_write_data_op(chip, (char *)instr->ctx.data.buf.out,
+ mtd->writesize, DIV_ROUND_UP(mtd->writesize,
+ achip->pktsize), achip->pktsize);
+
+ return 0;
+}
+
+/*
+ * Pattern table consumed by nand_op_parser_exec_op(); patterns are tried
+ * in order and the first match wins, so more specific sequences must
+ * appear before the generic ones they would otherwise shadow.
+ */
+static const struct nand_op_parser anfc_op_parser = NAND_OP_PARSER(
+ /* Use a separate function for each pattern */
+ NAND_OP_PARSER_PATTERN(
+ anfc_random_datain_type_exec,
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_change_read_column_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_page_read_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_page_write_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_CHUNK_SIZE),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_read_id_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_erase_zerolenpg_read_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_read_status_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_reset_cmd_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_setfeature_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_CHUNK_SIZE),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_page_write_nowait_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_read_param_get_feature_sp_read_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_zero_len_page_write_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYCLES)),
+ );
+
+/*
+ * anfc_select_chip - (de)select a target and program its per-chip state.
+ * num < 0 deselects: drops the runtime-PM reference. Otherwise takes a
+ * runtime-PM reference and loads the chip's CS number, BCH mode, ECC
+ * layout and timing values into the controller registers.
+ */
+static void anfc_select_chip(struct nand_chip *chip, int num)
+{
+ u32 val;
+ int ret;
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+ struct anfc_nand_controller *nfc = to_anfc(chip->controller);
+
+ if (num < 0) {
+ nfc->chip_active = false;
+ pm_runtime_mark_last_busy(nfc->dev);
+ pm_runtime_put_autosuspend(nfc->dev);
+ return;
+ }
+
+ nfc->chip_active = true;
+ ret = pm_runtime_get_sync(nfc->dev);
+ if (ret < 0) {
+ /* get_sync bumps the usage count even on failure */
+ pm_runtime_put_noidle(nfc->dev);
+ dev_err(nfc->dev, "runtime_get_sync failed\n");
+ return;
+ }
+
+ val = readl(nfc->base + MEM_ADDR2_OFST);
+ /* Replace only the CS and BCH-mode fields */
+ val &= ~(CS_MASK | BCH_MODE_MASK);
+ val |= (achip->csnum << CS_SHIFT) |
+ (achip->ecc_strength << BCH_MODE_SHIFT);
+ writel(val, nfc->base + MEM_ADDR2_OFST);
+ nfc->csnum = achip->csnum;
+ writel(achip->eccval, nfc->base + ECC_OFST);
+ writel(achip->inftimeval, nfc->base + DATA_INTERFACE_OFST);
+}
+
+/*
+ * anfc_exec_op - ->exec_op() entry: select the target CS, then dispatch
+ * the operation through the pattern parser.
+ *
+ * NOTE(review): the chip is selected (touching hardware and runtime PM)
+ * even when check_only is set — confirm this is intended.
+ */
+static int anfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ anfc_select_chip(chip, op->cs);
+ return nand_op_parser_exec_op(chip, &anfc_op_parser,
+ op, check_only);
+}
+
+/*
+ * anfc_irq_handler - acknowledge completion events.
+ * Clears the raised event bits, masks further status/signal interrupts
+ * (re-armed per-operation by anfc_enable_intrs()), and wakes the waiter.
+ */
+static irqreturn_t anfc_irq_handler(int irq, void *ptr)
+{
+ struct anfc_nand_controller *nfc = ptr;
+ u32 status;
+
+ status = readl(nfc->base + INTR_STS_OFST);
+ if (status & EVENT_MASK) {
+ writel(status & EVENT_MASK, nfc->base + INTR_STS_OFST);
+ writel(0, nfc->base + INTR_STS_EN_OFST);
+ writel(0, nfc->base + INTR_SIG_EN_OFST);
+ complete(&nfc->event);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+/*
+ * anfc_nand_attach_chip - post-identification setup for one chip.
+ * Derives the column/row address cycle counts from the page size and
+ * chip options, allocates the ECC scratch buffers used by the raw NAND
+ * core, and configures the ECC engine.
+ * Returns 0 on success, -ENOMEM on allocation failure, or the
+ * anfc_ecc_init() error.
+ */
+static int anfc_nand_attach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct anfc_nand_chip *achip = to_anfc_nand(chip);
+
+ if (mtd->writesize <= SZ_512)
+ achip->caddr_cycles = 1;
+ else
+ achip->caddr_cycles = 2;
+
+ if (chip->options & NAND_ROW_ADDR_3)
+ achip->raddr_cycles = 3;
+ else
+ achip->raddr_cycles = 2;
+
+ chip->ecc.calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
+ chip->ecc.code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
+ if (!chip->ecc.calc_buf || !chip->ecc.code_buf) {
+ kfree(chip->ecc.calc_buf);
+ kfree(chip->ecc.code_buf);
+ chip->ecc.calc_buf = NULL;
+ chip->ecc.code_buf = NULL;
+ return -ENOMEM;
+ }
+
+ /* anfc_ecc_init() returns int; do not store it in an unsigned */
+ return anfc_ecc_init(mtd, &chip->ecc, chip->ecc.mode);
+}
+
+/* Controller hooks registered with the raw NAND core */
+static const struct nand_controller_ops anfc_nand_controller_ops = {
+ .attach_chip = anfc_nand_attach_chip,
+ .exec_op = anfc_exec_op,
+};
+
+/*
+ * anfc_init_timing_mode - negotiate the fastest supported timing mode.
+ * Prefers NVDDR if the chip advertises it, else the best async (SDR)
+ * mode; programs the mode into the flash via SET_FEATURES and caches the
+ * controller-side value in achip->inftimeval. For SDR modes 2-5 the sys
+ * clock is re-rated below 90 MHz (controller limitation).
+ * Returns 0 on success or a clock-API error.
+ */
+static int anfc_init_timing_mode(struct anfc_nand_controller *nfc,
+ struct anfc_nand_chip *achip)
+{
+ struct nand_chip *chip = &achip->chip;
+ int mode, err;
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+ u32 inftimeval;
+ bool change_sdr_clk = false;
+
+ memset(feature, 0, sizeof(feature));
+ /* Get nvddr timing modes */
+ mode = onfi_get_sync_timing_mode(chip) & 0xff;
+ if (!mode) {
+ mode = fls(onfi_get_async_timing_mode(chip)) - 1;
+ inftimeval = mode;
+ if (mode >= 2 && mode <= 5)
+ change_sdr_clk = true;
+ } else {
+ mode = fls(mode) - 1;
+ inftimeval = NVDDR_MODE | (mode << NVDDR_TIMING_MODE_SHIFT);
+ mode |= ONFI_DATA_INTERFACE_NVDDR;
+ }
+
+ feature[0] = mode;
+ nand_select_target(chip, achip->csnum);
+ anfc_prepare_cmd(chip, NAND_CMD_SET_FEATURES, 0, 0, 0, 1);
+ anfc_setpagecoladdr(nfc, 0x0, ONFI_FEATURE_ADDR_TIMING_MODE);
+
+ anfc_write_data_op(chip, feature, sizeof(feature), 1, 0);
+ nand_deselect_target(chip);
+
+ /*
+ * SDR timing modes 2-5 will not work for the arasan nand when
+ * freq > 90 MHz, so reduce the freq in SDR modes 2-5 to < 90Mhz
+ */
+ if (change_sdr_clk) {
+ clk_disable_unprepare(nfc->clk_sys);
+ err = clk_set_rate(nfc->clk_sys, SDR_MODE_DEFLT_FREQ);
+ if (err) {
+ dev_err(nfc->dev, "Can't set the clock rate\n");
+ return err;
+ }
+ err = clk_prepare_enable(nfc->clk_sys);
+ if (err) {
+ /*
+ * The clock is not enabled when prepare_enable
+ * fails; do not disable it again here.
+ */
+ dev_err(nfc->dev, "Unable to enable sys clock.\n");
+ return err;
+ }
+ }
+ achip->inftimeval = inftimeval;
+
+ if (mode & ONFI_DATA_INTERFACE_NVDDR)
+ achip->spktsize = NVDDR_MODE_PACKET_SIZE;
+
+ return 0;
+}
+
+/*
+ * anfc_nand_chip_init - initialize and register one chip from DT.
+ * Reads the chip-select from the "reg" property, wires the chip to the
+ * controller, scans it, negotiates timings, and registers the MTD.
+ * Returns 0 on success or a negative error code; on failure after a
+ * successful nand_scan() the scanned chip is cleaned up.
+ */
+static int anfc_nand_chip_init(struct anfc_nand_controller *nfc,
+ struct anfc_nand_chip *anand_chip,
+ struct device_node *np)
+{
+ struct nand_chip *chip = &anand_chip->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = of_property_read_u32(np, "reg", &anand_chip->csnum);
+ if (ret) {
+ dev_err(nfc->dev, "can't get chip-select\n");
+ return -ENXIO;
+ }
+ mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL, "arasan_nand.%d",
+ anand_chip->csnum);
+ if (!mtd->name)
+ return -ENOMEM;
+ mtd->dev.parent = nfc->dev;
+ chip->controller = &nfc->controller;
+ chip->options = NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE;
+ chip->bbt_options = NAND_BBT_USE_FLASH;
+ chip->legacy.select_chip = anfc_select_chip;
+ nand_set_flash_node(chip, np);
+
+ /* Default to SDR packet size until timings are negotiated */
+ anand_chip->spktsize = SDR_MODE_PACKET_SIZE;
+
+ ret = nand_scan(chip, 1);
+ if (ret) {
+ dev_err(nfc->dev, "nand_scan_tail for NAND failed\n");
+ return ret;
+ }
+ ret = anfc_init_timing_mode(nfc, anand_chip);
+ if (ret) {
+ dev_err(nfc->dev, "timing mode init failed\n");
+ goto err_cleanup;
+ }
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ goto err_cleanup;
+
+ return 0;
+
+err_cleanup:
+ /* Undo nand_scan() so the chip is not leaked on failure */
+ nand_cleanup(chip);
+ return ret;
+}
+
+/*
+ * anfc_probe - platform driver probe.
+ * Maps registers, requests the IRQ, enables the sys and flash clocks,
+ * brings up runtime PM (autosuspend), then initializes every child chip
+ * node. A chip that fails to init is skipped, not fatal; only allocation
+ * failure unwinds everything.
+ */
+static int anfc_probe(struct platform_device *pdev)
+{
+ struct anfc_nand_controller *nfc;
+ struct anfc_nand_chip *anand_chip;
+ struct device_node *np = pdev->dev.of_node, *child;
+ struct resource *res;
+ int err;
+
+ nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nand_controller_init(&nfc->controller);
+ INIT_LIST_HEAD(&nfc->chips);
+ init_completion(&nfc->event);
+ nfc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, nfc);
+ nfc->csnum = -1;
+ nfc->controller.ops = &anfc_nand_controller_ops;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nfc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(nfc->base))
+ return PTR_ERR(nfc->base);
+ nfc->irq = platform_get_irq(pdev, 0);
+ if (nfc->irq < 0)
+ return -ENXIO;
+
+ /* Controller DMA can address 64 bits */
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ err = devm_request_irq(&pdev->dev, nfc->irq, anfc_irq_handler,
+ 0, "arasannfc", nfc);
+ if (err)
+ return err;
+ nfc->clk_sys = devm_clk_get(&pdev->dev, "clk_sys");
+ if (IS_ERR(nfc->clk_sys)) {
+ dev_err(&pdev->dev, "sys clock not found.\n");
+ return PTR_ERR(nfc->clk_sys);
+ }
+
+ nfc->clk_flash = devm_clk_get(&pdev->dev, "clk_flash");
+ if (IS_ERR(nfc->clk_flash)) {
+ dev_err(&pdev->dev, "flash clock not found.\n");
+ return PTR_ERR(nfc->clk_flash);
+ }
+
+ err = clk_prepare_enable(nfc->clk_sys);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to enable sys clock.\n");
+ return err;
+ }
+
+ err = clk_prepare_enable(nfc->clk_flash);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to enable flash clock.\n");
+ goto clk_dis_sys;
+ }
+
+ /* Active + extra ref while chips are probed; dropped below */
+ pm_runtime_set_autosuspend_delay(nfc->dev, ANFC_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(nfc->dev);
+ pm_runtime_set_active(nfc->dev);
+ pm_runtime_get_noresume(nfc->dev);
+ pm_runtime_enable(nfc->dev);
+ for_each_available_child_of_node(np, child) {
+ anand_chip = devm_kzalloc(&pdev->dev, sizeof(*anand_chip),
+ GFP_KERNEL);
+ if (!anand_chip) {
+ of_node_put(child);
+ err = -ENOMEM;
+ goto nandchip_clean_up;
+ }
+ err = anfc_nand_chip_init(nfc, anand_chip, child);
+ if (err) {
+ /* Skip this chip but keep probing the others */
+ devm_kfree(&pdev->dev, anand_chip);
+ continue;
+ }
+
+ list_add_tail(&anand_chip->node, &nfc->chips);
+ }
+ pm_runtime_mark_last_busy(nfc->dev);
+ pm_runtime_put_autosuspend(nfc->dev);
+ return 0;
+
+nandchip_clean_up:
+ list_for_each_entry(anand_chip, &nfc->chips, node)
+ nand_release(&anand_chip->chip);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ clk_disable_unprepare(nfc->clk_flash);
+clk_dis_sys:
+ clk_disable_unprepare(nfc->clk_sys);
+
+ return err;
+}
+
+/*
+ * anfc_remove - tear down all registered chips, disable runtime PM and
+ * the controller clocks. Mirrors the probe error path.
+ */
+static int anfc_remove(struct platform_device *pdev)
+{
+ struct anfc_nand_controller *nfc = platform_get_drvdata(pdev);
+ struct anfc_nand_chip *anand_chip;
+
+ list_for_each_entry(anand_chip, &nfc->chips, node)
+ nand_release(&anand_chip->chip);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+
+ clk_disable_unprepare(nfc->clk_sys);
+ clk_disable_unprepare(nfc->clk_flash);
+
+ return 0;
+}
+
+/* Devicetree match table */
+static const struct of_device_id anfc_ids[] = {
+ { .compatible = "arasan,nfc-v3p10" },
+ { .compatible = "xlnx,zynqmp-nand" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, anfc_ids);
+
+/*
+ * System sleep hooks: piggyback on runtime PM to gate the clocks.
+ * NOTE(review): pm_runtime_get_sync() can return a positive value on
+ * success, which anfc_resume() would propagate — confirm callers treat
+ * positive returns as success.
+ */
+static int anfc_suspend(struct device *dev)
+{
+ return pm_runtime_put_sync(dev);
+}
+
+static int anfc_resume(struct device *dev)
+{
+ return pm_runtime_get_sync(dev);
+}
+
+/* Runtime suspend: gate both controller clocks (they stay prepared). */
+static int __maybe_unused anfc_runtime_suspend(struct device *dev)
+{
+ struct anfc_nand_controller *nfc = dev_get_drvdata(dev);
+
+ clk_disable(nfc->clk_sys);
+ clk_disable(nfc->clk_flash);
+
+ return 0;
+}
+
+/* Refuse to idle while a chip is selected (transfer in flight). */
+static int __maybe_unused anfc_runtime_idle(struct device *dev)
+{
+ struct anfc_nand_controller *nfc = dev_get_drvdata(dev);
+
+ if (nfc->chip_active)
+ return -EBUSY;
+
+ return 0;
+}
+
+/*
+ * Runtime resume: ungate both clocks; on flash-clock failure the sys
+ * clock is gated again so the state stays balanced.
+ */
+static int __maybe_unused anfc_runtime_resume(struct device *dev)
+{
+ struct anfc_nand_controller *nfc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_enable(nfc->clk_sys);
+ if (ret) {
+ dev_err(dev, "Cannot enable sys clock.\n");
+ return ret;
+ }
+
+ ret = clk_enable(nfc->clk_flash);
+ if (ret) {
+ dev_err(dev, "Cannot enable flash clock.\n");
+ clk_disable(nfc->clk_sys);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* System- and runtime-PM callbacks */
+static const struct dev_pm_ops anfc_pm_ops = {
+ .resume = anfc_resume,
+ .suspend = anfc_suspend,
+ .runtime_resume = anfc_runtime_resume,
+ .runtime_suspend = anfc_runtime_suspend,
+ .runtime_idle = anfc_runtime_idle,
+};
+
+static struct platform_driver anfc_driver = {
+ .driver = {
+ .name = "arasan-nand-controller",
+ .of_match_table = anfc_ids,
+ .pm = &anfc_pm_ops,
+ },
+ .probe = anfc_probe,
+ .remove = anfc_remove,
+};
+module_platform_driver(anfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Xilinx, Inc");
+MODULE_DESCRIPTION("Arasan NAND Flash Controller Driver");
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 4f1bb862b62f..393cf5cf0f3c 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -467,9 +467,11 @@ static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
* if we don't do this. I have no clue why, but I seem to have 'fixed'
* it in the doc2000 driver in August 1999. dwmw2.
*/
- ret = nand_reset(chip, chipnr);
- if (ret)
- return ret;
+ if (chip->parameters.onfi->jedec_id == NAND_MFR_TOSHIBA) {
+ ret = nand_reset(chip, chipnr);
+ if (ret)
+ return ret;
+ }
nand_select_target(chip, chipnr);
diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c
index 8fe8d7bdd203..a6873750e857 100644
--- a/drivers/mtd/nand/raw/nand_onfi.c
+++ b/drivers/mtd/nand/raw/nand_onfi.c
@@ -294,6 +294,8 @@ int nand_onfi_detect(struct nand_chip *chip)
onfi->tR = le16_to_cpu(p->t_r);
onfi->tCCS = le16_to_cpu(p->t_ccs);
onfi->async_timing_mode = le16_to_cpu(p->async_timing_mode);
+ onfi->jedec_id = le16_to_cpu(p->jedec_id);
+ onfi->src_sync_timing_mode = le16_to_cpu(p->src_sync_timing_mode);
onfi->vendor_revision = le16_to_cpu(p->vendor_revision);
memcpy(onfi->vendor, p->vendor, sizeof(p->vendor));
chip->parameters.onfi = onfi;
diff --git a/drivers/mtd/nand/raw/pl353_nand.c b/drivers/mtd/nand/raw/pl353_nand.c
new file mode 100644
index 000000000000..4bb12a508fe1
--- /dev/null
+++ b/drivers/mtd/nand/raw/pl353_nand.c
@@ -0,0 +1,1234 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM PL353 NAND flash controller driver
+ *
+ * Copyright (C) 2017 Xilinx, Inc
+ * Author: Punnaiah chowdary kalluri <punnaiah@xilinx.com>
+ * Author: Naga Sureshkumar Relli <nagasure@xilinx.com>
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/pl353-smc.h>
+#include <linux/clk.h>
+
+#define PL353_NAND_DRIVER_NAME "pl353-nand"
+
+/* NAND flash driver defines */
+#define PL353_NAND_ECC_SIZE 512 /* Size of data for ECC operation */
+
+/* AXI Address definitions */
+#define START_CMD_SHIFT 3
+#define END_CMD_SHIFT 11
+#define END_CMD_VALID_SHIFT 20
+#define ADDR_CYCLES_SHIFT 21
+#define CLEAR_CS_SHIFT 21
+#define ECC_LAST_SHIFT 10
+#define COMMAND_PHASE (0 << 19)
+#define DATA_PHASE BIT(19)
+#define GET_ADDR(pos, val) (((val) & 0xFF) << (8 * (pos)))
+
+#define PL353_NAND_ECC_LAST BIT(ECC_LAST_SHIFT) /* Set ECC_Last */
+#define PL353_NAND_CLEAR_CS BIT(CLEAR_CS_SHIFT) /* Clear chip select */
+
+#define PL353_NAND_ECC_BUSY_TIMEOUT (1 * HZ)
+#define PL353_NAND_DEV_BUSY_TIMEOUT (1 * HZ)
+#define PL353_NAND_LAST_TRANSFER_LENGTH 4
+#define PL353_NAND_ECC_VALID_SHIFT 24
+#define PL353_NAND_ECC_VALID_MASK 0x40
+#define PL353_ECC_BITS_BYTEOFF_MASK 0x1FF
+#define PL353_ECC_BITS_BITOFF_MASK 0x7
+#define PL353_ECC_BIT_MASK 0xFFF
+#define PL353_TREA_MAX_VALUE 1
+#define PL353_MAX_ECC_CHUNKS 4
+#define PL353_MAX_ECC_BYTES 3
+#define PL353_MAX_CHUNK_SIZE 2112
+
+/**
+ * struct pl353_nfc_op - Decoded NAND sub-operation state
+ * @cmnds:		start (index 0) and end (index 1) command opcodes
+ * @addrs:		packed address cycles (never written by this driver)
+ * @data_instr_idx:	index of the data instruction within the subop
+ * @rdy_timeout_ms:	WAITRDY timeout; 0 when no WAITRDY instruction is present
+ * @rdy_delay_ns:	delay applied around the ready wait
+ * @data_instr:		data-in/data-out instruction, NULL when there is none
+ */
+struct pl353_nfc_op {
+	u32 cmnds[2];
+	u32 addrs;
+	unsigned int data_instr_idx;
+	unsigned int rdy_timeout_ms;
+	unsigned int rdy_delay_ns;
+	const struct nand_op_instr *data_instr;
+};
+
+/**
+ * struct pl353_nand_controller - Defines the NAND flash controller driver
+ * instance
+ * @controller:	NAND controller structure
+ * @chip:	NAND chip information structure (embedded, see to_pl353_nand())
+ * @dev:	Parent device (used to print error messages)
+ * @regs:	Virtual address of the NAND flash device
+ * @dataphase_addrflags: AXI address flags used for data phase transfers
+ * @addr_cycles: Address cycles
+ * @mclk_rate:	Clock rate of the Memory controller
+ * @buswidth:	Bus width 8 or 16
+ */
+struct pl353_nand_controller {
+	struct nand_controller controller;
+	struct nand_chip chip;
+	struct device *dev;
+	void __iomem *regs;
+	u32 dataphase_addrflags;
+	u8 addr_cycles;
+	ulong mclk_rate;
+	u32 buswidth;
+};
+
+/* Map a nand_chip back to its embedding driver instance (chip is a member). */
+static inline struct pl353_nand_controller *
+		to_pl353_nand(struct nand_chip *chip)
+{
+	return container_of(chip, struct pl353_nand_controller, chip);
+}
+
+/* ECC byte positions for the 16-byte OOB: ecc.bytes per ECC step. */
+static int pl353_ecc_ooblayout16_ecc(struct mtd_info *mtd, int section,
+				     struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (section < chip->ecc.steps) {
+		oobregion->offset = section * chip->ecc.bytes;
+		oobregion->length = chip->ecc.bytes;
+		return 0;
+	}
+
+	return -ERANGE;
+}
+
+/*
+ * Free OOB bytes for the 16-byte layout: 8 bytes starting at
+ * (section * ecc.bytes) + 8.
+ * NOTE(review): a 16-byte OOB implies a 512-byte page, so only section 0
+ * should be reachable here; for section > 0 the region would run past the
+ * OOB area — confirm ecc.steps is 1 in this configuration.
+ */
+static int pl353_ecc_ooblayout16_free(struct mtd_info *mtd, int section,
+				      struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (section >= chip->ecc.steps)
+		return -ERANGE;
+
+	oobregion->offset = (section * chip->ecc.bytes) + 8;
+	oobregion->length = 8;
+
+	return 0;
+}
+
+/* OOB layout callbacks for devices with a 16-byte spare area. */
+static const struct mtd_ooblayout_ops pl353_ecc_ooblayout16_ops = {
+	.ecc = pl353_ecc_ooblayout16_ecc,
+	.free = pl353_ecc_ooblayout16_free,
+};
+
+static int pl353_ecc_ooblayout64_ecc(struct mtd_info *mtd, int section,
+				     struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (section >= chip->ecc.steps)
+		return -ERANGE;
+
+	/* ECC codes sit at the tail of the 64-byte OOB, from byte 52 on. */
+	oobregion->length = chip->ecc.bytes;
+	oobregion->offset = 52 + section * chip->ecc.bytes;
+
+	return 0;
+}
+
+static int pl353_ecc_ooblayout64_free(struct mtd_info *mtd, int section,
+				      struct mtd_oob_region *oobregion)
+{
+	/* Single free region: bytes 2..51 (bytes 0-1 are left untouched). */
+	if (section != 0)
+		return -ERANGE;
+
+	oobregion->length = 50;
+	oobregion->offset = 2;
+
+	return 0;
+}
+
+/* OOB layout callbacks for devices with a 64-byte spare area. */
+static const struct mtd_ooblayout_ops pl353_ecc_ooblayout64_ops = {
+	.ecc = pl353_ecc_ooblayout64_ecc,
+	.free = pl353_ecc_ooblayout64_free,
+};
+
+/* Generic flash bbt descriptors (mirror pattern is the main one reversed) */
+static u8 bbt_pattern[] = { 'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = { '1', 't', 'b', 'B' };
+
+/* Primary bad-block table: stored per chip in the last blocks of the flash. */
+static struct nand_bbt_descr bbt_main_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+	.offs = 4,
+	.len = 4,
+	.veroffs = 20,
+	.maxblocks = 4,
+	.pattern = bbt_pattern
+};
+
+/* Mirror copy of the bad-block table, same placement rules. */
+static struct nand_bbt_descr bbt_mirror_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+	.offs = 4,
+	.len = 4,
+	.veroffs = 20,
+	.maxblocks = 4,
+	.pattern = mirror_pattern
+};
+
+/*
+ * Temporarily narrow a 16-bit bus to 8 bits (or restore it) around accesses
+ * that must be byte wide. No-op when the bus is already 8 bits wide.
+ */
+static void pl353_nfc_force_byte_access(struct nand_chip *chip,
+					bool force_8bit)
+{
+	struct pl353_nand_controller *xnfc = to_pl353_nand(chip);
+	unsigned int width;
+
+	if (xnfc->buswidth == 8)
+		return;
+
+	width = force_8bit ? PL353_SMC_MEM_WIDTH_8 : PL353_SMC_MEM_WIDTH_16;
+	if (pl353_smc_set_buswidth(width))
+		dev_err(xnfc->dev, "Error in Buswidth\n");
+}
+
+/*
+ * Poll (with cond_resched) for the SMC raw interrupt that signals the NAND
+ * device is ready, then clear it.
+ *
+ * Return: 0 when ready, -ETIMEDOUT after PL353_NAND_DEV_BUSY_TIMEOUT.
+ */
+static inline int pl353_wait_for_dev_ready(struct nand_chip *chip)
+{
+	unsigned long timeout = jiffies + PL353_NAND_DEV_BUSY_TIMEOUT;
+
+	while (!pl353_smc_get_nand_int_status_raw()) {
+		if (time_after_eq(jiffies, timeout)) {
+			pr_err("%s timed out\n", __func__);
+			return -ETIMEDOUT;
+		}
+		cond_resched();
+	}
+
+	pl353_smc_clr_nand_int();
+
+	return 0;
+}
+
+/**
+ * pl353_nand_read_data_op - read chip data into buffer
+ * @chip: Pointer to the NAND chip info structure
+ * @in: Pointer to the buffer to store read data
+ * @len: Number of bytes to read
+ * @force_8bit: Force 8-bit bus access
+ *
+ * Word (32-bit) accesses are used only when no 8-bit access is forced and
+ * both the buffer and the length are 32-bit aligned; all other transfers
+ * fall back to byte accesses so no trailing (len % 4) bytes are dropped.
+ */
+static void pl353_nand_read_data_op(struct nand_chip *chip, u8 *in,
+				    unsigned int len, bool force_8bit)
+{
+	struct pl353_nand_controller *xnfc = to_pl353_nand(chip);
+	unsigned int i;
+
+	if (force_8bit)
+		pl353_nfc_force_byte_access(chip, true);
+
+	/*
+	 * The previous test "(aligned) || !force_8bit" promoted any
+	 * non-forced transfer to word accesses, truncating odd lengths
+	 * (len /= 4 discards the remainder, and reads nothing for len < 4),
+	 * and cast the pointer through u32, which truncates on 64-bit
+	 * builds. Require full alignment and use uintptr_t instead, and
+	 * never issue 32-bit MMIO while the bus is forced to 8 bits.
+	 */
+	if (!force_8bit && IS_ALIGNED((uintptr_t)in, sizeof(u32)) &&
+	    IS_ALIGNED(len, sizeof(u32))) {
+		u32 *ptr = (u32 *)in;
+
+		for (i = 0; i < len / sizeof(u32); i++)
+			ptr[i] = readl(xnfc->regs + xnfc->dataphase_addrflags);
+	} else {
+		for (i = 0; i < len; i++)
+			in[i] = readb(xnfc->regs + xnfc->dataphase_addrflags);
+	}
+
+	if (force_8bit)
+		pl353_nfc_force_byte_access(chip, false);
+}
+
+/**
+ * pl353_nand_write_data_op - write buffer to chip
+ * @chip: Pointer to the nand_chip structure
+ * @buf: Pointer to the buffer holding the write data
+ * @len: Number of bytes to write
+ * @force_8bit: Force 8-bit bus access
+ *
+ * Word (32-bit) accesses are used only when no 8-bit access is forced and
+ * both the buffer and the length are 32-bit aligned; all other transfers
+ * fall back to byte accesses so no trailing (len % 4) bytes are dropped.
+ */
+static void pl353_nand_write_data_op(struct nand_chip *chip, const u8 *buf,
+				     int len, bool force_8bit)
+{
+	struct pl353_nand_controller *xnfc = to_pl353_nand(chip);
+	int i;
+
+	if (force_8bit)
+		pl353_nfc_force_byte_access(chip, true);
+
+	/*
+	 * Same fix as the read path: use uintptr_t for the alignment test
+	 * (u32 truncates pointers on 64-bit builds), require length
+	 * alignment for the word path, and keep forced-8-bit transfers on
+	 * byte accesses.
+	 */
+	if (!force_8bit && IS_ALIGNED((uintptr_t)buf, sizeof(u32)) &&
+	    IS_ALIGNED(len, sizeof(u32))) {
+		const u32 *ptr = (const u32 *)buf;
+
+		for (i = 0; i < len / (int)sizeof(u32); i++)
+			writel(ptr[i], xnfc->regs + xnfc->dataphase_addrflags);
+	} else {
+		for (i = 0; i < len; i++)
+			writeb(buf[i], xnfc->regs + xnfc->dataphase_addrflags);
+	}
+
+	if (force_8bit)
+		pl353_nfc_force_byte_access(chip, false);
+}
+
+/*
+ * Poll the SMC until the ECC block goes idle.
+ *
+ * Return: 0 when idle, -ETIMEDOUT after PL353_NAND_ECC_BUSY_TIMEOUT.
+ */
+static inline int pl353_wait_for_ecc_done(void)
+{
+	unsigned long timeout = jiffies + PL353_NAND_ECC_BUSY_TIMEOUT;
+
+	while (pl353_smc_ecc_is_busy()) {
+		if (time_after_eq(jiffies, timeout)) {
+			pr_err("%s timed out\n", __func__);
+			return -ETIMEDOUT;
+		}
+		cond_resched();
+	}
+
+	return 0;
+}
+
+/**
+ * pl353_nand_calculate_hwecc - Calculate Hardware ECC
+ * @chip: Pointer to the nand_chip structure
+ * @data: Pointer to the page data
+ * @ecc: Pointer to the ECC buffer where ECC data needs to be stored
+ *
+ * This function retrieves the Hardware ECC data from the controller and returns
+ * ECC data back to the MTD subsystem.
+ * It operates on a number of 512 byte blocks of NAND memory and can be
+ * programmed to store the ECC codes after the data in memory. For writes,
+ * the ECC is written to the spare area of the page. For reads, the result of
+ * a block ECC check are made available to the device driver.
+ *
+ * ------------------------------------------------------------------------
+ * |               n * 512 blocks                  | extra  | ecc    |     |
+ * |                                               | block  | codes  |     |
+ * ------------------------------------------------------------------------
+ *
+ * The ECC calculation uses a simple Hamming code, using 1-bit correction 2-bit
+ * detection. It starts when a valid read or write command with a 512 byte
+ * aligned address is detected on the memory interface.
+ *
+ * Return: 0 on success or -EINVAL when the controller reports the ECC value
+ *	   as not valid.
+ */
+static int pl353_nand_calculate_hwecc(struct nand_chip *chip,
+				      const u8 *data, u8 *ecc)
+{
+	u32 ecc_value;
+	u8 chunk, ecc_byte, ecc_status;
+
+	for (chunk = 0; chunk < PL353_MAX_ECC_CHUNKS; chunk++) {
+		/* Read ECC value for each block */
+		ecc_value = pl353_smc_get_ecc_val(chunk);
+		ecc_status = (ecc_value >> PL353_NAND_ECC_VALID_SHIFT);
+
+		/*
+		 * Bail out early when the controller flags the value as not
+		 * valid. Return a real errno: the previous bare -1 was
+		 * propagated by the page read/write paths and surfaced to
+		 * callers as a misleading -EPERM.
+		 */
+		if (!(ecc_status & PL353_NAND_ECC_VALID_MASK)) {
+			pr_warn("%s status failed\n", __func__);
+			return -EINVAL;
+		}
+
+		/* Copy the three code bytes, inverted, LSB first. */
+		for (ecc_byte = 0; ecc_byte < PL353_MAX_ECC_BYTES;
+		     ecc_byte++) {
+			*ecc++ = ~ecc_value & 0xFF;
+			ecc_value >>= 8;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * pl353_nand_correct_data - ECC correction function
+ * @chip: Pointer to the nand_chip structure
+ * @buf: Pointer to the page data
+ * @read_ecc: Pointer to the ECC value read from spare data area
+ * @calc_ecc: Pointer to the calculated ECC value
+ *
+ * This function corrects the ECC single bit errors & detects 2-bit errors.
+ * The return convention matches what pl353_nand_read_page_hwecc() expects
+ * from chip->ecc.correct (negative counts as an uncorrectable failure).
+ *
+ * Return:	0 if no ECC errors found
+ *		1 if single bit error found and corrected.
+ *		-1 if multiple uncorrectable ECC errors found.
+ */
+static int pl353_nand_correct_data(struct nand_chip *chip, unsigned char *buf,
+				   unsigned char *read_ecc,
+				   unsigned char *calc_ecc)
+{
+	unsigned char bit_addr;
+	unsigned int byte_addr;
+	unsigned short ecc_odd, ecc_even, read_ecc_lower, read_ecc_upper;
+	unsigned short calc_ecc_lower, calc_ecc_upper;
+
+	/* Split the 24-bit codes into two 12-bit halves (odd/even parity). */
+	read_ecc_lower = (read_ecc[0] | (read_ecc[1] << 8)) &
+			 PL353_ECC_BIT_MASK;
+	read_ecc_upper = ((read_ecc[1] >> 4) | (read_ecc[2] << 4)) &
+			 PL353_ECC_BIT_MASK;
+
+	calc_ecc_lower = (calc_ecc[0] | (calc_ecc[1] << 8)) &
+			 PL353_ECC_BIT_MASK;
+	calc_ecc_upper = ((calc_ecc[1] >> 4) | (calc_ecc[2] << 4)) &
+			 PL353_ECC_BIT_MASK;
+
+	ecc_odd = read_ecc_lower ^ calc_ecc_lower;
+	ecc_even = read_ecc_upper ^ calc_ecc_upper;
+
+	/* no error */
+	if (!ecc_odd && !ecc_even)
+		return 0;
+
+	/* Halves complementary: classic Hamming single-bit error signature. */
+	if (ecc_odd == (~ecc_even & PL353_ECC_BIT_MASK)) {
+		/* bits [11:3] of error code is byte offset */
+		byte_addr = (ecc_odd >> 3) & PL353_ECC_BITS_BYTEOFF_MASK;
+		/* bits [2:0] of error code is bit offset */
+		bit_addr = ecc_odd & PL353_ECC_BITS_BITOFF_MASK;
+		/* Toggling error bit */
+		buf[byte_addr] ^= (BIT(bit_addr));
+		return 1;
+	}
+
+	/* one error in parity */
+	if (hweight32(ecc_odd | ecc_even) == 1)
+		return 1;
+
+	/* Uncorrectable error */
+	return -1;
+}
+
+/**
+ * pl353_prepare_cmd - Issue the command phase cycles for a page operation
+ * @chip: Pointer to the nand_chip structure
+ * @page: Page (row) address
+ * @column: Column address within the page
+ * @start_cmd: First command opcode
+ * @end_cmd: Second command opcode (e.g. READSTART or PAGEPROG)
+ * @read: Non-zero for reads; marks @end_cmd as valid in the AXI flags
+ *
+ * Builds the AXI command-phase address from the address-cycle count and both
+ * opcodes, writes the address cycles through it, and leaves the matching
+ * data-phase address in xnfc->dataphase_addrflags for the caller's transfers.
+ */
+static void pl353_prepare_cmd(struct nand_chip *chip,
+			      int page, int column, int start_cmd, int end_cmd,
+			      bool read)
+{
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	struct pl353_nand_controller *xnfc = to_pl353_nand(chip);
+	unsigned long cmd_phase_data = 0;
+	u32 end_cmd_valid = 0, cmdphase_addrflags;
+
+	end_cmd_valid = read ? 1 : 0;
+	cmdphase_addrflags = ((xnfc->addr_cycles
+			       << ADDR_CYCLES_SHIFT) |
+			      (end_cmd_valid << END_CMD_VALID_SHIFT) |
+			      (COMMAND_PHASE) |
+			      (end_cmd << END_CMD_SHIFT) |
+			      (start_cmd << START_CMD_SHIFT));
+
+	/* Get the data phase address */
+	xnfc->dataphase_addrflags = ((0x0 << CLEAR_CS_SHIFT) |
+				     (0 << END_CMD_VALID_SHIFT) |
+				     (DATA_PHASE) |
+				     (end_cmd << END_CMD_SHIFT) |
+				     (0x0 << ECC_LAST_SHIFT));
+
+	/* A 16-bit bus addresses words, so halve the byte column. */
+	if (chip->options & NAND_BUSWIDTH_16)
+		column /= 2;
+
+	cmd_phase_data = column;
+	if (mtd->writesize > PL353_NAND_ECC_SIZE) {
+		cmd_phase_data |= page << 16;
+
+		/* Another address cycle for devices > 128MiB */
+		if (chip->options & NAND_ROW_ADDR_3) {
+			writel_relaxed(cmd_phase_data,
+				       xnfc->regs + cmdphase_addrflags);
+			cmd_phase_data = (page >> 16);
+		}
+	} else {
+		cmd_phase_data |= page << 8;
+	}
+
+	writel_relaxed(cmd_phase_data, xnfc->regs + cmdphase_addrflags);
+}
+
+/**
+ * pl353_nand_read_oob - [REPLACEABLE] the most common OOB data read function
+ * @chip: Pointer to the nand_chip structure
+ * @page: Page number to read
+ *
+ * Reads the whole spare area into chip->oob_poi; the final 4 bytes are
+ * transferred with CLEAR_CS set so chip select is released.
+ *
+ * Return: 0, or -ETIMEDOUT if the device did not become ready
+ */
+static int pl353_nand_read_oob(struct nand_chip *chip,
+			       int page)
+{
+	struct pl353_nand_controller *xnfc = to_pl353_nand(chip);
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	u8 *p;
+
+	/*
+	 * NOTE(review): pages smaller than 512 bytes return success without
+	 * transferring anything — confirm small-page devices are out of
+	 * scope for this controller.
+	 */
+	if (mtd->writesize < PL353_NAND_ECC_SIZE)
+		return 0;
+
+	pl353_prepare_cmd(chip, page, mtd->writesize, NAND_CMD_READ0,
+			  NAND_CMD_READSTART, 1);
+	if (pl353_wait_for_dev_ready(chip))
+		return -ETIMEDOUT;
+
+	p = chip->oob_poi;
+	pl353_nand_read_data_op(chip, p,
+				(mtd->oobsize -
+				PL353_NAND_LAST_TRANSFER_LENGTH), false);
+	p += (mtd->oobsize - PL353_NAND_LAST_TRANSFER_LENGTH);
+	xnfc->dataphase_addrflags |= PL353_NAND_CLEAR_CS;
+	pl353_nand_read_data_op(chip, p, PL353_NAND_LAST_TRANSFER_LENGTH,
+				false);
+
+	return 0;
+}
+
+/**
+ * pl353_nand_write_oob - [REPLACEABLE] the most common OOB data write function
+ * @chip: Pointer to the nand_chip structure
+ * @page: Page number to write
+ *
+ * Writes chip->oob_poi to the spare area; the final 4 bytes carry CLEAR_CS
+ * and END_CMD_VALID so PAGEPROG is issued with the last transfer.
+ *
+ * Return: 0, or -ETIMEDOUT if the device did not become ready
+ */
+static int pl353_nand_write_oob(struct nand_chip *chip,
+				int page)
+{
+	struct pl353_nand_controller *xnfc = to_pl353_nand(chip);
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	const u8 *buf = chip->oob_poi;
+
+	pl353_prepare_cmd(chip, page, mtd->writesize, NAND_CMD_SEQIN,
+			  NAND_CMD_PAGEPROG, 0);
+
+	pl353_nand_write_data_op(chip, buf,
+				 (mtd->oobsize -
+				 PL353_NAND_LAST_TRANSFER_LENGTH), false);
+	buf += (mtd->oobsize - PL353_NAND_LAST_TRANSFER_LENGTH);
+	xnfc->dataphase_addrflags |= PL353_NAND_CLEAR_CS;
+	xnfc->dataphase_addrflags |= (1 << END_CMD_VALID_SHIFT);
+	pl353_nand_write_data_op(chip, buf, PL353_NAND_LAST_TRANSFER_LENGTH,
+				 false);
+	if (pl353_wait_for_dev_ready(chip))
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+/**
+ * pl353_nand_read_page_raw - [Intern] read page data without ecc
+ * @chip: Pointer to the nand_chip structure
+ * @buf: Pointer to the data buffer (may be NULL to only issue the command)
+ * @oob_required: Caller requires OOB data read to chip->oob_poi
+ * @page: Page number to read
+ *
+ * Return: 0, or -ETIMEDOUT if the device did not become ready
+ */
+static int pl353_nand_read_page_raw(struct nand_chip *chip,
+				    u8 *buf, int oob_required, int page)
+{
+	struct pl353_nand_controller *xnfc = to_pl353_nand(chip);
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	u8 *p;
+
+	pl353_prepare_cmd(chip, page, 0, NAND_CMD_READ0,
+			  NAND_CMD_READSTART, 1);
+	if (pl353_wait_for_dev_ready(chip))
+		return -ETIMEDOUT;
+	if (!buf)
+		return 0;
+	pl353_nand_read_data_op(chip, buf, mtd->writesize, false);
+	p = chip->oob_poi;
+	/* OOB is always read here, regardless of oob_required. */
+	pl353_nand_read_data_op(chip, p,
+				(mtd->oobsize -
+				PL353_NAND_LAST_TRANSFER_LENGTH), false);
+	p += (mtd->oobsize - PL353_NAND_LAST_TRANSFER_LENGTH);
+	xnfc->dataphase_addrflags |= PL353_NAND_CLEAR_CS;
+	pl353_nand_read_data_op(chip, p, PL353_NAND_LAST_TRANSFER_LENGTH,
+				false);
+
+	return 0;
+}
+
+/**
+ * pl353_nand_write_page_raw - [Intern] raw page write function
+ * @chip: Pointer to the nand_chip structure
+ * @buf: Pointer to the data buffer
+ * @oob_required: Caller requires OOB data written from chip->oob_poi
+ * @page: Page number to write
+ *
+ * Return: 0, or -ETIMEDOUT if the device did not become ready
+ */
+static int pl353_nand_write_page_raw(struct nand_chip *chip,
+				     const u8 *buf, int oob_required,
+				     int page)
+{
+	struct pl353_nand_controller *xnfc = to_pl353_nand(chip);
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	u8 *p;
+
+	pl353_prepare_cmd(chip, page, 0, NAND_CMD_SEQIN,
+			  NAND_CMD_PAGEPROG, 0);
+	pl353_nand_write_data_op(chip, buf, mtd->writesize, false);
+	p = chip->oob_poi;
+	/* OOB is always written here, regardless of oob_required. */
+	pl353_nand_write_data_op(chip, p,
+				 (mtd->oobsize -
+				 PL353_NAND_LAST_TRANSFER_LENGTH), false);
+	p += (mtd->oobsize - PL353_NAND_LAST_TRANSFER_LENGTH);
+	xnfc->dataphase_addrflags |= PL353_NAND_CLEAR_CS;
+	xnfc->dataphase_addrflags |= (1 << END_CMD_VALID_SHIFT);
+	pl353_nand_write_data_op(chip, p, PL353_NAND_LAST_TRANSFER_LENGTH,
+				 false);
+	if (pl353_wait_for_dev_ready(chip))
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+/**
+ * pl353_nand_write_page_hwecc - Hardware ECC based page write function
+ * @chip: Pointer to the nand_chip structure
+ * @buf: Pointer to the data buffer
+ * @oob_required: Caller requires OOB data written from chip->oob_poi
+ * @page: Page number to write
+ *
+ * Writes the page data, letting the SMC engine accumulate the ECC; the last
+ * 4 bytes are sent with ECC_LAST set so the engine closes the final
+ * 512-byte block. The generated codes are then written to the spare area.
+ *
+ * Return: 0 on success or a negative errno on failure
+ */
+static int pl353_nand_write_page_hwecc(struct nand_chip *chip,
+				       const u8 *buf, int oob_required,
+				       int page)
+{
+	int eccsize = chip->ecc.size;
+	int eccsteps = chip->ecc.steps;
+	u8 *ecc_calc = chip->ecc.calc_buf;
+	u8 *oob_ptr;
+	const u8 *p = buf;
+	int ret;	/* int, not u32: must carry negative error codes */
+	struct pl353_nand_controller *xnfc = to_pl353_nand(chip);
+	struct mtd_info *mtd = nand_to_mtd(chip);
+
+	pl353_prepare_cmd(chip, page, 0, NAND_CMD_SEQIN,
+			  NAND_CMD_PAGEPROG, 0);
+
+	/* All ECC steps but the last are written whole. */
+	for ( ; (eccsteps - 1); eccsteps--) {
+		pl353_nand_write_data_op(chip, p, eccsize, false);
+		p += eccsize;
+	}
+
+	pl353_nand_write_data_op(chip, p,
+				 (eccsize - PL353_NAND_LAST_TRANSFER_LENGTH),
+				 false);
+	p += (eccsize - PL353_NAND_LAST_TRANSFER_LENGTH);
+
+	/* Set ECC Last bit to 1 */
+	xnfc->dataphase_addrflags |= PL353_NAND_ECC_LAST;
+	pl353_nand_write_data_op(chip, p, PL353_NAND_LAST_TRANSFER_LENGTH,
+				 false);
+
+	/* Wait till the ECC operation is complete or timeout */
+	ret = pl353_wait_for_ecc_done();
+	if (ret) {
+		dev_err(xnfc->dev, "ECC Timeout\n");
+		/* The ECC registers are unreliable now; don't consume them. */
+		return ret;
+	}
+
+	p = buf;
+	ret = chip->ecc.calculate(chip, p, &ecc_calc[0]);
+	if (ret)
+		return ret;
+
+	/* Scatter the generated codes to the layout's ECC positions. */
+	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi,
+					 0, chip->ecc.total);
+	if (ret)
+		return ret;
+
+	/* Clear ECC last bit */
+	xnfc->dataphase_addrflags &= ~PL353_NAND_ECC_LAST;
+
+	/* Write the spare area with ECC bytes */
+	oob_ptr = chip->oob_poi;
+	pl353_nand_write_data_op(chip, oob_ptr,
+				 (mtd->oobsize -
+				 PL353_NAND_LAST_TRANSFER_LENGTH), false);
+
+	xnfc->dataphase_addrflags |= PL353_NAND_CLEAR_CS;
+	xnfc->dataphase_addrflags |= (1 << END_CMD_VALID_SHIFT);
+	oob_ptr += (mtd->oobsize - PL353_NAND_LAST_TRANSFER_LENGTH);
+	pl353_nand_write_data_op(chip, oob_ptr, PL353_NAND_LAST_TRANSFER_LENGTH,
+				 false);
+	if (pl353_wait_for_dev_ready(chip))
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+/**
+ * pl353_nand_read_page_hwecc - Hardware ECC based page read function
+ * @chip: Pointer to the nand_chip structure
+ * @buf: Pointer to the buffer to store read data
+ * @oob_required: Caller requires OOB data read to chip->oob_poi
+ * @page: Page number to read
+ *
+ * This functions reads data and checks the data integrity by comparing
+ * hardware generated ECC values and read ECC values from spare area.
+ * There is a limitation in SMC controller, that we must set ECC LAST on
+ * last data phase access, to tell ECC block not to expect any data further.
+ * Ex: When number of ECC STEPS are 4, then till 3 we will write to flash
+ * using SMC with HW ECC enabled. And for the last ECC STEP, we will subtract
+ * 4bytes from page size, and will initiate a transfer. And the remaining 4 as
+ * one more transfer with ECC_LAST bit set in NAND data phase register to
+ * notify ECC block not to expect any more data. The last block should be align
+ * with end of 512 byte block. Because of this limitation, we are not using
+ * core routines.
+ *
+ * Return: max bitflips (>= 0) on success or a negative errno on failure;
+ *	   ECC statistics are updated in the MTD structure.
+ */
+static int pl353_nand_read_page_hwecc(struct nand_chip *chip,
+				      u8 *buf, int oob_required, int page)
+{
+	struct pl353_nand_controller *xnfc = to_pl353_nand(chip);
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	int i, stat, eccsize = chip->ecc.size;
+	int eccbytes = chip->ecc.bytes;
+	int eccsteps = chip->ecc.steps;
+	unsigned int max_bitflips = 0;
+	u8 *p = buf;
+	u8 *ecc_calc = chip->ecc.calc_buf;
+	u8 *ecc = chip->ecc.code_buf;
+	u8 *oob_ptr;
+	int ret;	/* int, not u32: must carry negative error codes */
+
+	pl353_prepare_cmd(chip, page, 0, NAND_CMD_READ0,
+			  NAND_CMD_READSTART, 1);
+	if (pl353_wait_for_dev_ready(chip))
+		return -ETIMEDOUT;
+
+	/* All ECC steps but the last are read whole. */
+	for ( ; (eccsteps - 1); eccsteps--) {
+		pl353_nand_read_data_op(chip, p, eccsize, false);
+		p += eccsize;
+	}
+
+	pl353_nand_read_data_op(chip, p,
+				(eccsize - PL353_NAND_LAST_TRANSFER_LENGTH),
+				false);
+	p += (eccsize - PL353_NAND_LAST_TRANSFER_LENGTH);
+
+	/* Set ECC Last bit to 1 */
+	xnfc->dataphase_addrflags |= PL353_NAND_ECC_LAST;
+	pl353_nand_read_data_op(chip, p, PL353_NAND_LAST_TRANSFER_LENGTH,
+				false);
+
+	/* Wait till the ECC operation is complete or timeout */
+	ret = pl353_wait_for_ecc_done();
+	if (ret) {
+		dev_err(xnfc->dev, "ECC Timeout\n");
+		/* The ECC registers are unreliable now; don't consume them. */
+		return ret;
+	}
+
+	/* Read the calculated ECC value */
+	p = buf;
+	ret = chip->ecc.calculate(chip, p, &ecc_calc[0]);
+	if (ret)
+		return ret;
+
+	/* Clear ECC last bit */
+	xnfc->dataphase_addrflags &= ~PL353_NAND_ECC_LAST;
+
+	/* Read the stored ECC value */
+	oob_ptr = chip->oob_poi;
+	pl353_nand_read_data_op(chip, oob_ptr,
+				(mtd->oobsize -
+				PL353_NAND_LAST_TRANSFER_LENGTH), false);
+
+	/* de-assert chip select */
+	xnfc->dataphase_addrflags |= PL353_NAND_CLEAR_CS;
+	oob_ptr += (mtd->oobsize - PL353_NAND_LAST_TRANSFER_LENGTH);
+	pl353_nand_read_data_op(chip, oob_ptr, PL353_NAND_LAST_TRANSFER_LENGTH,
+				false);
+
+	ret = mtd_ooblayout_get_eccbytes(mtd, ecc, chip->oob_poi, 0,
+					 chip->ecc.total);
+	if (ret)
+		return ret;
+
+	eccsteps = chip->ecc.steps;
+	p = buf;
+
+	/* Check ECC error for all blocks and correct if it is correctable */
+	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+		stat = chip->ecc.correct(chip, p, &ecc[i], &ecc_calc[i]);
+		if (stat < 0) {
+			mtd->ecc_stats.failed++;
+		} else {
+			mtd->ecc_stats.corrected += stat;
+			max_bitflips = max_t(unsigned int, max_bitflips, stat);
+		}
+	}
+
+	return max_bitflips;
+}
+
+/*
+ * pl353_nand_exec_op_cmd - Execute one parsed sub-operation
+ *
+ * Walks the subop instructions collecting the start/end opcodes, address
+ * cycles and the optional data/waitrdy instructions, then drives the
+ * command phase followed by the data phase (if any).
+ *
+ * Return: 0 on success, -ETIMEDOUT if the device did not become ready.
+ */
+static int pl353_nand_exec_op_cmd(struct nand_chip *chip,
+				  const struct nand_subop *subop)
+{
+	/* Zero-initialized here; the old explicit memset was redundant. */
+	struct pl353_nfc_op nfc_op = {};
+	struct pl353_nand_controller *xnfc = to_pl353_nand(chip);
+	unsigned long end_cmd_valid = 0;
+	unsigned int op_id, len;
+	bool reading;
+	u32 cmdphase_addrflags;
+	const struct nand_op_instr *instr = NULL;
+	int i;
+	u32 col = 0, row = 0;
+	u32 naddrs = 0;
+
+	for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+		instr = &subop->instrs[op_id];
+
+		switch (instr->type) {
+		case NAND_OP_CMD_INSTR:
+			if (op_id) {
+				nfc_op.cmnds[1] = instr->ctx.cmd.opcode;
+
+				/*
+				 * end_cmd_valid is set when there is a
+				 * command cycle followed by Address cycle
+				 */
+				if (naddrs)
+					end_cmd_valid = 1;
+			} else {
+				nfc_op.cmnds[0] = instr->ctx.cmd.opcode;
+				end_cmd_valid = 0;
+			}
+
+			break;
+
+		case NAND_OP_ADDR_INSTR:
+			/*
+			 * NOTE(review): the address run is read from index 0.
+			 * nand_subop_get_addr_start_off() was previously
+			 * computed into dead stores and never applied; the
+			 * parser patterns here never split an address run,
+			 * so the start offset is assumed to be 0 — confirm.
+			 */
+			naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+			/* Up to four cycles form the column/low address. */
+			for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
+				col |= instr->ctx.addr.addrs[i] << (8 * i);
+
+			if (naddrs >= 5)
+				row = instr->ctx.addr.addrs[4];
+
+			if (naddrs >= 6)
+				row |= (instr->ctx.addr.addrs[5] << 8);
+
+			break;
+
+		case NAND_OP_DATA_IN_INSTR:
+		case NAND_OP_DATA_OUT_INSTR:
+			nfc_op.data_instr = instr;
+			nfc_op.data_instr_idx = op_id;
+			break;
+
+		case NAND_OP_WAITRDY_INSTR:
+			nfc_op.rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
+			nfc_op.rdy_delay_ns = instr->delay_ns;
+			break;
+		}
+	}
+
+	instr = nfc_op.data_instr;
+	op_id = nfc_op.data_instr_idx;
+
+	/* Clear interrupts */
+	pl353_smc_clr_nand_int();
+
+	cmdphase_addrflags = ((naddrs << ADDR_CYCLES_SHIFT) |
+			      (end_cmd_valid << END_CMD_VALID_SHIFT) |
+			      (COMMAND_PHASE) |
+			      (nfc_op.cmnds[1] << END_CMD_SHIFT) |
+			      (nfc_op.cmnds[0] << START_CMD_SHIFT));
+
+	xnfc->dataphase_addrflags = ((0x0 << CLEAR_CS_SHIFT) |
+				     (0 << END_CMD_VALID_SHIFT) |
+				     (DATA_PHASE) |
+				     (nfc_op.cmnds[0] << END_CMD_SHIFT) |
+				     (0x0 << ECC_LAST_SHIFT));
+
+	if (naddrs >= 2) {
+		writel_relaxed(col, xnfc->regs + cmdphase_addrflags);
+		writel_relaxed(row, xnfc->regs + cmdphase_addrflags);
+	} else {
+		writel_relaxed(col, xnfc->regs + cmdphase_addrflags);
+	}
+
+	if (!nfc_op.data_instr) {
+		if (nfc_op.rdy_timeout_ms) {
+			if (pl353_wait_for_dev_ready(chip))
+				return -ETIMEDOUT;
+		}
+		return 0;
+	}
+
+	reading = (nfc_op.data_instr->type == NAND_OP_DATA_IN_INSTR);
+	len = nand_subop_get_data_len(subop, op_id);
+
+	if (!reading) {
+		pl353_nand_write_data_op(chip, instr->ctx.data.buf.out,
+					 len, instr->ctx.data.force_8bit);
+		if (nfc_op.rdy_timeout_ms) {
+			if (pl353_wait_for_dev_ready(chip))
+				return -ETIMEDOUT;
+		}
+		ndelay(nfc_op.rdy_delay_ns);
+	} else {
+		ndelay(nfc_op.rdy_delay_ns);
+
+		if (nfc_op.rdy_timeout_ms) {
+			if (pl353_wait_for_dev_ready(chip))
+				return -ETIMEDOUT;
+		}
+
+		pl353_nand_read_data_op(chip, instr->ctx.data.buf.in, len,
+					instr->ctx.data.force_8bit);
+	}
+
+	return 0;
+}
+
+static const struct nand_op_parser pl353_nfc_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(pl353_nand_exec_op_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 7),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, PL353_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(pl353_nand_exec_op_cmd,
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, 7),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(true, PL353_MAX_CHUNK_SIZE),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ NAND_OP_PARSER_PATTERN(pl353_nand_exec_op_cmd,
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, PL353_MAX_CHUNK_SIZE),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ );
+
+/* nand_controller_ops.exec_op: delegate pattern matching to the op parser. */
+static int pl353_nfc_exec_op(struct nand_chip *chip,
+			     const struct nand_operation *op,
+			     bool check_only)
+{
+	return nand_op_parser_exec_op(chip, &pl353_nfc_op_parser,
+				      op, check_only);
+}
+
+/**
+ * pl353_nand_ecc_init - Initialize the ecc information as per the ecc mode
+ * @mtd: Pointer to the mtd_info structure
+ * @ecc: Pointer to ECC control structure
+ * @ecc_mode: ondie ecc status
+ *
+ * This function initializes the ecc block and functional pointers as per the
+ * ecc mode
+ *
+ * Return: 0 on success or negative errno.
+ */
+static int pl353_nand_ecc_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc,
+			       int ecc_mode)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct pl353_nand_controller *xnfc = to_pl353_nand(chip);
+	int ret = 0;
+
+	ecc->read_oob = pl353_nand_read_oob;
+	ecc->write_oob = pl353_nand_write_oob;
+	ecc->write_page_raw = pl353_nand_write_page_raw;
+	ecc->read_page_raw = pl353_nand_read_page_raw;
+
+	if (ecc_mode == NAND_ECC_ON_DIE) {
+		/* The flash corrects on-die: use raw I/O, bypass SMC ECC. */
+		ecc->write_page = pl353_nand_write_page_raw;
+		ecc->read_page = pl353_nand_read_page_raw;
+
+		/*
+		 * On-Die ECC spare bytes offset 8 is used for ECC codes
+		 * Use the BBT pattern descriptors
+		 */
+		chip->bbt_td = &bbt_main_descr;
+		chip->bbt_md = &bbt_mirror_descr;
+		ret = pl353_smc_set_ecc_mode(PL353_SMC_ECCMODE_BYPASS);
+		if (ret)
+			return ret;
+
+	} else {
+		ecc->mode = NAND_ECC_HW;
+
+		/* Hardware ECC generates 3 bytes ECC code for each 512 bytes */
+		ecc->bytes = 3;
+		ecc->strength = 1;
+		ecc->calculate = pl353_nand_calculate_hwecc;
+		ecc->correct = pl353_nand_correct_data;
+		ecc->size = PL353_NAND_ECC_SIZE;
+		ecc->read_page = pl353_nand_read_page_hwecc;
+		ecc->write_page = pl353_nand_write_page_hwecc;
+		pl353_smc_set_ecc_pg_size(mtd->writesize);
+		switch (mtd->writesize) {
+		case SZ_512:
+		case SZ_1K:
+		case SZ_2K:
+			/* Check the result, as the BYPASS path above does. */
+			ret = pl353_smc_set_ecc_mode(PL353_SMC_ECCMODE_APB);
+			if (ret)
+				return ret;
+			break;
+		default:
+			/* Other page sizes: fall back to soft Hamming ECC. */
+			ecc->calculate = nand_calculate_ecc;
+			ecc->correct = nand_correct_data;
+			ecc->size = 256;
+			break;
+		}
+
+		if (mtd->oobsize == 16) {
+			mtd_set_ooblayout(mtd, &pl353_ecc_ooblayout16_ops);
+		} else if (mtd->oobsize == 64) {
+			mtd_set_ooblayout(mtd, &pl353_ecc_ooblayout64_ops);
+		} else {
+			dev_err(xnfc->dev, "Unsupported oob Layout\n");
+			ret = -ENXIO;
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * pl353_nfc_setup_data_interface - Program SMC cycle timings for SDR modes
+ * @chip: Pointer to the nand_chip structure
+ * @csline: Chip select line, or NAND_DATA_IFACE_CHECK_ONLY to just validate
+ * @conf: Requested data interface configuration (SDR only)
+ *
+ * Converts the SDR timings (picoseconds) into memory-controller clock
+ * cycles and hands them to the SMC driver.
+ *
+ * Return: 0 on success or a negative errno for unsupported configurations
+ */
+static int pl353_nfc_setup_data_interface(struct nand_chip *chip, int csline,
+					  const struct nand_data_interface
+					  *conf)
+{
+	struct pl353_nand_controller *xnfc = to_pl353_nand(chip);
+	const struct nand_sdr_timings *sdr;
+	u32 timings[7], mckperiodps;
+
+	if (csline == NAND_DATA_IFACE_CHECK_ONLY)
+		return 0;
+
+	sdr = nand_get_sdr_timings(conf);
+	if (IS_ERR(sdr))
+		return PTR_ERR(sdr);
+
+	/*
+	 * SDR timings are given in pico-seconds while NFC timings must be
+	 * expressed in NAND controller clock cycles.
+	 * NOTE(review): dividing NSEC_PER_SEC by the rate before scaling to
+	 * ps floors the period to a whole nanosecond, slightly
+	 * underestimating it for non-integer-ns clocks — confirm acceptable.
+	 */
+	mckperiodps = NSEC_PER_SEC / xnfc->mclk_rate;
+	mckperiodps *= 1000;
+
+	if (sdr->tRC_min <= 20000)
+		/*
+		 * PL353 SMC needs one extra read cycle in SDR Mode 5
+		 * This is not written anywhere in the datasheet but
+		 * the results observed during testing.
+		 */
+		timings[0] = DIV_ROUND_UP(sdr->tRC_min, mckperiodps) + 1;
+	else
+		timings[0] = DIV_ROUND_UP(sdr->tRC_min, mckperiodps);
+
+	timings[1] = DIV_ROUND_UP(sdr->tWC_min, mckperiodps);
+
+	/*
+	 * For all SDR modes, PL353 SMC needs tREA max value as 1,
+	 * Results observed during testing.
+	 */
+	timings[2] = PL353_TREA_MAX_VALUE;
+	timings[3] = DIV_ROUND_UP(sdr->tWP_min, mckperiodps);
+	timings[4] = DIV_ROUND_UP(sdr->tCLR_min, mckperiodps);
+	timings[5] = DIV_ROUND_UP(sdr->tAR_min, mckperiodps);
+	timings[6] = DIV_ROUND_UP(sdr->tRR_min, mckperiodps);
+	pl353_smc_set_cycles(timings);
+
+	return 0;
+}
+
+/**
+ * pl353_nand_attach_chip - Finish controller setup after chip identification
+ * @chip: Pointer to the nand_chip structure
+ *
+ * Programs the detected bus width into the SMC, derives the number of
+ * address cycles from page and device size, initializes the ECC layer and
+ * provides a default mtd->name when none was set from the device tree.
+ *
+ * Return: 0 on success or a negative errno on failure
+ */
+static int pl353_nand_attach_chip(struct nand_chip *chip)
+{
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	struct pl353_nand_controller *xnfc = to_pl353_nand(chip);
+	int ret;
+
+	if (chip->options & NAND_BUSWIDTH_16) {
+		ret = pl353_smc_set_buswidth(PL353_SMC_MEM_WIDTH_16);
+		if (ret) {
+			dev_err(xnfc->dev, "Set BusWidth failed\n");
+			return ret;
+		}
+	}
+
+	/* Column cycles: 1 for small pages, 2 for large pages. */
+	if (mtd->writesize <= SZ_512)
+		xnfc->addr_cycles = 1;
+	else
+		xnfc->addr_cycles = 2;
+
+	/* Row cycles: 3 for devices needing a third row address, else 2. */
+	if (chip->options & NAND_ROW_ADDR_3)
+		xnfc->addr_cycles += 3;
+	else
+		xnfc->addr_cycles += 2;
+
+	ret = pl353_nand_ecc_init(mtd, &chip->ecc, chip->ecc.mode);
+	if (ret) {
+		dev_err(xnfc->dev, "ECC init failed\n");
+		return ret;
+	}
+
+	if (!mtd->name) {
+		/*
+		 * If the new bindings are used and the bootloader has not been
+		 * updated to pass a new mtdparts parameter on the cmdline, you
+		 * should define the following property in your NAND node, ie:
+		 *
+		 * label = "pl353-nand";
+		 *
+		 * This way, mtd->name will be set by the core when
+		 * nand_set_flash_node() is called.
+		 */
+		mtd->name = devm_kasprintf(xnfc->dev, GFP_KERNEL,
+					   "%s", PL353_NAND_DRIVER_NAME);
+		if (!mtd->name) {
+			dev_err(xnfc->dev, "Failed to allocate mtd->name\n");
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+static const struct nand_controller_ops pl353_nand_controller_ops = {
+ .attach_chip = pl353_nand_attach_chip,
+ .exec_op = pl353_nfc_exec_op,
+ .setup_data_interface = pl353_nfc_setup_data_interface,
+};
+
+/**
+ * pl353_nand_probe - Probe method for the NAND driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function initializes the driver data structures and the hardware.
+ * The NAND driver has dependency with the pl353_smc memory controller
+ * driver for initializing the NAND timing parameters, bus width, ECC modes,
+ * control and status information.
+ *
+ * Return: 0 on success or error value on failure
+ */
+static int pl353_nand_probe(struct platform_device *pdev)
+{
+	struct pl353_nand_controller *xnfc;
+	struct mtd_info *mtd;
+	struct nand_chip *chip;
+	struct resource *res;
+	struct device_node *np, *dn;
+	struct clk *mclk;
+	int ret;	/* int, not u32: holds negative error codes */
+	u32 val;
+
+	xnfc = devm_kzalloc(&pdev->dev, sizeof(*xnfc), GFP_KERNEL);
+	if (!xnfc)
+		return -ENOMEM;
+
+	xnfc->dev = &pdev->dev;
+	nand_controller_init(&xnfc->controller);
+	xnfc->controller.ops = &pl353_nand_controller_ops;
+
+	/* Map physical address of NAND flash */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	xnfc->regs = devm_ioremap_resource(xnfc->dev, res);
+	if (IS_ERR(xnfc->regs))
+		return PTR_ERR(xnfc->regs);
+
+	chip = &xnfc->chip;
+	chip->controller = &xnfc->controller;
+	mtd = nand_to_mtd(chip);
+	nand_set_controller_data(chip, xnfc);
+	mtd->priv = chip;
+	mtd->owner = THIS_MODULE;
+	nand_set_flash_node(chip, xnfc->dev->of_node);
+
+	/*
+	 * The SMC clock lives on the parent (memory controller) node.
+	 * of_get_parent() is used instead of of_get_next_parent(): the
+	 * latter drops a reference on dev->of_node that this driver does
+	 * not own. The clock is only needed to sample its rate, so it is
+	 * released right away.
+	 */
+	np = of_get_parent(xnfc->dev->of_node);
+	mclk = of_clk_get_by_name(np, "memclk");
+	of_node_put(np);
+	if (IS_ERR(mclk)) {
+		dev_err(xnfc->dev, "Failed to retrieve MCK clk\n");
+		return PTR_ERR(mclk);
+	}
+
+	xnfc->mclk_rate = clk_get_rate(mclk);
+	clk_put(mclk);
+
+	dn = nand_get_flash_node(chip);
+	ret = of_property_read_u32(dn, "nand-bus-width", &val);
+	if (ret)
+		val = 8;	/* default to an 8-bit bus */
+
+	xnfc->buswidth = val;
+
+	/* Set the device option and flash width */
+	chip->options = NAND_BUSWIDTH_AUTO;
+	chip->bbt_options = NAND_BBT_USE_FLASH;
+	platform_set_drvdata(pdev, xnfc);
+	ret = nand_scan(chip, 1);
+	if (ret) {
+		dev_err(xnfc->dev, "could not scan the nand chip\n");
+		return ret;
+	}
+
+	ret = mtd_device_register(mtd, NULL, 0);
+	if (ret) {
+		dev_err(xnfc->dev, "Failed to register mtd device: %d\n", ret);
+		nand_cleanup(chip);
+		return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * pl353_nand_remove - Remove method for the NAND driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function is called if the driver module is being unloaded. It frees all
+ * resources allocated to the device.
+ *
+ * Return: Always 0
+ */
+static int pl353_nand_remove(struct platform_device *pdev)
+{
+	struct pl353_nand_controller *xnfc = platform_get_drvdata(pdev);
+	struct mtd_info *mtd = nand_to_mtd(&xnfc->chip);
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	/* Release resources, unregister device */
+	nand_release(chip);
+
+	return 0;
+}
+
+/* Match table for device tree binding */
+static const struct of_device_id pl353_nand_of_match[] = {
+ { .compatible = "arm,pl353-nand-r2p1" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pl353_nand_of_match);
+
+/*
+ * pl353_nand_driver - This structure defines the NAND subsystem platform driver
+ */
+static struct platform_driver pl353_nand_driver = {
+ .probe = pl353_nand_probe,
+ .remove = pl353_nand_remove,
+ .driver = {
+ .name = PL353_NAND_DRIVER_NAME,
+ .of_match_table = pl353_nand_of_match,
+ },
+};
+
+module_platform_driver(pl353_nand_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_ALIAS("platform:" PL353_NAND_DRIVER_NAME);
+MODULE_DESCRIPTION("ARM PL353 NAND Flash Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/spi-nor/controllers/cadence-quadspi.c b/drivers/mtd/spi-nor/controllers/cadence-quadspi.c
index 494dcab4aaaa..c2ad763aec4a 100644
--- a/drivers/mtd/spi-nor/controllers/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/controllers/cadence-quadspi.c
@@ -25,15 +25,20 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/sched.h>
+#include <linux/of_gpio.h>
#include <linux/spi/spi.h>
#include <linux/timer.h>
+#include <linux/workqueue.h>
#define CQSPI_NAME "cadence-qspi"
#define CQSPI_MAX_CHIPSELECT 16
/* Quirks */
#define CQSPI_NEEDS_WR_DELAY BIT(0)
+#define CQSPI_HAS_DMA BIT(1)
+#define CQSPI_SUPPORT_RESET BIT(2)
/* Capabilities mask */
#define CQSPI_BASE_HWCAPS_MASK \
@@ -88,6 +93,23 @@ struct cqspi_st {
u32 trigger_address;
u32 wr_delay;
struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
+ bool read_dma;
+ void *rxbuf;
+ int bytes_to_rx;
+ int bytes_to_dma;
+ loff_t addr;
+ dma_addr_t dma_addr;
+ u8 edge_mode;
+ bool extra_dummy;
+ u8 access_mode;
+ bool unalined_byte_cnt;
+ u8 dll_mode;
+ struct completion tuning_complete;
+ struct completion request_complete;
+ int (*indirect_read_dma)(struct spi_nor *nor, u_char *rxbuf,
+ loff_t from_addr, size_t n_rx);
+ int (*flash_reset)(struct cqspi_st *cqspi, u8 reset_type);
+ const struct zynqmp_eemi_ops *eemi_ops;
};
struct cqspi_driver_platdata {
@@ -98,6 +120,8 @@ struct cqspi_driver_platdata {
/* Operation timeout value */
#define CQSPI_TIMEOUT_MS 500
#define CQSPI_READ_TIMEOUT_MS 10
+#define CQSPI_TUNING_TIMEOUT_MS 5000
+#define CQSPI_TUNING_PERIODICITY_MS 300000
/* Instruction type */
#define CQSPI_INST_TYPE_SINGLE 0
@@ -111,13 +135,20 @@ struct cqspi_driver_platdata {
#define CQSPI_STIG_DATA_LEN_MAX 8
+/* Edge mode */
+#define CQSPI_EDGE_MODE_SDR 0
+#define CQSPI_EDGE_MODE_DDR 1
+
/* Register map */
#define CQSPI_REG_CONFIG 0x00
#define CQSPI_REG_CONFIG_ENABLE_MASK BIT(0)
+#define CQSPI_REG_CONFIG_PHY_ENABLE_MASK BIT(3)
#define CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL BIT(7)
#define CQSPI_REG_CONFIG_DECODE_MASK BIT(9)
#define CQSPI_REG_CONFIG_CHIPSELECT_LSB 10
#define CQSPI_REG_CONFIG_DMA_MASK BIT(15)
+#define CQSPI_REG_CONFIG_AHB_ADDR_REMAP_MASK BIT(16)
+#define CQSPI_REG_CONFIG_DTR_PROT_EN_MASK BIT(24)
#define CQSPI_REG_CONFIG_BAUD_LSB 19
#define CQSPI_REG_CONFIG_IDLE_LSB 31
#define CQSPI_REG_CONFIG_CHIPSELECT_MASK 0xF
@@ -137,6 +168,7 @@ struct cqspi_driver_platdata {
#define CQSPI_REG_WR_INSTR 0x08
#define CQSPI_REG_WR_INSTR_OPCODE_LSB 0
+#define CQSPI_REG_WR_INSTR_OPCODE_MASK 0xFF
#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB 12
#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB 16
@@ -151,6 +183,7 @@ struct cqspi_driver_platdata {
#define CQSPI_REG_DELAY_TSHSL_MASK 0xFF
#define CQSPI_REG_READCAPTURE 0x10
+#define CQSPI_REG_READCAPTURE_DQS_ENABLE BIT(8)
#define CQSPI_REG_READCAPTURE_BYPASS_LSB 0
#define CQSPI_REG_READCAPTURE_DELAY_LSB 1
#define CQSPI_REG_READCAPTURE_DELAY_MASK 0xF
@@ -171,6 +204,7 @@ struct cqspi_driver_platdata {
#define CQSPI_REG_DMA_BURST_LSB 8
#define CQSPI_REG_DMA_SINGLE_MASK 0xFF
#define CQSPI_REG_DMA_BURST_MASK 0xFF
+#define CQSPI_REG_DMA_VAL 0x602
#define CQSPI_REG_REMAP 0x24
#define CQSPI_REG_MODE_BIT 0x28
@@ -181,8 +215,13 @@ struct cqspi_driver_platdata {
#define CQSPI_REG_SDRAMLEVEL_RD_MASK 0xFFFF
#define CQSPI_REG_SDRAMLEVEL_WR_MASK 0xFFFF
+#define CQSPI_REG_WRCOMPLETION 0x38
+#define CQSPI_REG_WRCOMPLETION_POLLCNT_MASK 0xFF0000
+#define CQSPI_REG_WRCOMPLETION_POLLCNY_LSB 16
+
#define CQSPI_REG_IRQSTATUS 0x40
#define CQSPI_REG_IRQMASK 0x44
+#define CQSPI_REG_ECO 0x48
#define CQSPI_REG_INDIRECTRD 0x60
#define CQSPI_REG_INDIRECTRD_START_MASK BIT(0)
@@ -196,6 +235,7 @@ struct cqspi_driver_platdata {
#define CQSPI_REG_CMDCTRL 0x90
#define CQSPI_REG_CMDCTRL_EXECUTE_MASK BIT(0)
#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK BIT(1)
+#define CQSPI_REG_CMDCTRL_DUMMY_BYTES_LSB 7
#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB 12
#define CQSPI_REG_CMDCTRL_WR_EN_LSB 15
#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB 16
@@ -206,6 +246,7 @@ struct cqspi_driver_platdata {
#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK 0x7
#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK 0x3
#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK 0x7
+#define CQSPI_REG_CMDCTRL_DUMMY_BYTES_MASK 0x1F
#define CQSPI_REG_INDIRECTWR 0x70
#define CQSPI_REG_INDIRECTWR_START_MASK BIT(0)
@@ -216,12 +257,42 @@ struct cqspi_driver_platdata {
#define CQSPI_REG_INDIRECTWRSTARTADDR 0x78
#define CQSPI_REG_INDIRECTWRBYTES 0x7C
+#define CQSPI_REG_INDTRIG_ADDRRANGE 0x80
+#define CQSPI_REG_INDTRIG_ADDRRANGE_WIDTH 0x6
+
#define CQSPI_REG_CMDADDRESS 0x94
#define CQSPI_REG_CMDREADDATALOWER 0xA0
#define CQSPI_REG_CMDREADDATAUPPER 0xA4
#define CQSPI_REG_CMDWRITEDATALOWER 0xA8
#define CQSPI_REG_CMDWRITEDATAUPPER 0xAC
+#define CQSPI_REG_PHY_CONFIG 0xB4
+#define CQSPI_REG_PHY_CONFIG_RESYNC_FLD_MASK 0x80000000
+#define CQSPI_REG_PHY_CONFIG_RESET_FLD_MASK 0x40000000
+#define CQSPI_REG_PHY_CONFIG_TX_DLL_DLY_LSB 16
+
+#define CQSPI_REG_PHY_MASTER_CTRL 0xB8
+#define CQSPI_REG_DLL_LOWER 0xBC
+#define CQSPI_REG_DLL_LOWER_LPBK_LOCK_MASK 0x8000
+#define CQSPI_REG_DLL_LOWER_DLL_LOCK_MASK 0x1
+
+#define CQSPI_REG_DMA_SRC_ADDR 0x1000
+#define CQSPI_REG_DMA_DST_ADDR 0x1800
+#define CQSPI_REG_DMA_DST_SIZE 0x1804
+#define CQSPI_REG_DMA_DST_STS 0x1808
+#define CQSPI_REG_DMA_DST_CTRL 0x180C
+#define CQSPI_REG_DMA_DST_CTRL_VAL 0xF43FFA00
+
+#define CQSPI_REG_DMA_DTS_I_STS 0x1814
+#define CQSPI_REG_DMA_DST_I_EN 0x1818
+#define CQSPI_REG_DMA_DST_I_EN_DONE BIT(1)
+
+#define CQSPI_REG_DMA_DST_I_DIS 0x181C
+#define CQSPI_REG_DMA_DST_I_DIS_DONE BIT(1)
+#define CQSPI_REG_DMA_DST_ALL_I_DIS_MASK 0xFE
+#define CQSPI_REG_DMA_DST_I_MASK 0x1820
+#define CQSPI_REG_DMA_DST_ADDR_MSB 0x1828
+
/* Interrupt status bits */
#define CQSPI_REG_IRQ_MODE_ERR BIT(0)
#define CQSPI_REG_IRQ_UNDERFLOW BIT(1)
@@ -241,6 +312,27 @@ struct cqspi_driver_platdata {
CQSPI_REG_IRQ_UNDERFLOW)
#define CQSPI_IRQ_STATUS_MASK 0x1FFFF
+#define CQSPI_MIO_NODE_ID_12 0x14108027
+#define CQSPI_READ_ID 0x9F
+#define CQSPI_FAST_READ 0x0C
+#define CQSPI_READ_ID_LEN 6
+#define TERA_MACRO 1000000000000l
+
+#define CQSPI_RESET_TYPE_HWPIN 0
+
+#define CQSPI_DMA_MODE 0
+#define CQSPI_LINEAR_MODE 1
+
+#define RESET_OSPI 0xc10402e
+#define DEV_OSPI 0x1822402a
+
+#define SILICON_VER_MASK 0xFF
+#define SILICON_VER_1 0x10
+#define CQSPI_DLL_MODE_MASTER 0
+#define CQSPI_DLL_MODE_BYPASS 1
+#define TAP_GRAN_SEL_MIN_FREQ 120000000
+#define CQSPI_TX_TAP_MASTER 0x19
+#define CQSPI_MAX_DLL_TAPS 128
static int cqspi_wait_for_bit(void __iomem *reg, const u32 mask, bool clr)
{
@@ -266,25 +358,6 @@ static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi)
return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
}
-static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
-{
- struct cqspi_st *cqspi = dev;
- unsigned int irq_status;
-
- /* Read interrupt status */
- irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS);
-
- /* Clear interrupt */
- writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);
-
- irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
-
- if (irq_status)
- complete(&cqspi->transfer_complete);
-
- return IRQ_HANDLED;
-}
-
static unsigned int cqspi_calc_rdreg(struct spi_nor *nor)
{
struct cqspi_flash_pdata *f_pdata = nor->priv;
@@ -354,6 +427,99 @@ static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
return cqspi_wait_idle(cqspi);
}
+/**
+ * process_dma_irq - Finish a DMA-assisted indirect read
+ * @cqspi: Pointer to the driver private data
+ *
+ * Masks further DMA-done interrupts, clears the indirect-read completion
+ * flag, unmaps the DMA buffer, and fetches the residual bytes (the DMA
+ * engine only moved the 4-byte-aligned part, see bytes_to_dma) with a
+ * STIG command read.
+ */
+static void process_dma_irq(struct cqspi_st *cqspi)
+{
+ struct platform_device *pdev = cqspi->pdev;
+ struct device *dev = &pdev->dev;
+ unsigned int rem;
+ unsigned int reg;
+ unsigned int data;
+ u8 addr_bytes;
+ u8 opcode;
+ u8 dummy_cycles;
+
+ /* Disable DMA interrupt */
+ writel(CQSPI_REG_DMA_DST_I_DIS_DONE,
+ cqspi->iobase + CQSPI_REG_DMA_DST_I_DIS);
+
+ /* Clear indirect completion status */
+ writel(CQSPI_REG_INDIRECTRD_DONE_MASK,
+ cqspi->iobase + CQSPI_REG_INDIRECTRD);
+ dma_unmap_single(dev, cqspi->dma_addr, cqspi->bytes_to_dma,
+ DMA_FROM_DEVICE);
+ rem = cqspi->bytes_to_rx - cqspi->bytes_to_dma;
+
+ /* Read the unaligned remainder (< 4 bytes) via a STIG command read */
+ if (rem) {
+ cqspi->rxbuf += cqspi->bytes_to_dma;
+ writel(cqspi->addr + cqspi->bytes_to_dma,
+ cqspi->iobase + CQSPI_REG_CMDADDRESS);
+ if (cqspi->edge_mode == CQSPI_EDGE_MODE_DDR) {
+ /* Reuse opcode/dummies already programmed in RD_INSTR */
+ opcode = (u8)readl(cqspi->iobase + CQSPI_REG_RD_INSTR);
+ dummy_cycles = (readl(cqspi->iobase +
+ CQSPI_REG_RD_INSTR) >>
+ CQSPI_REG_RD_INSTR_DUMMY_LSB) &
+ CQSPI_REG_RD_INSTR_DUMMY_MASK;
+ } else {
+ opcode = CQSPI_FAST_READ;
+ dummy_cycles = 8;
+ writel((dummy_cycles << CQSPI_REG_RD_INSTR_DUMMY_LSB) |
+ opcode, cqspi->iobase + CQSPI_REG_RD_INSTR);
+ }
+ addr_bytes = readl(cqspi->iobase + CQSPI_REG_SIZE) &
+ CQSPI_REG_SIZE_ADDRESS_MASK;
+ reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
+ reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);
+ reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
+ reg |= (addr_bytes & CQSPI_REG_CMDCTRL_ADD_BYTES_MASK) <<
+ CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
+ reg |= (dummy_cycles & CQSPI_REG_CMDCTRL_DUMMY_BYTES_MASK) <<
+ CQSPI_REG_CMDCTRL_DUMMY_BYTES_LSB;
+ cqspi->unalined_byte_cnt = false;
+ /* DDR transfers must be even-sized: pad odd remainders by one */
+ if (cqspi->edge_mode == CQSPI_EDGE_MODE_DDR &&
+ ((rem % 2) != 0)) {
+ cqspi->unalined_byte_cnt = true;
+ }
+ /* 0 means 1 byte. */
+ reg |= (((rem - 1 + cqspi->unalined_byte_cnt) &
+ CQSPI_REG_CMDCTRL_RD_BYTES_MASK) <<
+ CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
+ cqspi_exec_flash_cmd(cqspi, reg); /* NOTE(review): status ignored; 'data' below may be stale on failure */
+ data = readl(cqspi->iobase + CQSPI_REG_CMDREADDATALOWER);
+
+ /* Put the read value into rx_buf */
+ memcpy(cqspi->rxbuf, &data, rem);
+ }
+}
+
+/**
+ * cqspi_irq_handler - Shared handler for controller and DMA interrupts
+ * @this_irq: IRQ number
+ * @dev: Pointer to the driver private data (struct cqspi_st)
+ *
+ * Acknowledges read/write completion bits and the DMA-done bit, and
+ * signals transfer_complete when either fired.
+ */
+static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
+{
+ struct cqspi_st *cqspi = dev;
+ unsigned int irq_status;
+ unsigned int dma_status;
+
+ /* Read interrupt status; keep only the read/write event bits */
+ irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS);
+ irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
+
+ /* Clear the handled status bits */
+ if (irq_status)
+ writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);
+
+ /* Read DMA interrupt status */
+ dma_status = readl(cqspi->iobase + CQSPI_REG_DMA_DTS_I_STS);
+ dma_status &= CQSPI_REG_DMA_DST_I_EN_DONE;
+
+ /* Clear DMA interrupt */
+ if (dma_status)
+ writel(dma_status, cqspi->iobase + CQSPI_REG_DMA_DTS_I_STS);
+
+ if (irq_status || dma_status)
+ complete(&cqspi->transfer_complete);
+
+ return IRQ_HANDLED; /* NOTE(review): returned even with no bit set — consider IRQ_NONE if the line is shared */
+}
+
static int cqspi_command_read(struct spi_nor *nor, u8 opcode,
u8 *rxbuf, size_t n_rx)
{
@@ -364,6 +530,7 @@ static int cqspi_command_read(struct spi_nor *nor, u8 opcode,
unsigned int reg;
size_t read_len;
int status;
+ u8 dummy_cycles;
if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
dev_err(nor->dev,
@@ -382,6 +549,14 @@ static int cqspi_command_read(struct spi_nor *nor, u8 opcode,
/* 0 means 1 byte. */
reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
<< CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
+ if (cqspi->edge_mode == CQSPI_EDGE_MODE_DDR)
+ dummy_cycles = 8;
+ else
+ dummy_cycles = 0;
+ if (cqspi->extra_dummy)
+ dummy_cycles++;
+ reg |= ((dummy_cycles & CQSPI_REG_CMDCTRL_DUMMY_BYTES_MASK)
+ << CQSPI_REG_CMDCTRL_DUMMY_BYTES_LSB);
status = cqspi_exec_flash_cmd(cqspi, reg);
if (status)
return status;
@@ -421,11 +596,24 @@ static int cqspi_command_write(struct spi_nor *nor, const u8 opcode,
return -EINVAL;
}
+ reg = f_pdata->data_width << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
+ reg |= f_pdata->addr_width << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB;
+ writel(reg, reg_base + CQSPI_REG_WR_INSTR);
+ reg = cqspi_calc_rdreg(nor, opcode);
+ writel(reg, reg_base + CQSPI_REG_RD_INSTR);
+
reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
if (n_tx) {
reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
<< CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
+ if (nor->is_addrvalid) {
+ reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
+ reg |= ((nor->addr_width - 1) &
+ CQSPI_REG_CMDCTRL_ADD_BYTES_MASK) <<
+ CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
+ writel(nor->reg_addr, reg_base + CQSPI_REG_CMDADDRESS);
+ }
data = 0;
write_len = (n_tx > 4) ? 4 : n_tx;
memcpy(&data, txbuf, write_len);
@@ -477,18 +665,27 @@ static int cqspi_read_setup(struct spi_nor *nor)
if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
dummy_clk = CQSPI_DUMMY_CLKS_MAX;
- if (dummy_clk / 8) {
- reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
- /* Set mode bits high to ensure chip doesn't enter XIP */
- writel(0xFF, reg_base + CQSPI_REG_MODE_BIT);
-
- /* Need to subtract the mode byte (8 clocks). */
- if (f_pdata->inst_width != CQSPI_INST_TYPE_QUAD)
- dummy_clk -= 8;
-
+ if (!(nor->flags & SNOR_F_BROKEN_OCTAL_DDR)) {
+ if (cqspi->extra_dummy)
+ dummy_clk++;
if (dummy_clk)
reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
<< CQSPI_REG_RD_INSTR_DUMMY_LSB;
+ } else {
+ if (dummy_clk / 8) {
+ reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
+ /* Set mode bit high to ensure chip doesn't enter XIP */
+ writel(0xFF, reg_base + CQSPI_REG_MODE_BIT);
+
+ /* Need to subtract the mode byte (8 clocks). */
+ if (f_pdata->inst_width != CQSPI_INST_TYPE_QUAD)
+ dummy_clk -= 8;
+
+ if (dummy_clk)
+ reg |= (dummy_clk &
+ CQSPI_REG_RD_INSTR_DUMMY_MASK)
+ << CQSPI_REG_RD_INSTR_DUMMY_LSB;
+ }
}
writel(reg, reg_base + CQSPI_REG_RD_INSTR);
@@ -512,10 +709,36 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
unsigned int mod_bytes = n_rx % 4;
unsigned int bytes_to_read = 0;
u8 *rxbuf_end = rxbuf + n_rx;
+ u8 *rxbuf_start = rxbuf;
int ret = 0;
+ u32 reg;
+ u8 extra_bytes = 0;
+
+ reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+ reg &= ~CQSPI_REG_CONFIG_DMA_MASK;
+ writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
+
+ if (cqspi->eemi_ops && cqspi->access_mode == CQSPI_DMA_MODE) {
+ cqspi_wait_idle(cqspi);
+ cqspi->eemi_ops->ioctl(DEV_OSPI, IOCTL_OSPI_MUX_SELECT,
+ PM_OSPI_MUX_SEL_LINEAR, 0, NULL);
+ cqspi->access_mode = CQSPI_LINEAR_MODE;
+ cqspi_wait_idle(cqspi);
+ }
writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
- writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);
+ if (cqspi->edge_mode == CQSPI_EDGE_MODE_DDR &&
+ ((from_addr % 2) != 0) && !cqspi->unalined_byte_cnt) {
+ if (!cqspi->unalined_byte_cnt) {
+ extra_bytes = 2;
+ mod_bytes += 1;
+ } else if (((n_rx + 1) % 4) != 0) {
+ mod_bytes += 1;
+ }
+ }
+
+ writel(remaining + cqspi->unalined_byte_cnt +
+ extra_bytes, reg_base + CQSPI_REG_INDIRECTRDBYTES);
/* Clear all interrupts. */
writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
@@ -547,12 +770,26 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
bytes_to_read = round_down(bytes_to_read, 4);
/* Read 4 byte word chunks then single bytes */
if (bytes_to_read) {
- ioread32_rep(ahb_base, rxbuf,
- (bytes_to_read / 4));
+ u8 offset = 0;
+
+ if (cqspi->edge_mode == CQSPI_EDGE_MODE_DDR &&
+ ((from_addr % 2) != 0) && rxbuf ==
+ rxbuf_start) {
+ unsigned int temp = ioread32(ahb_base);
+
+ temp >>= 8;
+ memcpy(rxbuf, &temp, 3);
+ bytes_to_read -= 1;
+ offset = 3;
+ }
+ if (bytes_to_read >= 4)
+ ioread32_rep(ahb_base, rxbuf + offset,
+ (bytes_to_read / 4));
} else if (!word_remain && mod_bytes) {
unsigned int temp = ioread32(ahb_base);
- bytes_to_read = mod_bytes;
+ bytes_to_read = remaining > mod_bytes ?
+ remaining : mod_bytes;
memcpy(rxbuf, &temp, min((unsigned int)
(rxbuf_end - rxbuf),
bytes_to_read));
@@ -593,7 +830,7 @@ failrd:
return ret;
}
-static int cqspi_write_setup(struct spi_nor *nor)
+static int cqspi_write_setup(struct spi_nor *nor, const u8 opcode)
{
unsigned int reg;
struct cqspi_flash_pdata *f_pdata = nor->priv;
@@ -601,7 +838,9 @@ static int cqspi_write_setup(struct spi_nor *nor)
void __iomem *reg_base = cqspi->iobase;
/* Set opcode. */
- reg = nor->program_opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
+ reg = opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
+ reg |= f_pdata->data_width << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
+ reg |= f_pdata->addr_width << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB;
writel(reg, reg_base + CQSPI_REG_WR_INSTR);
reg = cqspi_calc_rdreg(nor);
writel(reg, reg_base + CQSPI_REG_RD_INSTR);
@@ -623,9 +862,23 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
unsigned int remaining = n_tx;
unsigned int write_bytes;
int ret;
+ u32 reg;
+
+ reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+ reg &= ~CQSPI_REG_CONFIG_DMA_MASK;
+ writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
+
+ if (cqspi->eemi_ops && cqspi->access_mode == CQSPI_DMA_MODE) {
+ cqspi_wait_idle(cqspi);
+ cqspi->eemi_ops->ioctl(DEV_OSPI, IOCTL_OSPI_MUX_SELECT,
+ PM_OSPI_MUX_SEL_LINEAR, 0, NULL);
+ cqspi->access_mode = CQSPI_LINEAR_MODE;
+ cqspi_wait_idle(cqspi);
+ }
writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
- writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);
+ writel(remaining + cqspi->unalined_byte_cnt,
+ reg_base + CQSPI_REG_INDIRECTWRBYTES);
/* Clear all interrupts. */
writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
@@ -916,6 +1169,37 @@ static int cqspi_set_protocol(struct spi_nor *nor, const int read)
case SNOR_PROTO_1_1_8:
f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
break;
+ case SNOR_PROTO_8_8_8:
+ if (f_pdata->cqspi->edge_mode == CQSPI_EDGE_MODE_DDR) {
+ f_pdata->inst_width = CQSPI_INST_TYPE_OCTAL;
+ f_pdata->addr_width = CQSPI_INST_TYPE_OCTAL;
+ f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ switch (nor->write_proto) {
+ case SNOR_PROTO_1_1_1:
+ f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
+ break;
+ case SNOR_PROTO_1_1_2:
+ f_pdata->data_width = CQSPI_INST_TYPE_DUAL;
+ break;
+ case SNOR_PROTO_1_1_4:
+ f_pdata->data_width = CQSPI_INST_TYPE_QUAD;
+ break;
+ case SNOR_PROTO_1_1_8:
+ f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
+ break;
+ case SNOR_PROTO_8_8_8:
+ if (f_pdata->cqspi->edge_mode == CQSPI_EDGE_MODE_DDR) {
+ f_pdata->inst_width = CQSPI_INST_TYPE_OCTAL;
+ f_pdata->addr_width = CQSPI_INST_TYPE_OCTAL;
+ f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
+ }
+ break;
default:
return -EINVAL;
}
@@ -933,14 +1217,30 @@ static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
struct cqspi_st *cqspi = f_pdata->cqspi;
int ret;
+ reinit_completion(&cqspi->request_complete);
+
+ if (cqspi->edge_mode == CQSPI_EDGE_MODE_DDR &&
+ !cqspi->tuning_complete.done) {
+ if (!wait_for_completion_timeout(&cqspi->tuning_complete,
+ msecs_to_jiffies(CQSPI_TUNING_TIMEOUT_MS))) {
+ return -ETIMEDOUT;
+ }
+ }
+
ret = cqspi_set_protocol(nor, 0);
if (ret)
return ret;
- ret = cqspi_write_setup(nor);
+ ret = cqspi_write_setup(nor, nor->program_opcode);
if (ret)
return ret;
+ cqspi->unalined_byte_cnt = false;
+ if (cqspi->edge_mode == CQSPI_EDGE_MODE_DDR &&
+ ((len % 2) != 0)) {
+ cqspi->unalined_byte_cnt = true;
+ }
+
if (f_pdata->use_direct_mode) {
memcpy_toio(cqspi->ahb_base + to, buf, len);
ret = cqspi_wait_idle(cqspi);
@@ -1021,7 +1321,20 @@ static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
size_t len, u_char *buf)
{
struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ u64 dma_align = (u64)(uintptr_t)buf;
int ret;
+ bool use_dma = true;
+
+ reinit_completion(&cqspi->request_complete);
+
+ if (cqspi->edge_mode == CQSPI_EDGE_MODE_DDR &&
+ !cqspi->tuning_complete.done) {
+ if (!wait_for_completion_timeout(&cqspi->tuning_complete,
+ msecs_to_jiffies(CQSPI_TUNING_TIMEOUT_MS))) {
+ return -ETIMEDOUT;
+ }
+ }
ret = cqspi_set_protocol(nor, 1);
if (ret)
@@ -1031,26 +1344,52 @@ static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
if (ret)
return ret;
- if (f_pdata->use_direct_mode)
+ cqspi->unalined_byte_cnt = false;
+ if (cqspi->edge_mode == CQSPI_EDGE_MODE_DDR) {
+ if ((len % 2) != 0)
+ cqspi->unalined_byte_cnt = true;
+ if ((from % 2) != 0)
+ use_dma = 0;
+ }
+
+ if (f_pdata->use_direct_mode) {
ret = cqspi_direct_read_execute(nor, buf, from, len);
- else
+ } else if (cqspi->read_dma && virt_addr_valid(buf) && use_dma &&
+ cqspi->indirect_read_dma && len >= 4 &&
+ ((dma_align & 0x3) == 0) && !is_vmalloc_addr(buf)) {
+ ret = cqspi->indirect_read_dma(nor, buf, from, len);
+ } else {
ret = cqspi_indirect_read_execute(nor, buf, from, len);
+ }
if (ret)
return ret;
+ complete(&cqspi->request_complete);
+
return len;
}
static int cqspi_erase(struct spi_nor *nor, loff_t offs)
{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
int ret;
+ reinit_completion(&cqspi->request_complete);
+
+ if (cqspi->edge_mode == CQSPI_EDGE_MODE_DDR &&
+ !cqspi->tuning_complete.done) {
+ if (!wait_for_completion_timeout(&cqspi->tuning_complete,
+ msecs_to_jiffies(CQSPI_TUNING_TIMEOUT_MS))) {
+ return -ETIMEDOUT;
+ }
+ }
+
ret = cqspi_set_protocol(nor, 0);
if (ret)
return ret;
- /* Send write enable, then erase commands. */
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
+ ret = cqspi_write_setup(nor, nor->erase_opcode);
if (ret)
return ret;
@@ -1082,11 +1421,31 @@ static void cqspi_unprep(struct spi_nor *nor)
static int cqspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, size_t len)
{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
int ret;
+ reinit_completion(&cqspi->request_complete);
+
+ if (cqspi->edge_mode == CQSPI_EDGE_MODE_DDR &&
+ !cqspi->tuning_complete.done) {
+ if (!wait_for_completion_timeout(&cqspi->tuning_complete,
+ msecs_to_jiffies(CQSPI_TUNING_TIMEOUT_MS))) {
+ return -ETIMEDOUT;
+ }
+ }
+
ret = cqspi_set_protocol(nor, 0);
- if (!ret)
+ if (!ret) {
+ if (cqspi->edge_mode == CQSPI_EDGE_MODE_DDR)
+ len = ((len % 2) != 0) ? (len + 1) : len;
ret = cqspi_command_read(nor, opcode, buf, len);
+ }
+
+ if ((opcode == SPINOR_OP_RDFSR && ((FSR_READY & buf[0]) != 0)) ||
+ (SPINOR_OP_RDSR != opcode && SPINOR_OP_RDFSR != opcode)) {
+ complete(&cqspi->request_complete);
+ }
return ret;
}
@@ -1094,12 +1453,27 @@ static int cqspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, size_t len)
static int cqspi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
size_t len)
{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
int ret;
+ reinit_completion(&cqspi->request_complete);
+
+ if (cqspi->edge_mode == CQSPI_EDGE_MODE_DDR &&
+ !cqspi->tuning_complete.done) {
+ if (!wait_for_completion_timeout(&cqspi->tuning_complete,
+ msecs_to_jiffies(CQSPI_TUNING_TIMEOUT_MS))) {
+ return -ETIMEDOUT;
+ }
+ }
+
ret = cqspi_set_protocol(nor, 0);
if (!ret)
ret = cqspi_command_write(nor, opcode, buf, len);
+ if (opcode != SPINOR_OP_WREN)
+ complete(&cqspi->request_complete);
+
return ret;
}
@@ -1168,6 +1542,217 @@ static int cqspi_of_get_pdata(struct platform_device *pdev)
return 0;
}
+/**
+ * cqspi_setdlldelay - Calibrate the RX DLL tap delay for DDR mode
+ * @nor: Pointer to the spi_nor framework structure
+ *
+ * Sweeps the RX tap delays (once without and once with an extra dummy
+ * cycle) while repeatedly reading the JEDEC ID. The widest run of taps
+ * for which the ID reads back stably (10 consecutive matches) wins, and
+ * the centre tap of that window is programmed.
+ *
+ * Return: 0 on success, -EINVAL if no window of at least 3 taps was
+ * found, or a negative error code from the underlying reads/waits.
+ */
+static int cqspi_setdlldelay(struct spi_nor *nor)
+{
+	struct cqspi_flash_pdata *f_pdata = nor->priv;
+	struct cqspi_st *cqspi = f_pdata->cqspi;
+	int i;
+	u8 j;
+	int ret = 0;
+	u8 id[CQSPI_READ_ID_LEN];
+	bool rxtapfound = false;
+	u8 min_rxtap = 0;
+	u8 max_rxtap = 0;
+	u8 avg_rxtap = 0;
+	bool id_matched;
+	u32 txtap = 0;
+	u8 max_tap;
+	/* Fixed: int, not s8 — a window can span up to 129 taps */
+	int max_windowsize = -1;
+	u8 windowsize;
+	u8 dummy_incr;
+	u8 dummy_flag = 0;
+	u8 count;
+	u8 opcode = CQSPI_READ_ID;
+
+	max_tap = ((TERA_MACRO / cqspi->master_ref_clk_hz) / 160);
+	if (cqspi->dll_mode == CQSPI_DLL_MODE_MASTER) {
+		/* Drive DLL reset bit to low */
+		writel(0, cqspi->iobase + CQSPI_REG_PHY_CONFIG);
+
+		/* Set initial delay value */
+		writel(0x4, cqspi->iobase + CQSPI_REG_PHY_MASTER_CTRL);
+
+		/* Set DLL reset bit */
+		writel(CQSPI_REG_PHY_CONFIG_RESET_FLD_MASK,
+		       cqspi->iobase + CQSPI_REG_PHY_CONFIG);
+
+		/* Check for loopback lock */
+		ret = cqspi_wait_for_bit(cqspi->iobase + CQSPI_REG_DLL_LOWER,
+					 CQSPI_REG_DLL_LOWER_LPBK_LOCK_MASK, 0);
+		if (ret) {
+			dev_err(nor->dev,
+				"Loopback lock bit error (%i)\n", ret);
+			return ret;
+		}
+
+		/* Re-synchronize slave DLLs */
+		writel(CQSPI_REG_PHY_CONFIG_RESET_FLD_MASK,
+		       cqspi->iobase + CQSPI_REG_PHY_CONFIG);
+		writel(CQSPI_REG_PHY_CONFIG_RESET_FLD_MASK |
+		       CQSPI_REG_PHY_CONFIG_RESYNC_FLD_MASK,
+		       cqspi->iobase + CQSPI_REG_PHY_CONFIG);
+
+		txtap = CQSPI_TX_TAP_MASTER <<
+			CQSPI_REG_PHY_CONFIG_TX_DLL_DLY_LSB;
+		max_tap = CQSPI_MAX_DLL_TAPS;
+	}
+
+	cqspi->extra_dummy = false;
+	for (dummy_incr = 0; dummy_incr <= 1; dummy_incr++) {
+		if (dummy_incr)
+			cqspi->extra_dummy = true;
+		for (i = 0; i <= max_tap; i++) {
+			/* Program the candidate RX tap and resync the DLL */
+			writel((txtap | i |
+				CQSPI_REG_PHY_CONFIG_RESET_FLD_MASK),
+			       cqspi->iobase + CQSPI_REG_PHY_CONFIG);
+			writel((CQSPI_REG_PHY_CONFIG_RESYNC_FLD_MASK | txtap |
+				i | CQSPI_REG_PHY_CONFIG_RESET_FLD_MASK),
+			       cqspi->iobase + CQSPI_REG_PHY_CONFIG);
+			if (cqspi->dll_mode == CQSPI_DLL_MODE_MASTER) {
+				ret = cqspi_wait_for_bit(cqspi->iobase +
+					CQSPI_REG_DLL_LOWER,
+					CQSPI_REG_DLL_LOWER_DLL_LOCK_MASK, 0);
+				if (ret)
+					return ret;
+			}
+			/* Require 10 stable ID reads before trusting a tap */
+			count = 0;
+			do {
+				count += 1;
+				ret = cqspi_set_protocol(nor, 0);
+				if (!ret) {
+					/*
+					 * Fixed: match the 4-argument
+					 * cqspi_command_read(nor, opcode,
+					 * rxbuf, n_rx) prototype; the old
+					 * call passed 5 args and &opcode.
+					 */
+					ret = cqspi_command_read(nor, opcode,
+						id, CQSPI_READ_ID_LEN);
+				}
+				if (ret < 0) {
+					dev_err(nor->dev,
+						"error %d reading JEDEC ID\n",
+						ret);
+					return ret;
+				}
+				id_matched = true;
+				for (j = 0; j < CQSPI_READ_ID_LEN; j++) {
+					if (nor->device_id[j] != id[j]) {
+						id_matched = false;
+						break;
+					}
+				}
+			} while (id_matched && (count <= 10));
+
+			if (id_matched) {
+				/* Open or extend the passing tap window */
+				if (!rxtapfound) {
+					min_rxtap = i;
+					max_rxtap = i;
+					rxtapfound = true;
+				} else {
+					max_rxtap = i;
+				}
+			}
+			if (!id_matched || i == max_tap) {
+				if (rxtapfound) {
+					windowsize = max_rxtap - min_rxtap + 1;
+					if (windowsize > max_windowsize) {
+						dummy_flag = dummy_incr;
+						max_windowsize = windowsize;
+						avg_rxtap = (max_rxtap +
+							     min_rxtap) / 2;
+					}
+					i = max_tap;
+					rxtapfound = false;
+				}
+			}
+		}
+		if (!dummy_incr) {
+			rxtapfound = false;
+			min_rxtap = 0;
+			max_rxtap = 0;
+		}
+	}
+	if (!dummy_flag)
+		cqspi->extra_dummy = false;
+	if (max_windowsize < 3)
+		return -EINVAL;
+
+	/* Program the centre of the widest window and resync to latch it */
+	writel((txtap | avg_rxtap | CQSPI_REG_PHY_CONFIG_RESET_FLD_MASK),
+	       cqspi->iobase + CQSPI_REG_PHY_CONFIG);
+	writel((CQSPI_REG_PHY_CONFIG_RESYNC_FLD_MASK | txtap | avg_rxtap |
+		CQSPI_REG_PHY_CONFIG_RESET_FLD_MASK),
+	       cqspi->iobase + CQSPI_REG_PHY_CONFIG);
+	if (cqspi->dll_mode == CQSPI_DLL_MODE_MASTER) {
+		ret = cqspi_wait_for_bit(cqspi->iobase + CQSPI_REG_DLL_LOWER,
+					 CQSPI_REG_DLL_LOWER_DLL_LOCK_MASK, 0);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * cqspi_periodictuning - Periodic re-tuning of the DDR RX DLL taps
+ * @work: Embedded delayed work (spi_nor->complete_work)
+ *
+ * Waits for the in-flight flash request to finish, re-runs the DLL tap
+ * calibration, and re-arms itself after CQSPI_TUNING_PERIODICITY_MS.
+ * On a calibration error the work is not rescheduled.
+ */
+static void cqspi_periodictuning(struct work_struct *work)
+{
+ struct delayed_work *d = to_delayed_work(work);
+ struct spi_nor *nor = container_of(d, struct spi_nor, complete_work);
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ int ret;
+
+ /* NOTE(review): done-check then wait is racy vs. a new request starting in between — confirm callers serialize */
+ if (!cqspi->request_complete.done)
+ wait_for_completion(&cqspi->request_complete);
+
+ /* Block readers/writers (they wait on tuning_complete) while tuning */
+ reinit_completion(&cqspi->tuning_complete);
+ ret = cqspi_setdlldelay(nor);
+ complete_all(&cqspi->tuning_complete);
+ if (ret) {
+ dev_err(nor->dev,
+ "Setting dll delay error (%i)\n", ret);
+ } else {
+ schedule_delayed_work(&nor->complete_work,
+ msecs_to_jiffies(CQSPI_TUNING_PERIODICITY_MS));
+ }
+}
+
+/**
+ * cqspi_setup_ddrmode - Switch the controller into DDR (PHY) mode
+ * @nor: Pointer to the spi_nor framework structure
+ *
+ * With the controller disabled: enables the PHY, programs the write
+ * completion poll count, enables DTR protocol and DQS read capture,
+ * records DDR in edge_mode, then re-enables the controller.
+ */
+static void cqspi_setup_ddrmode(struct spi_nor *nor)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ u32 reg;
+
+ cqspi_controller_enable(cqspi, 0);
+
+ reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+ reg |= (CQSPI_REG_CONFIG_PHY_ENABLE_MASK);
+ writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
+
+ /* Program POLL_CNT: clear the field first, then write 0x3 into it */
+ reg = readl(cqspi->iobase + CQSPI_REG_WRCOMPLETION);
+ reg &= ~CQSPI_REG_WRCOMPLETION_POLLCNT_MASK;
+ writel(reg, cqspi->iobase + CQSPI_REG_WRCOMPLETION);
+
+ reg |= (0x3 << CQSPI_REG_WRCOMPLETION_POLLCNY_LSB);
+ writel(reg, cqspi->iobase + CQSPI_REG_WRCOMPLETION);
+
+ reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+ reg |= CQSPI_REG_CONFIG_DTR_PROT_EN_MASK;
+ writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
+
+ /* Capture read data on DQS strobe in DDR mode */
+ reg = readl(cqspi->iobase + CQSPI_REG_READCAPTURE);
+ reg |= CQSPI_REG_READCAPTURE_DQS_ENABLE;
+ writel(reg, cqspi->iobase + CQSPI_REG_READCAPTURE);
+
+ cqspi->edge_mode = CQSPI_EDGE_MODE_DDR;
+
+ cqspi_controller_enable(cqspi, 1);
+}
+
+/* Enter DDR/PHY mode and calibrate the RX DLL taps for it. */
+static int cqspi_setup_edgemode(struct spi_nor *nor)
+{
+	cqspi_setup_ddrmode(nor);
+
+	return cqspi_setdlldelay(nor);
+}
+
static void cqspi_controller_init(struct cqspi_st *cqspi)
{
u32 reg;
@@ -1177,8 +1762,14 @@ static void cqspi_controller_init(struct cqspi_st *cqspi)
/* Configure the remap address register, no remap */
writel(0, cqspi->iobase + CQSPI_REG_REMAP);
+ /* Reset the Delay lines */
+ writel(CQSPI_REG_PHY_CONFIG_RESET_FLD_MASK,
+ cqspi->iobase + CQSPI_REG_PHY_CONFIG);
+
/* Disable all interrupts. */
writel(0, cqspi->iobase + CQSPI_REG_IRQMASK);
+ writel(CQSPI_REG_DMA_DST_ALL_I_DIS_MASK,
+ cqspi->iobase + CQSPI_REG_DMA_DST_I_DIS);
/* Configure the SRAM split to 1:1 . */
writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION);
@@ -1194,14 +1785,206 @@ static void cqspi_controller_init(struct cqspi_st *cqspi)
writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);
- /* Enable Direct Access Controller */
reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
- reg |= CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL;
+ reg &= ~CQSPI_REG_CONFIG_DTR_PROT_EN_MASK;
+ reg &= ~CQSPI_REG_CONFIG_PHY_ENABLE_MASK;
+ if (cqspi->read_dma) {
+ reg &= ~CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL;
+ reg |= CQSPI_REG_CONFIG_DMA_MASK;
+ } else {
+ /* Enable Direct Access Controller */
+ reg |= CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL;
+ }
writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
cqspi_controller_enable(cqspi, 1);
}
+/**
+ * cqspi_versal_flash_reset - Pulse the flash reset pin via MIO GPIO
+ * @cqspi: Pointer to the driver private data
+ * @reset_type: Reset method; only CQSPI_RESET_TYPE_HWPIN is supported
+ *
+ * Requests the "reset-gpios" pin, configures MIO12 through the EEMI
+ * pinctrl ops (Schmitt input, tri-state off), then toggles the pin
+ * 1 -> 0 -> 1 with ~1us between edges.
+ *
+ * Return: 0 on success, -EIO on GPIO failure, -EINVAL for an
+ * unsupported reset_type.
+ */
+static int cqspi_versal_flash_reset(struct cqspi_st *cqspi, u8 reset_type)
+{
+ struct platform_device *pdev = cqspi->pdev;
+ int ret;
+ int gpio;
+ enum of_gpio_flags flags;
+
+ if (reset_type == CQSPI_RESET_TYPE_HWPIN) {
+ gpio = of_get_named_gpio_flags(pdev->dev.of_node,
+ "reset-gpios", 0, &flags);
+ if (!gpio_is_valid(gpio))
+ return -EIO;
+ /* NOTE(review): 'flags' are OF polarity flags, but devm_gpio_request_one() expects GPIOF_* flags — confirm */
+ ret = devm_gpio_request_one(&pdev->dev, gpio, flags,
+ "flash-reset");
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to get reset-gpios: %d\n", ret);
+ return -EIO;
+ }
+
+ /* Request for PIN (eemi_ops return values are ignored here) */
+ cqspi->eemi_ops->pinctrl_request(CQSPI_MIO_NODE_ID_12);
+
+ /* Enable hysteresis in cmos receiver */
+ cqspi->eemi_ops->pinctrl_set_config(CQSPI_MIO_NODE_ID_12,
+ PM_PINCTRL_CONFIG_SCHMITT_CMOS,
+ PM_PINCTRL_INPUT_TYPE_SCHMITT);
+
+ /* Set the direction as output and enable the output */
+ gpio_direction_output(gpio, 1);
+
+ /* Disable Tri-state */
+ cqspi->eemi_ops->pinctrl_set_config(CQSPI_MIO_NODE_ID_12,
+ PM_PINCTRL_CONFIG_TRI_STATE,
+ PM_PINCTRL_TRI_STATE_DISABLE);
+ udelay(1);
+
+ /* Set value 0 to pin */
+ gpio_set_value(gpio, 0);
+ udelay(1);
+
+ /* Set value 1 to pin */
+ gpio_set_value(gpio, 1);
+ udelay(1);
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
+ * cqspi_versal_indirect_read_dma - Indirect read through the Versal DMA
+ * @nor: Pointer to the spi_nor framework structure
+ * @rxbuf: Receive buffer (DMA-able, 4-byte aligned — checked by caller)
+ * @from_addr: Flash offset to read from
+ * @n_rx: Number of bytes requested
+ *
+ * Moves the 4-byte-aligned part of the transfer with the controller DMA;
+ * process_dma_irq() then fetches the residual (< 4) bytes via STIG.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int cqspi_versal_indirect_read_dma(struct spi_nor *nor, u_char *rxbuf,
+					  loff_t from_addr, size_t n_rx)
+{
+	struct cqspi_flash_pdata *f_pdata = nor->priv;
+	struct cqspi_st *cqspi = f_pdata->cqspi;
+	void __iomem *reg_base = cqspi->iobase;
+	unsigned int rx_rem;
+	int ret = 0;
+	u32 reg;
+
+	rx_rem = n_rx % 4;
+	cqspi->bytes_to_rx = n_rx;
+	cqspi->bytes_to_dma = (n_rx - rx_rem);
+	cqspi->addr = from_addr;
+	cqspi->rxbuf = rxbuf;
+
+	if (cqspi->eemi_ops && cqspi->access_mode == CQSPI_LINEAR_MODE) {
+		cqspi_wait_idle(cqspi);
+		reg = readl(cqspi->iobase + CQSPI_REG_PHY_CONFIG);
+
+		if (cqspi->dll_mode != CQSPI_DLL_MODE_MASTER) {
+			/* Issue controller reset */
+			cqspi->eemi_ops->reset_assert(RESET_OSPI,
+						      PM_RESET_ACTION_ASSERT);
+		}
+		cqspi->eemi_ops->ioctl(DEV_OSPI, IOCTL_OSPI_MUX_SELECT,
+				       PM_OSPI_MUX_SEL_DMA, 0, NULL);
+		cqspi->access_mode = CQSPI_DMA_MODE;
+		if (cqspi->dll_mode != CQSPI_DLL_MODE_MASTER) {
+			cqspi->eemi_ops->reset_assert(RESET_OSPI,
+						      PM_RESET_ACTION_RELEASE);
+		}
+		cqspi_wait_idle(cqspi);
+		if (cqspi->dll_mode != CQSPI_DLL_MODE_MASTER) {
+			/* Controller was reset: reprogram it from scratch */
+			cqspi_controller_init(cqspi);
+			cqspi->current_cs = -1;
+			cqspi->sclk = 0;
+
+			ret = cqspi_set_protocol(nor, 1);
+			if (ret)
+				return ret;
+
+			if (cqspi->edge_mode == CQSPI_EDGE_MODE_DDR) {
+				cqspi_setup_ddrmode(nor);
+				writel(CQSPI_REG_PHY_CONFIG_RESYNC_FLD_MASK |
+				       reg, cqspi->iobase +
+				       CQSPI_REG_PHY_CONFIG);
+			}
+
+			ret = cqspi_read_setup(nor);
+			if (ret)
+				return ret;
+		}
+	}
+
+	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+	reg |= CQSPI_REG_CONFIG_DMA_MASK;
+	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
+
+	writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
+	writel(cqspi->bytes_to_dma, reg_base + CQSPI_REG_INDIRECTRDBYTES);
+	writel(CQSPI_REG_INDTRIG_ADDRRANGE_WIDTH,
+	       reg_base + CQSPI_REG_INDTRIG_ADDRRANGE);
+
+	/* Clear all interrupts. */
+	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
+
+	/* Enable DMA done interrupt */
+	writel(CQSPI_REG_DMA_DST_I_EN_DONE,
+	       reg_base + CQSPI_REG_DMA_DST_I_EN);
+
+	/* Default DMA periph configuration */
+	writel(CQSPI_REG_DMA_VAL, reg_base + CQSPI_REG_DMA);
+
+	cqspi->dma_addr = dma_map_single(nor->dev, rxbuf, cqspi->bytes_to_dma,
+					 DMA_FROM_DEVICE);
+	if (dma_mapping_error(nor->dev, cqspi->dma_addr)) {
+		dev_err(nor->dev, "ERR:rxdma:memory not mapped\n");
+		/*
+		 * Fixed: report the failure (ret was still 0, so the old
+		 * code returned success) and skip the unmap of a mapping
+		 * that never succeeded.
+		 */
+		ret = -ENOMEM;
+		goto failrd_nomap;
+	}
+	/* Configure DMA Dst address */
+	writel(lower_32_bits(cqspi->dma_addr),
+	       reg_base + CQSPI_REG_DMA_DST_ADDR);
+	writel(upper_32_bits(cqspi->dma_addr),
+	       reg_base + CQSPI_REG_DMA_DST_ADDR_MSB);
+
+	/* Configure DMA Src read address */
+	writel(cqspi->trigger_address, reg_base + CQSPI_REG_DMA_SRC_ADDR);
+
+	/* Set DMA destination size */
+	writel(cqspi->bytes_to_dma, reg_base + CQSPI_REG_DMA_DST_SIZE);
+
+	/* Set DMA destination control */
+	writel(CQSPI_REG_DMA_DST_CTRL_VAL, reg_base + CQSPI_REG_DMA_DST_CTRL);
+
+	/*
+	 * Fixed: re-arm the completion BEFORE starting the transfer so a
+	 * fast DMA-done interrupt cannot be wiped by the reinit and cause
+	 * a spurious timeout.
+	 */
+	reinit_completion(&cqspi->transfer_complete);
+
+	writel(CQSPI_REG_INDIRECTRD_START_MASK,
+	       reg_base + CQSPI_REG_INDIRECTRD);
+
+	if (!wait_for_completion_timeout(&cqspi->transfer_complete,
+			msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS))) {
+		ret = -ETIMEDOUT;
+		goto failrd;
+	}
+
+	/* Check indirect done status */
+	ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTRD,
+				 CQSPI_REG_INDIRECTRD_DONE_MASK, 0);
+	if (ret) {
+		dev_err(nor->dev,
+			"Indirect read completion error (%i)\n", ret);
+		goto failrd;
+	}
+
+	process_dma_irq(cqspi);
+
+	return 0;
+
+failrd:
+	/* Disable DMA interrupt */
+	writel(CQSPI_REG_DMA_DST_I_DIS_DONE,
+	       reg_base + CQSPI_REG_DMA_DST_I_DIS);
+
+	/*
+	 * Fixed: dma_unmap_single() takes an enum dma_data_direction;
+	 * use DMA_FROM_DEVICE (as in process_dma_irq()), not the
+	 * dmaengine value DMA_DEV_TO_MEM from a different enum.
+	 */
+	dma_unmap_single(nor->dev, cqspi->dma_addr, cqspi->bytes_to_dma,
+			 DMA_FROM_DEVICE);
+
+failrd_nomap:
+	/* Cancel the indirect read; NOTE(review): reuses the WR cancel mask — confirm the bit offset matches the RD register */
+	writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
+	       reg_base + CQSPI_REG_INDIRECTRD);
+
+	return ret;
+}
+
static void cqspi_request_mmap_dma(struct cqspi_st *cqspi)
{
dma_cap_mask_t mask;
@@ -1234,7 +2017,7 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
const struct cqspi_driver_platdata *ddata;
struct spi_nor_hwcaps hwcaps;
struct cqspi_flash_pdata *f_pdata;
- struct spi_nor *nor;
+ struct spi_nor *nor = NULL;
struct mtd_info *mtd;
unsigned int cs;
int i, ret;
@@ -1285,6 +2068,13 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
goto err;
}
+ if (ddata->quirks & CQSPI_SUPPORT_RESET) {
+ ret = cqspi->flash_reset(cqspi,
+ CQSPI_RESET_TYPE_HWPIN);
+ if (ret)
+ goto err;
+ }
+
ret = spi_nor_scan(nor, NULL, &hwcaps);
if (ret)
goto err;
@@ -1295,7 +2085,7 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
f_pdata->registered = true;
- if (mtd->size <= cqspi->ahb_size) {
+ if (mtd->size <= cqspi->ahb_size && !cqspi->read_dma) {
f_pdata->use_direct_mode = true;
dev_dbg(nor->dev, "using direct mode for %s\n",
mtd->name);
@@ -1305,6 +2095,17 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
}
}
+ if (nor && !(nor->flags & SNOR_F_BROKEN_OCTAL_DDR)) {
+ ret = cqspi_setup_edgemode(nor);
+ if (ret)
+ goto err;
+ complete_all(&cqspi->tuning_complete);
+ complete_all(&cqspi->request_complete);
+ INIT_DELAYED_WORK(&nor->complete_work, cqspi_periodictuning);
+ schedule_delayed_work(&nor->complete_work,
+ msecs_to_jiffies(CQSPI_TUNING_PERIODICITY_MS));
+ }
+
return 0;
err:
@@ -1325,6 +2126,8 @@ static int cqspi_probe(struct platform_device *pdev)
const struct cqspi_driver_platdata *ddata;
int ret;
int irq;
+ u32 idcode;
+ u32 version;
cqspi = devm_kzalloc(dev, sizeof(*cqspi), GFP_KERNEL);
if (!cqspi)
@@ -1367,6 +2170,8 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi->ahb_size = resource_size(res_ahb);
init_completion(&cqspi->transfer_complete);
+ init_completion(&cqspi->tuning_complete);
+ init_completion(&cqspi->request_complete);
/* Obtain IRQ line. */
irq = platform_get_irq(pdev, 0);
@@ -1411,6 +2216,35 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi->wr_delay = 5 * DIV_ROUND_UP(NSEC_PER_SEC,
cqspi->master_ref_clk_hz);
+ if (ddata && (ddata->quirks & CQSPI_HAS_DMA)) {
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ cqspi->read_dma = true;
+ }
+
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "xlnx,versal-ospi-1.0")) {
+ cqspi->eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(cqspi->eemi_ops))
+ return PTR_ERR(cqspi->eemi_ops);
+ if (cqspi->read_dma)
+ cqspi->indirect_read_dma =
+ cqspi_versal_indirect_read_dma;
+ cqspi->flash_reset = cqspi_versal_flash_reset;
+ cqspi->access_mode = CQSPI_DMA_MODE;
+ cqspi->dll_mode = CQSPI_DLL_MODE_BYPASS;
+
+ ret = cqspi->eemi_ops->get_chipid(&idcode, &version);
+ if (ret < 0) {
+ dev_err(dev, "Cannot get chipid is %d\n", ret);
+ goto probe_clk_failed;
+ }
+ if ((version & SILICON_VER_MASK) != SILICON_VER_1) {
+ cqspi->dll_mode = CQSPI_DLL_MODE_MASTER;
+ if (cqspi->master_ref_clk_hz >= TAP_GRAN_SEL_MIN_FREQ)
+ writel(0x1, cqspi->iobase + CQSPI_REG_ECO);
+ }
+ }
+
ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
pdev->name, cqspi);
if (ret) {
@@ -1422,6 +2256,9 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi_controller_init(cqspi);
cqspi->current_cs = -1;
cqspi->sclk = 0;
+ cqspi->extra_dummy = false;
+ cqspi->edge_mode = CQSPI_EDGE_MODE_SDR;
+ cqspi->unalined_byte_cnt = false;
ret = cqspi_setup_flash(cqspi, np);
if (ret) {
@@ -1503,6 +2340,13 @@ static const struct cqspi_driver_platdata am654_ospi = {
.quirks = CQSPI_NEEDS_WR_DELAY,
};
+static const struct cqspi_driver_platdata versal_ospi = {
+ .hwcaps_mask = (SNOR_HWCAPS_READ | SNOR_HWCAPS_READ_FAST |
+ SNOR_HWCAPS_PP | SNOR_HWCAPS_PP_8_8_8 |
+ SNOR_HWCAPS_READ_1_1_8 | SNOR_HWCAPS_READ_8_8_8),
+ .quirks = CQSPI_HAS_DMA | CQSPI_SUPPORT_RESET,
+};
+
static const struct of_device_id cqspi_dt_ids[] = {
{
.compatible = "cdns,qspi-nor",
@@ -1516,6 +2360,10 @@ static const struct of_device_id cqspi_dt_ids[] = {
.compatible = "ti,am654-ospi",
.data = &am654_ospi,
},
+ {
+ .compatible = "xlnx,versal-ospi-1.0",
+ .data = (void *)&versal_ospi,
+ },
{ /* end of table */ }
};
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index cc68ea84318e..53020e471764 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -279,23 +279,28 @@ int spi_nor_write_disable(struct spi_nor *nor)
static int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
{
int ret;
+ int len;
+ nor->isparallel ? (len = 2) : (len = 1);
if (nor->spimem) {
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, sr, 1));
+ SPI_MEM_OP_DATA_IN(len, sr, 1));
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR,
- sr, 1);
+ sr, len);
}
if (ret)
dev_dbg(nor->dev, "error %d reading SR\n", ret);
+ if (nor->isparallel)
+ sr[0] |= sr[1];
+
return ret;
}
@@ -310,23 +315,28 @@ static int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr)
{
int ret;
+ int len;
+ nor->isparallel ? (len = 2) : (len = 1);
if (nor->spimem) {
struct spi_mem_op op =
SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(1, fsr, 1));
+ SPI_MEM_OP_DATA_IN(len, fsr, 1));
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDFSR,
- fsr, 1);
+ fsr, len);
}
if (ret)
dev_dbg(nor->dev, "error %d reading FSR\n", ret);
+ if (nor->isparallel)
+ fsr[0] &= fsr[1];
+
return ret;
}
@@ -438,24 +448,49 @@ static int spansion_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
*
* Return: 0 on success, -errno otherwise.
*/
-int spi_nor_write_ear(struct spi_nor *nor, u8 ear)
+int spi_nor_write_ear(struct spi_nor *nor, u32 ear)
{
+ u8 code = SPINOR_OP_WREAR;
+ u8 addr;
int ret;
+ struct mtd_info *mtd = &nor->mtd;
+
+
+ if (mtd->size <= (0x1000000) << nor->shift)
+ return 0;
- nor->bouncebuf[0] = ear;
+ ear = ear % (u32)mtd->size;
+ addr = ear >> 24;
+
+ if (!nor->isstacked && addr == nor->curbank)
+ return 0;
+
+ if (nor->isstacked && mtd->size <= 0x2000000)
+ return 0;
+
+ if (nor->jedec_id == CFI_MFR_AMD)
+ code = SPINOR_OP_BRWR;
+ if (nor->jedec_id == CFI_MFR_ST ||
+ nor->jedec_id == CFI_MFR_MACRONIX ||
+ nor->jedec_id == CFI_MFR_PMC) {
+ spi_nor_write_enable(nor);
+ code = SPINOR_OP_WREAR;
+ }
+ nor->bouncebuf[0] = addr;
if (nor->spimem) {
struct spi_mem_op op =
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREAR, 1),
+ SPI_MEM_OP(SPI_MEM_OP_CMD(code, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
ret = spi_mem_exec_op(nor->spimem, &op);
} else {
- ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREAR,
+ ret = nor->controller_ops->write_reg(nor, code,
nor->bouncebuf, 1);
}
+ nor->curbank = addr;
if (ret)
dev_dbg(nor->dev, "error %d writing EAR\n", ret);
@@ -495,6 +530,50 @@ int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
}
/**
+ * read_ear - Get the extended/bank address register value
+ * @nor: Pointer to the flash control structure
+ * @info: Pointer to the flash info structure
+ *
+ * This routine reads the Extended/bank address register value
+ *
+ * Return: the extended address register value on success, negative errno otherwise.
+ */
+static int read_ear(struct spi_nor *nor, struct flash_info *info)
+{
+ int ret;
+ u8 code;
+
+ /* This is actually Spansion */
+ if (nor->jedec_id == CFI_MFR_AMD)
+ code = SPINOR_OP_BRRD;
+ /* This is actually Micron */
+ else if (nor->jedec_id == CFI_MFR_ST ||
+ nor->jedec_id == CFI_MFR_MACRONIX ||
+ nor->jedec_id == CFI_MFR_PMC)
+ code = SPINOR_OP_RDEAR;
+ else
+ return -EINVAL;
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(code, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = nor->controller_ops->read_reg(nor, code, nor->bouncebuf, 1);
+ }
+ if (ret < 0) {
+ pr_err("error %d reading EAR\n", ret);
+ return ret;
+ }
+
+ return nor->bouncebuf[0];
+}
+
+
+/**
* spi_nor_xsr_ready() - Query the Status Register of the S3AN flash to see if
* the flash is ready for new commands.
* @nor: pointer to 'struct spi_nor'.
@@ -711,6 +790,7 @@ int spi_nor_wait_till_ready(struct spi_nor *nor)
return spi_nor_wait_till_ready_with_timeout(nor,
DEFAULT_READY_WAIT_JIFFIES);
}
+EXPORT_SYMBOL_GPL(spi_nor_wait_till_ready);
/**
* spi_nor_write_sr() - Write the Status Register.
@@ -897,6 +977,118 @@ static int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
return 0;
}
+static inline u16 min_lockable_sectors(struct spi_nor *nor,
+ u16 n_sectors)
+{
+ u16 lock_granularity;
+
+ /*
+ * Revisit - SST (not used by us) has the same JEDEC ID as micron but
+ * protected area table is similar to that of spansion.
+ */
+ lock_granularity = max(1, n_sectors / M25P_MAX_LOCKABLE_SECTORS);
+ if (nor->jedec_id == CFI_MFR_ST) /* Micron */
+ lock_granularity = 1;
+
+ return lock_granularity;
+}
+
+static inline uint32_t get_protected_area_start(struct spi_nor *nor,
+ u8 lock_bits)
+{
+ u16 n_sectors;
+ u32 sector_size;
+ u64 mtd_size;
+ struct mtd_info *mtd = &nor->mtd;
+
+ n_sectors = nor->n_sectors;
+ sector_size = nor->sector_size;
+ mtd_size = mtd->size;
+
+ if (nor->isparallel) {
+ sector_size = (nor->sector_size >> 1);
+ mtd_size = (mtd->size >> 1);
+ }
+ if (nor->isstacked) {
+ n_sectors = (nor->n_sectors >> 1);
+ mtd_size = (mtd->size >> 1);
+ }
+
+ return mtd_size - (1 << (lock_bits - 1)) *
+ min_lockable_sectors(nor, n_sectors) * sector_size;
+}
+
+static u8 min_protected_area_including_offset(struct spi_nor *nor,
+ uint32_t offset)
+{
+ u8 lock_bits, lockbits_limit;
+
+ /*
+ * Revisit - SST (not used by us) has the same JEDEC ID as micron but
+ * protected area table is similar to that of spansion.
+	 * Micron has 4 block protect bits.
+ */
+ lockbits_limit = 7;
+ if (nor->jedec_id == CFI_MFR_ST) /* Micron */
+ lockbits_limit = 15;
+
+ for (lock_bits = 1; lock_bits < lockbits_limit; lock_bits++) {
+ if (offset >= get_protected_area_start(nor, lock_bits))
+ break;
+ }
+ return lock_bits;
+}
+
+static int write_sr_modify_protection(struct spi_nor *nor, u8 status,
+ u8 lock_bits)
+{
+ u8 status_new, bp_mask;
+ int ret;
+
+ status_new = status & ~SR_BP_BIT_MASK;
+ bp_mask = (lock_bits << SR_BP_BIT_OFFSET) & SR_BP_BIT_MASK;
+
+ /* Micron */
+ if (nor->jedec_id == CFI_MFR_ST) {
+ /* To support chips with more than 896 sectors (56MB) */
+ status_new &= ~SR_BP3;
+
+ /* Protected area starts from top */
+ status_new &= ~SR_BP_TB;
+
+ if (lock_bits > 7)
+ bp_mask |= SR_BP3;
+ }
+
+ if (nor->is_lock)
+ status_new |= bp_mask;
+
+ spi_nor_write_enable(nor);
+
+ /* For spansion flashes */
+ if (nor->jedec_id == CFI_MFR_AMD) {
+ ret = spi_nor_write_16bit_sr_and_check(nor, status_new);
+ if (ret)
+ return 1;
+ } else {
+ ret = spi_nor_write_sr(nor, &status_new, 1);
+ if (ret)
+ return 1;
+ }
+ return 0;
+}
+
+static u8 bp_bits_from_sr(struct spi_nor *nor, u8 status)
+{
+ u8 ret;
+
+ ret = (((status) & SR_BP_BIT_MASK) >> SR_BP_BIT_OFFSET);
+ if (nor->jedec_id == 0x20)
+ ret |= ((status & SR_BP3) >> (SR_BP_BIT_OFFSET + 1));
+
+ return ret;
+}
+
/**
* spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that
* the byte written match the received value without affecting other bits in the
@@ -1001,8 +1193,21 @@ static int spi_nor_erase_chip(struct spi_nor *nor)
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
SPI_MEM_OP_NO_DATA);
-
+ if (nor->isstacked)
+ nor->spi->master->flags &= ~SPI_MASTER_U_PAGE;
ret = spi_mem_exec_op(nor->spimem, &op);
+ if (ret)
+ return ret;
+
+ if (nor->isstacked) {
+ /* Wait until previous write command finished */
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ return ret;
+ nor->spi->master->flags |= SPI_MASTER_U_PAGE;
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ }
+ return ret;
} else {
ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CHIP_ERASE,
NULL, 0);
@@ -1080,6 +1285,16 @@ static bool spi_nor_has_uniform_erase(const struct spi_nor *nor)
static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
{
+ /* Do some manufacturer fixups first */
+ switch (nor->jedec_id) {
+ case CFI_MFR_AMD:
+ /* No small sector erase for 4-byte command set */
+ nor->erase_opcode = SPINOR_OP_SE;
+ nor->mtd.erasesize = nor->info->sector_size;
+ break;
+ default:
+ break;
+ }
nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
@@ -1452,7 +1667,7 @@ destroy_erase_cmd_list:
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
- u32 addr, len;
+ u32 addr, len, offset;
uint32_t rem;
int ret;
@@ -1508,8 +1723,33 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
ret = spi_nor_write_enable(nor);
if (ret)
goto erase_err;
+ offset = addr;
+ if (nor->isparallel == 1)
+ offset /= 2;
+ if (nor->isstacked == 1) {
+ if (offset >= (mtd->size / 2)) {
+ offset = offset - (mtd->size / 2);
+ nor->spi->master->flags |=
+ SPI_MASTER_U_PAGE;
+ } else {
+ nor->spi->master->flags &=
+ ~SPI_MASTER_U_PAGE;
+ }
+ }
+ if (nor->addr_width == 3) {
+ /* Update Extended Address Register */
+ ret = spi_nor_write_ear(nor, offset);
+ if (ret)
+ goto erase_err;
+ }
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto erase_err;
- ret = spi_nor_erase_sector(nor, addr);
+ ret = spi_nor_write_enable(nor);
+ if (ret)
+ goto erase_err;
+ ret = spi_nor_erase_sector(nor, offset);
if (ret)
goto erase_err;
@@ -1864,14 +2104,45 @@ static const struct spi_nor_locking_ops spi_nor_sr_locking_ops = {
static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ u8 status;
+ u8 lock_bits;
int ret;
ret = spi_nor_lock_and_prep(nor);
if (ret)
return ret;
+ if (nor->isparallel == 1)
+ ofs = ofs >> nor->shift;
+
+ if (nor->isstacked == 1) {
+ if (ofs >= (mtd->size / 2)) {
+ ofs = ofs - (mtd->size / 2);
+ nor->spi->master->flags |= SPI_MASTER_U_PAGE;
+ } else {
+ nor->spi->master->flags &= ~SPI_MASTER_U_PAGE;
+ }
+ }
ret = nor->params->locking_ops->lock(nor, ofs, len);
+ /* Wait until finished previous command */
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto err;
+
+ ret = spi_nor_read_sr(nor, &status);
+ if (ret)
+ goto err;
+
+ lock_bits = min_protected_area_including_offset(nor, ofs);
+ /* Only modify protection if it will not unlock other areas */
+ if (lock_bits > bp_bits_from_sr(nor, status)) {
+ nor->is_lock = 1;
+ ret = write_sr_modify_protection(nor, status, lock_bits);
+ } else {
+ dev_err(nor->dev, "trying to unlock already locked area\n");
+ }
+err:
spi_nor_unlock_and_unprep(nor);
return ret;
}
@@ -1880,13 +2151,44 @@ static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
int ret;
+ u8 status[2];
+ u8 lock_bits;
ret = spi_nor_lock_and_prep(nor);
if (ret)
return ret;
+ if (nor->isparallel == 1)
+ ofs = ofs >> nor->shift;
+
+ if (nor->isstacked == 1) {
+ if (ofs >= (mtd->size / 2)) {
+ ofs = ofs - (mtd->size / 2);
+ nor->spi->master->flags |= SPI_MASTER_U_PAGE;
+ } else {
+ nor->spi->master->flags &= ~SPI_MASTER_U_PAGE;
+ }
+ }
ret = nor->params->locking_ops->unlock(nor, ofs, len);
+ /* Wait until finished previous command */
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto err;
+
+ ret = spi_nor_read_sr(nor, status);
+ if (ret)
+ goto err;
+ lock_bits = min_protected_area_including_offset(nor, ofs + len) - 1;
+
+ /* Only modify protection if it will not lock other areas */
+ if (lock_bits < bp_bits_from_sr(nor, status[0])) {
+ nor->is_lock = 0;
+ ret = write_sr_modify_protection(nor, status[0], lock_bits);
+ } else {
+ dev_err(nor->dev, "trying to lock already unlocked area\n");
+ }
+err:
spi_nor_unlock_and_unprep(nor);
return ret;
}
@@ -2047,6 +2349,7 @@ static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
u8 *id = nor->bouncebuf;
unsigned int i;
int ret;
+ int tmp;
if (nor->spimem) {
struct spi_mem_op op =
@@ -2065,6 +2368,9 @@ static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
return ERR_PTR(ret);
}
+ for (tmp = 0; tmp < SPI_NOR_MAX_ID_LEN; tmp++)
+ nor->device_id[tmp] = id[tmp];
+
for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
info = spi_nor_search_part_by_id(manufacturers[i]->parts,
manufacturers[i]->nparts,
@@ -2085,19 +2391,109 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
ssize_t ret;
-
+ u32 offset = from;
+ u32 stack_shift = 0;
+ u32 read_len = 0;
+ u32 rem_bank_len = 0;
+ u8 bank;
+ u8 is_ofst_odd = 0;
+ u8 cur_bank;
+ u8 nxt_bank;
+ u32 bank_size;
+ u_char *ptr;
+
+#define OFFSET_16_MB 0x1000000
dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
+ if (nor->isparallel && (offset & 1)) {
+ /* We can hit this case when we use file system like ubifs */
+ from = (loff_t)(from - 1);
+ len = (size_t)(len + 1);
+ is_ofst_odd = 1;
+ ptr = kmalloc(len, GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ } else {
+ ptr = buf;
+ }
ret = spi_nor_lock_and_prep(nor);
- if (ret)
+ if (ret) {
+ if (is_ofst_odd == 1)
+ kfree(ptr);
return ret;
+ }
while (len) {
- loff_t addr = from;
+ if (nor->addr_width == 3) {
+ bank = (u32)from / (OFFSET_16_MB << nor->shift);
+ rem_bank_len = ((OFFSET_16_MB << nor->shift) *
+ (bank + 1)) - from;
+ }
+ offset = from;
+ if (nor->isparallel == 1)
+ offset /= 2;
+ if (nor->isstacked == 1) {
+ stack_shift = 1;
+ if (offset >= (mtd->size / 2)) {
+ offset = offset - (mtd->size / 2);
+ nor->spi->master->flags |= SPI_MASTER_U_PAGE;
+ } else {
+ nor->spi->master->flags &= ~SPI_MASTER_U_PAGE;
+ }
+ }
+ if (nor->addr_width == 4) {
+ /*
+ * Some flash devices like N25Q512 have multiple dies
+ * in it. Read operation in these devices is bounded
+ * by its die segment. In a continuous read, across
+ * multiple dies, when the last byte of the selected
+ * die segment is read, the next byte read is the
+ * first byte of the same die segment. This is Die
+ * cross over issue. So to handle this issue, split
+ * a read transaction, that spans across multiple
+ * banks, into one read per bank. Bank size is 16MB
+ * for single and dual stacked mode and 32MB for dual
+ * parallel mode.
+ */
+ if (nor->spi && nor->spi->multi_die) {
+ bank_size = (OFFSET_16_MB << nor->shift);
+ cur_bank = offset / bank_size;
+ nxt_bank = (offset + len) / bank_size;
+ if (cur_bank != nxt_bank)
+ rem_bank_len = (bank_size *
+ (cur_bank + 1)) -
+ offset;
+ else
+ rem_bank_len = (mtd->size >>
+ stack_shift) -
+ (offset << nor->shift);
+ } else {
+ rem_bank_len = (mtd->size >> stack_shift) -
+ (offset << nor->shift);
+ }
+ }
+
+ if (nor->addr_width == 3) {
+ ret = spi_nor_write_ear(nor, offset);
+ if (ret) {
+ dev_err(nor->dev, "While writing ear register\n");
+ goto read_err;
+ }
+ }
+ if (len < rem_bank_len)
+ read_len = len;
+ else
+ read_len = rem_bank_len;
+
+ /* Wait till previous write/erase is done. */
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto read_err;
- addr = spi_nor_convert_addr(nor, addr);
+ offset = spi_nor_convert_addr(nor, offset);
- ret = spi_nor_read_data(nor, addr, len, buf);
+ ret = spi_nor_read_data(nor, (offset), read_len, ptr);
if (ret == 0) {
/* We shouldn't see 0-length reads */
ret = -EIO;
@@ -2107,14 +2503,23 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
goto read_err;
WARN_ON(ret > len);
- *retlen += ret;
+ if (is_ofst_odd == 1) {
+ memcpy(buf, (ptr + 1), (len - 1));
+ *retlen += (ret - 1);
+ } else {
+ *retlen += ret;
+ }
buf += ret;
+ if (!is_ofst_odd)
+ ptr += ret;
from += ret;
len -= ret;
}
ret = 0;
read_err:
+ if (is_ofst_odd == 1)
+ kfree(ptr);
spi_nor_unlock_and_unprep(nor);
return ret;
}
@@ -2130,8 +2535,29 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
struct spi_nor *nor = mtd_to_spi_nor(mtd);
size_t page_offset, page_remain, i;
ssize_t ret;
+ u32 offset, stack_shift = 0;
+ u8 bank = 0;
+ u32 rem_bank_len = 0;
+#define OFFSET_16_MB 0x1000000
dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
+ /*
+ * Cannot write to odd offset in parallel mode,
+ * so write 2 bytes first
+ */
+ if (nor->isparallel && (to & 1)) {
+ u8 two[2] = {0xff, buf[0]};
+ size_t local_retlen;
+
+ ret = spi_nor_write(mtd, to & ~1, 2, &local_retlen, two);
+ if (ret < 0)
+ return ret;
+
+ *retlen += 1; /* We've written only one actual byte */
+ ++buf;
+ --len;
+ ++to;
+ }
ret = spi_nor_lock_and_prep(nor);
if (ret)
@@ -2141,6 +2567,11 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
ssize_t written;
loff_t addr = to + i;
+ if (nor->addr_width == 3) {
+ bank = (u32)to / (OFFSET_16_MB << nor->shift);
+ rem_bank_len = ((OFFSET_16_MB << nor->shift) *
+ (bank + 1)) - to;
+ }
/*
* If page_size is a power of two, the offset can be quickly
* calculated with an AND operation. On the other cases we
@@ -2156,17 +2587,58 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
page_offset = do_div(aux, nor->page_size);
}
- /* the size of data remaining on the first page */
- page_remain = min_t(size_t,
- nor->page_size - page_offset, len - i);
+ offset = (to + i);
+ if (nor->isparallel == 1)
+ offset /= 2;
+
+ if (nor->isstacked == 1) {
+ stack_shift = 1;
+ if (offset >= (mtd->size / 2)) {
+ offset = offset - (mtd->size / 2);
+ nor->spi->master->flags |= SPI_MASTER_U_PAGE;
+ } else {
+ nor->spi->master->flags &= ~SPI_MASTER_U_PAGE;
+ }
+ }
+
+ /* Die cross over issue is not handled */
+ if (nor->addr_width == 4)
+ rem_bank_len = (mtd->size >> stack_shift) - offset;
+ if (nor->addr_width == 3) {
+ ret = spi_nor_write_ear(nor, offset);
+ if (ret) {
+ dev_err(nor->dev, "While writing ear register\n");
+ goto write_err;
+ }
+ }
+ if (nor->isstacked == 1) {
+ if (len <= rem_bank_len) {
+ page_remain = min_t(size_t,
+ nor->page_size -
+ page_offset, len - i);
+ } else {
+ /*
+ * the size of data remaining
+ * on the first page
+ */
+ page_remain = rem_bank_len;
+ }
+ } else {
+ page_remain = min_t(size_t,
+ nor->page_size -
+ page_offset, len - i);
+ }
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto write_err;
- addr = spi_nor_convert_addr(nor, addr);
+ offset = spi_nor_convert_addr(nor, offset);
ret = spi_nor_write_enable(nor);
if (ret)
goto write_err;
- ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
+ ret = spi_nor_write_data(nor, (offset), page_remain, buf + i);
if (ret < 0)
goto write_err;
written = ret;
@@ -2176,6 +2648,13 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
goto write_err;
*retlen += written;
i += written;
+ if (written != page_remain) {
+ dev_err(nor->dev,
+ "While writing %zu bytes written %zd bytes\n",
+ page_remain, written);
+ ret = -EIO;
+ goto write_err;
+ }
}
write_err:
@@ -2183,6 +2662,27 @@ write_err:
return ret;
}
+static int spi_nor_switch_micron_octal_ddr(struct spi_nor *nor)
+{
+ u8 cr = SPINOR_VCR_OCTAL_DDR;
+ int ret;
+
+ spi_nor_write_enable(nor);
+ nor->addr_width = 3;
+ nor->is_addrvalid = true;
+ nor->reg_addr = 0x0;
+ ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRCR, &cr, 1);
+ nor->is_addrvalid = false;
+ nor->addr_width = 4;
+ if (ret < 0) {
+ dev_err(nor->dev,
+ "error while writing configuration register\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int spi_nor_check(struct spi_nor *nor)
{
if (!nor->dev ||
@@ -2562,6 +3062,9 @@ static int spi_nor_select_erase(struct spi_nor *nor)
u32 wanted_size = nor->info->sector_size;
int i;
+ if (mtd->erasesize &&
+ nor->jedec_id != CFI_MFR_AMD)
+ return 0;
/*
* The previous implementation handling Sector Erase commands assumed
* that the SPI flash memory has an uniform layout then used only one
@@ -2581,6 +3084,9 @@ static int spi_nor_select_erase(struct spi_nor *nor)
return -EINVAL;
nor->erase_opcode = erase->opcode;
mtd->erasesize = erase->size;
+ if (nor->shift)
+ mtd->erasesize = mtd->erasesize << nor->shift;
+
return 0;
}
@@ -2629,6 +3135,12 @@ static int spi_nor_default_setup(struct spi_nor *nor,
* Yet another reason to switch to spi-mem.
*/
ignored_mask = SNOR_HWCAPS_X_X_X;
+ if (!(nor->flags & SNOR_F_BROKEN_OCTAL_DDR)) {
+ if (hwcaps->mask & SNOR_HWCAPS_READ_8_8_8)
+ ignored_mask &= ~SNOR_HWCAPS_READ_8_8_8;
+ if (hwcaps->mask & SNOR_HWCAPS_PP_8_8_8)
+ ignored_mask &= ~SNOR_HWCAPS_PP_8_8_8;
+ }
if (shared_mask & ignored_mask) {
dev_dbg(nor->dev,
"SPI n-n-n protocols are not supported.\n");
@@ -2772,6 +3284,13 @@ static void spi_nor_info_init_params(struct spi_nor *nor)
spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
0, 8, SPINOR_OP_READ_1_1_8,
SNOR_PROTO_1_1_8);
+
+ if (!(nor->flags & SNOR_F_BROKEN_OCTAL_DDR)) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_8_8_8],
+ 0, 16, SPINOR_OP_READ_1_1_8,
+ SNOR_PROTO_8_8_8);
+ }
}
/* Page Program settings. */
@@ -2779,6 +3298,18 @@ static void spi_nor_info_init_params(struct spi_nor *nor)
spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
SPINOR_OP_PP, SNOR_PROTO_1_1_1);
+ if (info->flags & SPI_NOR_OCTAL_WRITE) {
+ params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_8;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_1_8],
+ SPINOR_OP_PP_1_1_8, SNOR_PROTO_1_1_8);
+ if (!(nor->flags & SNOR_F_BROKEN_OCTAL_DDR)) {
+ params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_8_8_8],
+ SPINOR_OP_PP_1_1_8,
+ SNOR_PROTO_8_8_8);
+ }
+ }
+
/*
* Sector Erase settings. Sort Erase Types in ascending order, with the
* smallest erase size starting at BIT(0).
@@ -2915,6 +3446,26 @@ static int spi_nor_quad_enable(struct spi_nor *nor)
return nor->params->quad_enable(nor);
}
+static void spi_nor_prot_unlock(struct spi_nor *nor)
+{
+ if (nor->info->flags & SST_GLOBAL_PROT_UNLK) {
+ spi_nor_write_enable(nor);
+ if (nor->spimem) {
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(GLOBAL_BLKPROT_UNLK, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_DATA);
+
+ spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ /* Unlock global write protection bits */
+ nor->controller_ops->write_reg(nor, GLOBAL_BLKPROT_UNLK, NULL, 0);
+ }
+ }
+ spi_nor_wait_till_ready(nor);
+}
+
/**
* spi_nor_unlock_all() - Unlocks the entire flash memory array.
* @nor: pointer to a 'struct spi_nor'.
@@ -2926,9 +3477,14 @@ static int spi_nor_quad_enable(struct spi_nor *nor)
*/
static int spi_nor_unlock_all(struct spi_nor *nor)
{
- if (nor->flags & SNOR_F_HAS_LOCK)
- return spi_nor_unlock(&nor->mtd, 0, nor->params->size);
+ const struct flash_info *info = nor->info;
+ if (nor->flags & SNOR_F_HAS_LOCK) {
+ if (info->flags & SST_GLOBAL_PROT_UNLK) {
+ spi_nor_prot_unlock(nor);
+ }
+ return spi_nor_unlock(&nor->mtd, 0, nor->params->size);
+ }
return 0;
}
@@ -2948,7 +3504,8 @@ static int spi_nor_init(struct spi_nor *nor)
return err;
}
- if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES)) {
+ if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
+ (nor->jedec_id != CFI_MFR_AMD)) {
/*
* If the RESET# pin isn't hooked up properly, or the system
* otherwise doesn't perform a reset command in the boot
@@ -2981,7 +3538,9 @@ void spi_nor_restore(struct spi_nor *nor)
{
/* restore the addressing mode */
if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
- nor->flags & SNOR_F_BROKEN_RESET)
+ (nor->flags & SNOR_F_BROKEN_RESET) &&
+ (nor->jedec_id != CFI_MFR_AMD) &&
+ !(nor->info->flags & SPI_NOR_4B_OPCODES))
nor->params->set_4byte_addr_mode(nor, false);
}
EXPORT_SYMBOL_GPL(spi_nor_restore);
@@ -3005,13 +3564,57 @@ static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
static int spi_nor_set_addr_width(struct spi_nor *nor)
{
+ struct device_node *np = spi_nor_get_flash_node(nor);
+ struct device_node *np_spi;
if (nor->addr_width) {
/* already configured from SFDP */
} else if (nor->info->addr_width) {
nor->addr_width = nor->info->addr_width;
} else if (nor->mtd.size > 0x1000000) {
- /* enable 4-byte addressing if the device exceeds 16MiB */
- nor->addr_width = 4;
+#ifdef CONFIG_OF
+ np_spi = of_get_next_parent(np);
+ if (of_property_match_string(np_spi, "compatible",
+ "xlnx,zynq-qspi-1.0") >= 0) {
+ int status;
+
+ nor->addr_width = 3;
+ nor->params->set_4byte_addr_mode(nor, false);
+ status = read_ear(nor, (struct flash_info *)nor->info);
+ if (status < 0)
+ dev_warn(nor->dev, "failed to read ear reg\n");
+ else
+ nor->curbank = status & EAR_SEGMENT_MASK;
+ } else {
+#endif
+ /*
+ * enable 4-byte addressing if the
+ * device exceeds 16MiB
+ */
+ nor->addr_width = 4;
+ if (nor->jedec_id == CFI_MFR_AMD ||
+ nor->info->flags & SPI_NOR_4B_OPCODES) {
+ spi_nor_set_4byte_opcodes(nor);
+ } else {
+ np_spi = of_get_next_parent(np);
+ if (of_property_match_string(np_spi,
+ "compatible",
+ "xlnx,xps-spi-2.00.a") >= 0) {
+ nor->addr_width = 3;
+ nor->params->set_4byte_addr_mode(nor, false);
+ } else {
+ nor->params->set_4byte_addr_mode(nor, true);
+ if (nor->isstacked) {
+ nor->spi->master->flags |=
+ SPI_MASTER_U_PAGE;
+ nor->params->set_4byte_addr_mode(nor, true);
+ nor->spi->master->flags &=
+ ~SPI_MASTER_U_PAGE;
+ }
+ }
+ }
+#ifdef CONFIG_OF
+ }
+#endif
} else {
nor->addr_width = 3;
}
@@ -3040,7 +3643,7 @@ static void spi_nor_debugfs_init(struct spi_nor *nor,
info->id_len, info->id);
}
-static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
+static struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
const char *name)
{
const struct flash_info *info = NULL;
@@ -3062,7 +3665,7 @@ static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
jinfo = spi_nor_read_id(nor);
if (IS_ERR(jinfo)) {
- return jinfo;
+ return (struct flash_info *)jinfo;
} else if (jinfo != info) {
/*
* JEDEC knows better, so overwrite platform ID. We
@@ -3077,18 +3680,20 @@ static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
}
}
- return info;
+ return (struct flash_info *)info;
}
int spi_nor_scan(struct spi_nor *nor, const char *name,
const struct spi_nor_hwcaps *hwcaps)
{
- const struct flash_info *info;
+ struct flash_info *info = NULL;
struct device *dev = nor->dev;
struct mtd_info *mtd = &nor->mtd;
struct device_node *np = spi_nor_get_flash_node(nor);
int ret;
int i;
+ struct device_node *np_spi;
+ u32 is_dual;
ret = spi_nor_check(nor);
if (ret)
@@ -3134,6 +3739,9 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
if (info->flags & SPI_NOR_HAS_LOCK)
nor->flags |= SNOR_F_HAS_LOCK;
+ if ((u16)nor->jedec_id != CFI_MFR_MICRON)
+ nor->flags |= SNOR_F_BROKEN_OCTAL_DDR;
+
mtd->_write = spi_nor_write;
/* Init flash parameters based on flash_info struct and SFDP */
@@ -3150,6 +3758,73 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
mtd->size = nor->params->size;
mtd->_erase = spi_nor_erase;
mtd->_read = spi_nor_read;
+ nor->page_size = nor->params->page_size;
+#ifdef CONFIG_OF
+ np_spi = of_get_next_parent(np);
+ if (((of_property_match_string(np_spi, "compatible",
+ "xlnx,zynq-qspi-1.0") >= 0) ||
+ (of_property_match_string(np_spi, "compatible",
+ "xlnx,zynqmp-qspi-1.0") >= 0)) ||
+ (of_property_match_string(np_spi, "compatible",
+ "xlnx,versal-qspi-1.0") >= 0)) {
+ if (of_property_read_u32(np_spi, "is-dual",
+ &is_dual) < 0) {
+ /* Default to single if prop not defined */
+ nor->shift = 0;
+ nor->isstacked = 0;
+ nor->isparallel = 0;
+ } else {
+ if (is_dual == 1) {
+ /* dual parallel */
+ nor->shift = 1;
+ info->sector_size <<= nor->shift;
+ info->page_size <<= nor->shift;
+ mtd->size <<= nor->shift;
+ nor->isparallel = 1;
+ nor->isstacked = 0;
+ nor->spi->master->flags |=
+ (SPI_MASTER_DATA_STRIPE
+ | SPI_MASTER_BOTH_CS);
+ } else {
+#ifdef CONFIG_SPI_ZYNQ_QSPI_DUAL_STACKED
+ /* dual stacked */
+ nor->shift = 0;
+ mtd->size <<= 1;
+ info->n_sectors <<= 1;
+ nor->isstacked = 1;
+ nor->isparallel = 0;
+#else
+ u32 is_stacked;
+
+ if (of_property_read_u32(np_spi,
+ "is-stacked",
+ &is_stacked) < 0) {
+ is_stacked = 0;
+ }
+ if (is_stacked) {
+ /* dual stacked */
+ nor->shift = 0;
+ mtd->size <<= 1;
+ info->n_sectors <<= 1;
+ nor->isstacked = 1;
+ nor->isparallel = 0;
+ } else {
+ /* single */
+ nor->shift = 0;
+ nor->isstacked = 0;
+ nor->isparallel = 0;
+ }
+#endif
+ }
+ }
+ }
+#else
+ /* Default to single */
+ nor->shift = 0;
+ nor->isstacked = 0;
+ nor->isparallel = 0;
+#endif
+
mtd->_resume = spi_nor_resume;
if (nor->params->locking_ops) {
@@ -3181,7 +3856,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
mtd->flags |= MTD_NO_ERASE;
mtd->dev.parent = dev;
- nor->page_size = nor->params->page_size;
+ nor->jedec_id = info->id[0];
mtd->writebufsize = nor->page_size;
if (of_property_read_bool(np, "broken-flash-reset"))
@@ -3218,6 +3893,14 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
+ if (hwcaps->mask & (SNOR_HWCAPS_READ_8_8_8 | SNOR_HWCAPS_PP_8_8_8)) {
+ if (!(nor->flags & SNOR_F_BROKEN_OCTAL_DDR)) {
+ ret = spi_nor_switch_micron_octal_ddr(nor);
+ if (ret)
+ return ret;
+ }
+ }
+
if (mtd->numeraseregions)
for (i = 0; i < mtd->numeraseregions; i++)
dev_dbg(dev,
@@ -3307,6 +3990,7 @@ static int spi_nor_probe(struct spi_mem *spimem)
spi_mem_set_drvdata(spimem, nor);
+ nor->spi = spi;
if (data && data->name)
nor->mtd.name = data->name;
@@ -3371,6 +4055,9 @@ static void spi_nor_shutdown(struct spi_mem *spimem)
{
struct spi_nor *nor = spi_mem_get_drvdata(spimem);
+ if (nor->addr_width == 3 &&
+ (nor->mtd.size >> nor->shift) > 0x1000000)
+ spi_nor_write_ear(nor, 0);
spi_nor_restore(nor);
}
diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
index 6f2f6b27173f..25dddc092b6c 100644
--- a/drivers/mtd/spi-nor/core.h
+++ b/drivers/mtd/spi-nor/core.h
@@ -26,6 +26,7 @@ enum spi_nor_option_flags {
SNOR_F_HAS_SR_TB_BIT6 = BIT(11),
SNOR_F_HAS_4BIT_BP = BIT(12),
SNOR_F_HAS_SR_BP3_BIT6 = BIT(13),
+ SNOR_F_BROKEN_OCTAL_DDR = BIT(14),
};
struct spi_nor_read_command {
@@ -311,7 +312,12 @@ struct flash_info {
* BP3 is bit 6 of status register.
* Must be used with SPI_NOR_4BIT_BP.
*/
+#define SST_GLOBAL_PROT_UNLK BIT(16) /* Unlock the Global protection for
+ * sst flashes
+ */
+#define SPI_NOR_OCTAL_WRITE BIT(17) /* Flash supports Octal Write */
+ int (*quad_enable)(struct spi_nor *nor);
/* Part specific fixup hooks. */
const struct spi_nor_fixups *fixups;
};
@@ -402,7 +408,7 @@ extern const struct spi_nor_manufacturer spi_nor_xmc;
int spi_nor_write_enable(struct spi_nor *nor);
int spi_nor_write_disable(struct spi_nor *nor);
int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable);
-int spi_nor_write_ear(struct spi_nor *nor, u8 ear);
+int spi_nor_write_ear(struct spi_nor *nor, u32 ear);
int spi_nor_wait_till_ready(struct spi_nor *nor);
int spi_nor_lock_and_prep(struct spi_nor *nor);
void spi_nor_unlock_and_unprep(struct spi_nor *nor);
diff --git a/drivers/mtd/spi-nor/issi.c b/drivers/mtd/spi-nor/issi.c
index ffcb60e54a80..18691ee069c2 100644
--- a/drivers/mtd/spi-nor/issi.c
+++ b/drivers/mtd/spi-nor/issi.c
@@ -32,33 +32,51 @@ static struct spi_nor_fixups is25lp256_fixups = {
static const struct flash_info issi_parts[] = {
/* ISSI */
+	{ "is25wp080d", INFO(0x9d7014, 0, 64 * 1024, 16, SECT_4K |
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25wp016d", INFO(0x9d7015, 0, 64 * 1024, 32, SECT_4K |
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
{ "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
{ "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "is25lp016d", INFO(0x9d6015, 0, 64 * 1024, 32,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 16,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK) },
{ "is25lp032", INFO(0x9d6016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ) },
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_HAS_LOCK) },
{ "is25lp064", INFO(0x9d6017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ) },
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_HAS_LOCK) },
{ "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256,
- SECT_4K | SPI_NOR_DUAL_READ) },
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_HAS_LOCK) },
{ "is25lp256", INFO(0x9d6019, 0, 64 * 1024, 512,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES)
+ SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK)
.fixups = &is25lp256_fixups },
+ { "is25wp256d", INFO(0x9d7019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK |
+ SPI_NOR_4B_OPCODES) },
{ "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK) },
{ "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ
+ | SPI_NOR_HAS_LOCK) },
{ "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "is25wp256", INFO(0x9d7019, 0, 64 * 1024, 512,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_4B_OPCODES)
.fixups = &is25lp256_fixups },
+ { "is25lp512m", INFO(0x9d601a, 0, 64 * 1024, 1024,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "is25wp512m", INFO(0x9d701a, 0, 64 * 1024, 1024,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK |
+ SPI_NOR_4B_OPCODES) },
/* PMC */
{ "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
index ab0f963d630c..b623f9f9da57 100644
--- a/drivers/mtd/spi-nor/macronix.c
+++ b/drivers/mtd/spi-nor/macronix.c
@@ -76,6 +76,8 @@ static const struct flash_info macronix_parts[] = {
{ "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048,
SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
+ { "mx66u1g45g", INFO(0xc2253b, 0, 64 * 1024, 2048, SECT_4K |
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048,
SPI_NOR_QUAD_READ) },
};
diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c
index 6c034b9718e2..a3bcdc9a36a6 100644
--- a/drivers/mtd/spi-nor/micron-st.c
+++ b/drivers/mtd/spi-nor/micron-st.c
@@ -11,10 +11,13 @@
static const struct flash_info micron_parts[] = {
{ "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512,
SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
- SPI_NOR_4B_OPCODES) },
+ SPI_NOR_OCTAL_WRITE | SPI_NOR_4B_OPCODES) },
+ { "mt35xu01g", INFO(0x2c5b1b, 0, 128 * 1024, 1024,
+ SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
+ SPI_NOR_OCTAL_WRITE | SPI_NOR_4B_OPCODES) },
{ "mt35xu02g", INFO(0x2c5b1c, 0, 128 * 1024, 2048,
SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
- SPI_NOR_4B_OPCODES) },
+ SPI_NOR_OCTAL_WRITE | SPI_NOR_4B_OPCODES) },
};
static const struct flash_info st_parts[] = {
@@ -28,13 +31,13 @@ static const struct flash_info st_parts[] = {
SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256,
- SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
- { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256,
- SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
- { "mt25ql256a", INFO6(0x20ba19, 0x104400, 64 * 1024, 512,
- SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K |
+ SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K |
+ SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
+ { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K |
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_FSR |
+ SPI_NOR_HAS_LOCK) },
{ "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K |
USE_FSR | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
@@ -46,29 +49,28 @@ static const struct flash_info st_parts[] = {
{ "mt25ql512a", INFO6(0x20ba20, 0x104400, 64 * 1024, 1024,
SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
- { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024,
- SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
- SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
- SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
+ { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K |
+ USE_FSR | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK) },
{ "mt25qu512a", INFO6(0x20bb20, 0x104400, 64 * 1024, 1024,
SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
- SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES |
+ SPI_NOR_HAS_LOCK) },
{ "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024,
SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
- { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048,
- SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
- NO_CHIP_ERASE) },
- { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048,
- SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
- NO_CHIP_ERASE) },
+ { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K |
+ USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE |
+			SPI_NOR_HAS_LOCK) },
+ { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K |
+ USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE |
+ SPI_NOR_HAS_LOCK) },
{ "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096,
SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
- NO_CHIP_ERASE) },
+ NO_CHIP_ERASE | SPI_NOR_HAS_LOCK) },
{ "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096,
SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
- NO_CHIP_ERASE) },
+ NO_CHIP_ERASE | SPI_NOR_HAS_LOCK) },
{ "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
{ "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
index eac1c22b730f..7731b5fa6b01 100644
--- a/drivers/mtd/spi-nor/spansion.c
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -39,10 +39,10 @@ static const struct flash_info spansion_parts[] = {
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64,
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- USE_CLSR) },
+ USE_CLSR | SPI_NOR_HAS_LOCK) },
{ "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256,
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- USE_CLSR) },
+ USE_CLSR | SPI_NOR_HAS_LOCK) },
{ "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) },
{ "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512,
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
@@ -91,6 +91,8 @@ static const struct flash_info spansion_parts[] = {
{ "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_4B_OPCODES) },
+ { "sst26wf016B", INFO(0xbf2651, 0, 64 * 1024, 32,
+ SECT_4K | SST_GLOBAL_PROT_UNLK) },
};
static void spansion_post_sfdp_fixups(struct spi_nor *nor)
diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c
index e0af6d25d573..14f1eeb268f0 100644
--- a/drivers/mtd/spi-nor/sst.c
+++ b/drivers/mtd/spi-nor/sst.c
@@ -35,7 +35,7 @@ static const struct flash_info sst_parts[] = {
SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "sst26vf016b", INFO(0xbf2641, 0, 64 * 1024, 32,
- SECT_4K | SPI_NOR_DUAL_READ) },
+ SECT_4K | SPI_NOR_DUAL_READ | SST_GLOBAL_PROT_UNLK) },
{ "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index c1dbab8c896d..f4b544b69646 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -259,7 +259,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
.tseg2_min = 1,
.tseg2_max = 128,
.sjw_max = 128,
- .brp_min = 1,
+ .brp_min = 2,
.brp_max = 256,
.brp_inc = 1,
};
@@ -272,7 +272,7 @@ static struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
.tseg2_min = 1,
.tseg2_max = 16,
.sjw_max = 16,
- .brp_min = 1,
+ .brp_min = 2,
.brp_max = 256,
.brp_inc = 1,
};
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index ab827fb4b6b9..91c30cd374f6 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -79,6 +79,7 @@
#define GEM_NCFGR 0x0004 /* Network Config */
#define GEM_USRIO 0x000c /* User IO */
#define GEM_DMACFG 0x0010 /* DMA Configuration */
+#define GEM_PBUFRXCUT 0x0044 /* RX Partial Store and Forward */
#define GEM_JML 0x0048 /* Jumbo Max Length */
#define GEM_HRB 0x0080 /* Hash Bottom */
#define GEM_HRT 0x0084 /* Hash Top */
@@ -90,6 +91,9 @@
#define GEM_SA3T 0x009C /* Specific3 Top */
#define GEM_SA4B 0x00A0 /* Specific4 Bottom */
#define GEM_SA4T 0x00A4 /* Specific4 Top */
+#define GEM_WOL 0x00B8 /* Wake on LAN */
+#define GEM_RXPTPUNI 0x00D4 /* PTP RX Unicast address */
+#define GEM_TXPTPUNI 0x00D8 /* PTP TX Unicast address */
#define GEM_EFTSH 0x00e8 /* PTP Event Frame Transmitted Seconds Register 47:32 */
#define GEM_EFRSH 0x00ec /* PTP Event Frame Received Seconds Register 47:32 */
#define GEM_PEFTSH 0x00f0 /* PTP Peer Event Frame Transmitted Seconds Register 47:32 */
@@ -155,6 +159,7 @@
#define GEM_PEFTN 0x01f4 /* PTP Peer Event Frame Tx Ns */
#define GEM_PEFRSL 0x01f8 /* PTP Peer Event Frame Rx Sec Low */
#define GEM_PEFRN 0x01fc /* PTP Peer Event Frame Rx Ns */
+#define GEM_PCSCNTRL 0x0200 /* PCS Control */
#define GEM_DCFG1 0x0280 /* Design Config 1 */
#define GEM_DCFG2 0x0284 /* Design Config 2 */
#define GEM_DCFG3 0x0288 /* Design Config 3 */
@@ -227,6 +232,8 @@
#define MACB_TZQ_OFFSET 12 /* Transmit zero quantum pause frame */
#define MACB_TZQ_SIZE 1
#define MACB_SRTSM_OFFSET 15
+#define MACB_PTPUNI_OFFSET 20
+#define MACB_PTPUNI_SIZE 1
#define MACB_OSSMODE_OFFSET 24 /* Enable One Step Synchro Mode */
#define MACB_OSSMODE_SIZE 1
@@ -314,6 +321,11 @@
#define GEM_ADDR64_OFFSET 30 /* Address bus width - 64b or 32b */
#define GEM_ADDR64_SIZE 1
+/* Bitfields in PBUFRXCUT */
+#define GEM_WTRMRK_OFFSET 0 /* Watermark value offset */
+#define GEM_WTRMRK_SIZE 12
+#define GEM_ENCUTTHRU_OFFSET 31 /* Enable RX partial store and forward */
+#define GEM_ENCUTTHRU_SIZE 1
/* Bitfields in NSR */
#define MACB_NSR_LINK_OFFSET 0 /* pcs_link_state */
@@ -376,7 +388,7 @@
#define MACB_PFR_SIZE 1
#define MACB_PTZ_OFFSET 13 /* Enable pause time zero interrupt */
#define MACB_PTZ_SIZE 1
-#define MACB_WOL_OFFSET 14 /* Enable wake-on-lan interrupt */
+#define MACB_WOL_OFFSET 28 /* Enable WOL received interrupt */
#define MACB_WOL_SIZE 1
#define MACB_DRQFR_OFFSET 18 /* PTP Delay Request Frame Received */
#define MACB_DRQFR_SIZE 1
@@ -455,6 +467,10 @@
#define MACB_REV_OFFSET 0
#define MACB_REV_SIZE 16
+/* Bitfields in PCSCNTRL */
+#define GEM_PCSAUTONEG_OFFSET 12
+#define GEM_PCSAUTONEG_SIZE 1
+
/* Bitfields in DCFG1. */
#define GEM_IRQCOR_OFFSET 23
#define GEM_IRQCOR_SIZE 1
@@ -568,6 +584,9 @@
#define GEM_T2OFST_OFFSET 0 /* offset value */
#define GEM_T2OFST_SIZE 7
+/* Bitfields in queue pointer registers */
+#define GEM_RBQP_DISABLE 0x1
+
/* Offset for screener type 2 compare values (T2CMPOFST).
* Note the offset is applied after the specified point,
* e.g. GEM_T2COMPOFST_ETYPE denotes the EtherType field, so an offset
@@ -652,7 +671,12 @@
#define MACB_CAPS_GEM_HAS_PTP 0x00000040
#define MACB_CAPS_BD_RD_PREFETCH 0x00000080
#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100
+#define MACB_CAPS_PCS 0x00000400
+#define MACB_CAPS_PARTIAL_STORE_FORWARD 0x00000800
+#define MACB_CAPS_WOL 0x00000200
#define MACB_CAPS_MACB_IS_EMAC 0x08000000
+#define MACB_CAPS_NEED_TSUCLK 0x00001000
+#define MACB_CAPS_QUEUE_DISABLE 0x00002000
#define MACB_CAPS_FIFO_MODE 0x10000000
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000
@@ -1168,6 +1192,7 @@ struct macb {
u32 (*macb_reg_readl)(struct macb *bp, int offset);
void (*macb_reg_writel)(struct macb *bp, int offset, u32 value);
+ struct macb_dma_desc *rx_ring_tieoff;
size_t rx_buffer_size;
unsigned int rx_ring_size;
@@ -1190,6 +1215,8 @@ struct macb {
struct gem_stats gem;
} hw_stats;
+ dma_addr_t rx_ring_tieoff_dma;
+
struct macb_or_gem_ops macbgem_ops;
struct mii_bus *mii_bus;
@@ -1214,6 +1241,9 @@ struct macb {
u32 wol;
+ /* holds value of rx watermark value for pbuf_rxcutthru register */
+ u16 rx_watermark;
+
struct macb_ptp_info *ptp_info; /* macb-ptp interface */
#ifdef MACB_EXT_DESC
uint8_t hw_dma_cap;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 3a157be857b0..839a5d59d31c 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -36,6 +36,8 @@
#include <linux/tcp.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
+#include <linux/crc32.h>
+#include <linux/inetdevice.h>
#include "macb.h"
/* This structure is only used for MACB on SiFive FU540 devices */
@@ -82,14 +84,10 @@ struct sifive_fu540_macb_mgmt {
#define GEM_MTU_MIN_SIZE ETH_MIN_MTU
#define MACB_NETIF_LSO NETIF_F_TSO
-#define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0)
-#define MACB_WOL_ENABLED (0x1 << 1)
-
/* Graceful stop timeouts in us. We should allow up to
* 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
*/
-#define MACB_HALT_TIMEOUT 1230
-
+#define MACB_HALT_TIMEOUT 14000
#define MACB_PM_TIMEOUT 100 /* ms */
#define MACB_MDIO_TIMEOUT 1000000 /* in usecs */
@@ -282,6 +280,9 @@ static void macb_set_hwaddr(struct macb *bp)
top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
macb_or_gem_writel(bp, SA1T, top);
+ gem_writel(bp, RXPTPUNI, bottom);
+ gem_writel(bp, TXPTPUNI, bottom);
+
/* Clear unused address register sets */
macb_or_gem_writel(bp, SA2B, 0);
macb_or_gem_writel(bp, SA2T, 0);
@@ -669,7 +670,8 @@ static void macb_mac_link_up(struct phylink_config *config,
spin_unlock_irqrestore(&bp->lock, flags);
/* Enable Rx and Tx */
- macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE) |
+ MACB_BIT(PTPUNI));
netif_tx_wake_all_queues(ndev);
}
@@ -767,6 +769,7 @@ static int macb_mdiobus_register(struct macb *bp)
static int macb_mii_init(struct macb *bp)
{
+ struct device_node *np, *mdio_np;
int err = -ENXIO;
/* Enable management port */
@@ -784,10 +787,19 @@ static int macb_mii_init(struct macb *bp)
snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
bp->pdev->name, bp->pdev->id);
bp->mii_bus->priv = bp;
- bp->mii_bus->parent = &bp->pdev->dev;
+ bp->mii_bus->parent = &bp->dev->dev;
dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
+	np = bp->pdev->dev.of_node;
+ mdio_np = of_get_child_by_name(np, "mdio");
+ if (mdio_np) {
+		err = of_mdiobus_register(bp->mii_bus, mdio_np);
+		of_node_put(mdio_np);
+ if (err)
+ goto err_out_free_mdiobus;
+ }
+
err = macb_mdiobus_register(bp);
if (err)
goto err_out_free_mdiobus;
@@ -899,6 +911,7 @@ static void macb_tx_error_task(struct work_struct *work)
struct sk_buff *skb;
unsigned int tail;
unsigned long flags;
+ bool halt_timeout = false;
netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
(unsigned int)(queue - bp->queues),
@@ -919,9 +932,11 @@ static void macb_tx_error_task(struct work_struct *work)
* (in case we have just queued new packets)
* macb/gem must be halted to write TBQP register
*/
- if (macb_halt_tx(bp))
- /* Just complain for now, reinitializing TX path can be good */
+ if (macb_halt_tx(bp)) {
netdev_err(bp->dev, "BUG: halt tx timed out\n");
+ macb_writel(bp, NCR, macb_readl(bp, NCR) & (~MACB_BIT(TE)));
+ halt_timeout = true;
+ }
/* Treat frames in TX queue including the ones that caused the error.
* Free transmit buffers in upper layer.
@@ -992,6 +1007,9 @@ static void macb_tx_error_task(struct work_struct *work)
macb_writel(bp, TSR, macb_readl(bp, TSR));
queue_writel(queue, IER, MACB_TX_INT_FLAGS);
+ if (halt_timeout)
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE));
+
/* Now we are ready to start transmission again */
netif_tx_start_all_queues(bp->dev);
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
@@ -1094,7 +1112,6 @@ static void gem_rx_refill(struct macb_queue *queue)
/* Make hw descriptor updates visible to CPU */
rmb();
- queue->rx_prepared_head++;
desc = macb_rx_desc(queue, entry);
if (!queue->rx_skbuff[entry]) {
@@ -1133,6 +1150,7 @@ static void gem_rx_refill(struct macb_queue *queue)
dma_wmb();
desc->addr &= ~MACB_BIT(RX_USED);
}
+ queue->rx_prepared_head++;
}
/* Make descriptor updates visible to hardware */
@@ -1163,6 +1181,15 @@ static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
*/
}
+static int macb_validate_hw_csum(struct sk_buff *skb)
+{
+ u32 pkt_csum = *((u32 *)&skb->data[skb->len - ETH_FCS_LEN]);
+ u32 csum = ~crc32_le(~0, skb_mac_header(skb),
+ skb->len + ETH_HLEN - ETH_FCS_LEN);
+
+ return (pkt_csum != csum);
+}
+
static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
int budget)
{
@@ -1224,6 +1251,16 @@ static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
bp->rx_buffer_size, DMA_FROM_DEVICE);
skb->protocol = eth_type_trans(skb, bp->dev);
+
+		/* Validate MAC FCS if RX checksum offload disabled */
+ if (!(bp->dev->features & NETIF_F_RXCSUM)) {
+ if (macb_validate_hw_csum(skb)) {
+ netdev_err(bp->dev, "incorrect FCS\n");
+ bp->dev->stats.rx_dropped++;
+ break;
+ }
+ }
+
skb_checksum_none_assert(skb);
if (bp->dev->features & NETIF_F_RXCSUM &&
!(bp->dev->flags & IFF_PROMISC) &&
@@ -1321,6 +1358,19 @@ static int macb_rx_frame(struct macb_queue *queue, struct napi_struct *napi,
break;
}
+	/* Validate MAC FCS if RX checksum offload disabled */
+ if (!(bp->dev->features & NETIF_F_RXCSUM)) {
+ if (macb_validate_hw_csum(skb)) {
+ netdev_err(bp->dev, "incorrect FCS\n");
+ bp->dev->stats.rx_dropped++;
+
+ /* Make descriptor updates visible to hardware */
+ wmb();
+
+ return 1;
+ }
+ }
+
/* Make descriptor updates visible to hardware */
wmb();
@@ -1535,6 +1585,12 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
spin_lock(&bp->lock);
while (status) {
+ if (status & MACB_BIT(WOL)) {
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ queue_writel(queue, ISR, MACB_BIT(WOL));
+ break;
+ }
+
/* close possible race with dev_close */
if (unlikely(!netif_running(dev))) {
queue_writel(queue, IDR, -1);
@@ -1785,7 +1841,8 @@ static unsigned int macb_tx_map(struct macb *bp,
ctrl |= MACB_BF(TX_LSO, lso_ctrl);
ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
if ((bp->dev->features & NETIF_F_HW_CSUM) &&
- skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl)
+ skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl &&
+ (skb->data_len == 0))
ctrl |= MACB_BIT(TX_NOCRC);
} else
/* Only set MSS/MFS on payload descriptors
@@ -1880,9 +1937,11 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
struct sk_buff *nskb;
u32 fcs;
+ /* Not available for GSO and fragments */
if (!(ndev->features & NETIF_F_HW_CSUM) ||
!((*skb)->ip_summed != CHECKSUM_PARTIAL) ||
- skb_shinfo(*skb)->gso_size) /* Not available for GSO */
+ skb_shinfo(*skb)->gso_size ||
+ ((*skb)->data_len > 0))
return 0;
if (padlen <= 0) {
@@ -1936,7 +1995,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned long flags;
unsigned int desc_cnt, nr_frags, frag_size, f;
unsigned int hdrlen;
- bool is_lso, is_udp = 0;
+ bool is_lso, is_udp = false;
netdev_tx_t ret = NETDEV_TX_OK;
if (macb_clear_csum(skb)) {
@@ -2098,6 +2157,12 @@ static void macb_free_consistent(struct macb *bp)
bp->macbgem_ops.mog_free_rx_buffers(bp);
+ if (bp->rx_ring_tieoff) {
+ dma_free_coherent(&bp->pdev->dev, macb_dma_desc_get_size(bp),
+ bp->rx_ring_tieoff, bp->rx_ring_tieoff_dma);
+ bp->rx_ring_tieoff = NULL;
+ }
+
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
kfree(queue->tx_skb);
queue->tx_skb = NULL;
@@ -2187,6 +2252,16 @@ static int macb_alloc_consistent(struct macb *bp)
if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
goto out_err;
+ /* Required for tie off descriptor for PM cases */
+ if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE)) {
+ bp->rx_ring_tieoff = dma_alloc_coherent(&bp->pdev->dev,
+ macb_dma_desc_get_size(bp),
+ &bp->rx_ring_tieoff_dma,
+ GFP_KERNEL);
+ if (!bp->rx_ring_tieoff)
+ goto out_err;
+ }
+
return 0;
out_err:
@@ -2194,6 +2269,19 @@ out_err:
return -ENOMEM;
}
+static void macb_init_tieoff(struct macb *bp)
+{
+ struct macb_dma_desc *d = bp->rx_ring_tieoff;
+
+ if (bp->num_queues > 1) {
+ /* Setup a wrapping descriptor with no free slots
+ * (WRAP and USED) to tie off/disable unused RX queues.
+ */
+ macb_set_addr(bp, d, MACB_BIT(RX_WRAP) | MACB_BIT(RX_USED));
+ d->ctrl = 0;
+ }
+}
+
static void gem_init_rings(struct macb *bp)
{
struct macb_queue *queue;
@@ -2217,6 +2305,9 @@ static void gem_init_rings(struct macb *bp)
gem_rx_refill(queue);
}
+ if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE))
+ macb_init_tieoff(bp);
+
}
static void macb_init_rings(struct macb *bp)
@@ -2234,6 +2325,8 @@ static void macb_init_rings(struct macb *bp)
bp->queues[0].tx_head = 0;
bp->queues[0].tx_tail = 0;
desc->ctrl |= MACB_BIT(TX_WRAP);
+
+ macb_init_tieoff(bp);
}
static void macb_reset_hw(struct macb *bp)
@@ -2256,6 +2349,10 @@ static void macb_reset_hw(struct macb *bp)
macb_writel(bp, TSR, -1);
macb_writel(bp, RSR, -1);
+ /* Disable RX partial store and forward and reset watermark value */
+ if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD)
+ gem_writel(bp, PBUFRXCUT, 0xFFF);
+
/* Disable all interrupts */
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue_writel(queue, IDR, -1);
@@ -2389,7 +2486,11 @@ static void macb_init_hw(struct macb *bp)
config = macb_mdc_clk_div(bp);
config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
- config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
+
+	/* Do not discard Rx FCS if RX checksum offload disabled */
+ if (bp->dev->features & NETIF_F_RXCSUM)
+ config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
+
if (bp->caps & MACB_CAPS_JUMBO)
config |= MACB_BIT(JFRAME); /* Enable jumbo frames */
else
@@ -2408,7 +2509,21 @@ static void macb_init_hw(struct macb *bp)
if (bp->caps & MACB_CAPS_JUMBO)
bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
+ if ((bp->phy_interface == PHY_INTERFACE_MODE_SGMII) &&
+ (bp->caps & MACB_CAPS_PCS))
+ gem_writel(bp, PCSCNTRL,
+ gem_readl(bp, PCSCNTRL) | GEM_BIT(PCSAUTONEG));
+
macb_configure_dma(bp);
+
+ /* Enable RX partial store and forward and set watermark */
+ if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD) {
+ gem_writel(bp, PBUFRXCUT,
+ (gem_readl(bp, PBUFRXCUT) &
+ GEM_BF(WTRMRK, bp->rx_watermark)) |
+ GEM_BIT(ENCUTTHRU));
+ }
+
}
/* The hash address register is 64 bits long and takes up two
@@ -2820,46 +2935,6 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
regs_buff[13] = gem_readl(bp, DMACFG);
}
-static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
-{
- struct macb *bp = netdev_priv(netdev);
-
- if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
- phylink_ethtool_get_wol(bp->phylink, wol);
- wol->supported |= WAKE_MAGIC;
-
- if (bp->wol & MACB_WOL_ENABLED)
- wol->wolopts |= WAKE_MAGIC;
- }
-}
-
-static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
-{
- struct macb *bp = netdev_priv(netdev);
- int ret;
-
- /* Pass the order to phylink layer */
- ret = phylink_ethtool_set_wol(bp->phylink, wol);
- /* Don't manage WoL on MAC if handled by the PHY
- * or if there's a failure in talking to the PHY
- */
- if (!ret || ret != -EOPNOTSUPP)
- return ret;
-
- if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
- (wol->wolopts & ~WAKE_MAGIC))
- return -EOPNOTSUPP;
-
- if (wol->wolopts & WAKE_MAGIC)
- bp->wol |= MACB_WOL_ENABLED;
- else
- bp->wol &= ~MACB_WOL_ENABLED;
-
- device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
-
- return 0;
-}
-
static int macb_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *kset)
{
@@ -3310,8 +3385,6 @@ static const struct ethtool_ops macb_ethtool_ops = {
.get_regs = macb_get_regs,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
- .get_wol = macb_get_wol,
- .set_wol = macb_set_wol,
.get_link_ksettings = macb_get_link_ksettings,
.set_link_ksettings = macb_set_link_ksettings,
.get_ringparam = macb_get_ringparam,
@@ -3457,10 +3530,29 @@ static void macb_configure_caps(struct macb *bp,
const struct macb_config *dt_conf)
{
u32 dcfg;
+ int retval;
if (dt_conf)
bp->caps = dt_conf->caps;
+ /* By default we set to partial store and forward mode for zynqmp.
+ * Disable if not set in devicetree.
+ */
+ if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD) {
+ retval = of_property_read_u16(bp->pdev->dev.of_node,
+ "rx-watermark",
+ &bp->rx_watermark);
+
+ /* Disable partial store and forward in case of error or
+ * invalid watermark value
+ */
+ if (retval || bp->rx_watermark > 0xFFF) {
+ dev_info(&bp->pdev->dev,
+ "Not enabling partial store and forward\n");
+ bp->caps &= ~MACB_CAPS_PARTIAL_STORE_FORWARD;
+ }
+ }
+
if (hw_is_gem(bp->regs, bp->native_io)) {
bp->caps |= MACB_CAPS_MACB_IS_GEM;
@@ -3709,6 +3801,8 @@ static int macb_init(struct platform_device *pdev)
/* Checksum offload is only available on gem with packet buffer */
if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ if (bp->caps & MACB_CAPS_PARTIAL_STORE_FORWARD)
+ dev->hw_features &= ~NETIF_F_RXCSUM;
if (bp->caps & MACB_CAPS_SG_DISABLED)
dev->hw_features &= ~NETIF_F_SG;
dev->features = dev->hw_features;
@@ -3760,6 +3854,11 @@ static int macb_init(struct platform_device *pdev)
val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
macb_writel(bp, NCFGR, val);
+ if ((bp->phy_interface == PHY_INTERFACE_MODE_SGMII) &&
+ (bp->caps & MACB_CAPS_PCS))
+ gem_writel(bp, PCSCNTRL,
+ gem_readl(bp, PCSCNTRL) | GEM_BIT(PCSAUTONEG));
+
return 0;
}
@@ -4299,7 +4398,20 @@ static const struct macb_config np4_config = {
static const struct macb_config zynqmp_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
MACB_CAPS_JUMBO |
- MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
+ MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH |
+ MACB_CAPS_PCS |
+ MACB_CAPS_PARTIAL_STORE_FORWARD | MACB_CAPS_WOL,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+ .jumbo_max_len = 10240,
+};
+
+static const struct macb_config versal_config = {
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH |
+ MACB_CAPS_PCS | MACB_CAPS_PARTIAL_STORE_FORWARD |
+ MACB_CAPS_WOL | MACB_CAPS_NEED_TSUCLK | MACB_CAPS_QUEUE_DISABLE,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -4331,6 +4443,7 @@ static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
{ .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
+ { .compatible = "cdns,versal-gem", .data = &versal_config},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
@@ -4362,7 +4475,7 @@ static int macb_probe(struct platform_device *pdev)
struct net_device *dev;
struct resource *regs;
void __iomem *mem;
- const char *mac;
+ const u8 *mac;
struct macb *bp;
int err, val;
@@ -4418,20 +4531,13 @@ static int macb_probe(struct platform_device *pdev)
}
bp->num_queues = num_queues;
bp->queue_mask = queue_mask;
- if (macb_config)
- bp->dma_burst_length = macb_config->dma_burst_length;
+ bp->dma_burst_length = macb_config->dma_burst_length;
bp->pclk = pclk;
bp->hclk = hclk;
bp->tx_clk = tx_clk;
bp->rx_clk = rx_clk;
bp->tsu_clk = tsu_clk;
- if (macb_config)
- bp->jumbo_max_len = macb_config->jumbo_max_len;
-
- bp->wol = 0;
- if (of_get_property(np, "magic-packet", NULL))
- bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
- device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
+ bp->jumbo_max_len = macb_config->jumbo_max_len;
spin_lock_init(&bp->lock);
@@ -4496,22 +4602,24 @@ static int macb_probe(struct platform_device *pdev)
err = init(pdev);
if (err)
goto err_out_free_netdev;
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+ goto err_out_free_netdev;
+ }
err = macb_mii_init(bp);
if (err)
- goto err_out_free_netdev;
+ goto err_out_unregister_netdev;
netif_carrier_off(dev);
- err = register_netdev(dev);
- if (err) {
- dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
- goto err_out_unregister_mdio;
- }
-
tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
(unsigned long)bp);
+ if (bp->caps & MACB_CAPS_WOL)
+ device_set_wakeup_capable(&bp->dev->dev, 1);
+
netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
dev->base_addr, dev->irq, dev->dev_addr);
@@ -4521,9 +4629,8 @@ static int macb_probe(struct platform_device *pdev)
return 0;
-err_out_unregister_mdio:
- mdiobus_unregister(bp->mii_bus);
- mdiobus_free(bp->mii_bus);
+err_out_unregister_netdev:
+ unregister_netdev(dev);
err_out_free_netdev:
free_netdev(dev);
@@ -4579,15 +4686,53 @@ static int __maybe_unused macb_suspend(struct device *dev)
struct macb_queue *queue = bp->queues;
unsigned long flags;
unsigned int q;
+ u32 ctrl, arpipmask;
if (!netif_running(netdev))
return 0;
- if (bp->wol & MACB_WOL_ENABLED) {
+ if (device_may_wakeup(&bp->dev->dev)) {
+ spin_lock_irqsave(&bp->lock, flags);
+ ctrl = macb_readl(bp, NCR);
+ ctrl &= ~(MACB_BIT(TE) | MACB_BIT(RE));
+ macb_writel(bp, NCR, ctrl);
+ /* Tie off RX queues */
+ for (q = 0, queue = bp->queues; q < bp->num_queues;
+ ++q, ++queue) {
+ if (bp->caps & MACB_CAPS_QUEUE_DISABLE)
+ queue_writel(queue, RBQP, GEM_RBQP_DISABLE);
+ else
+ queue_writel(queue, RBQP,
+ lower_32_bits(bp->rx_ring_tieoff_dma));
+ }
+ ctrl = macb_readl(bp, NCR);
+ ctrl |= MACB_BIT(RE);
+ macb_writel(bp, NCR, ctrl);
+ gem_writel(bp, NCFGR, gem_readl(bp, NCFGR) & ~MACB_BIT(NBC));
+ macb_writel(bp, TSR, -1);
+ macb_writel(bp, RSR, -1);
+ macb_readl(bp, ISR);
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ macb_writel(bp, ISR, -1);
+
+ /* Enable WOL (Q0 only) and disable all other interrupts */
macb_writel(bp, IER, MACB_BIT(WOL));
- macb_writel(bp, WOL, MACB_BIT(MAG));
+ for (q = 1, queue = bp->queues; q < bp->num_queues;
+ ++q, ++queue) {
+ queue_writel(queue, IDR, bp->rx_intr_mask |
+ MACB_TX_INT_FLAGS |
+ MACB_BIT(HRESP));
+ }
+
+ arpipmask = cpu_to_be32p(&bp->dev->ip_ptr->ifa_list->ifa_local)
+ & 0xFFFF;
+ gem_writel(bp, WOL, MACB_BIT(ARP) | arpipmask);
+ spin_unlock_irqrestore(&bp->lock, flags);
enable_irq_wake(bp->queues[0].irq);
netif_device_detach(netdev);
+ for (q = 0, queue = bp->queues; q < bp->num_queues;
+ ++q, ++queue)
+ napi_disable(&queue->napi);
} else {
netif_device_detach(netdev);
for (q = 0, queue = bp->queues; q < bp->num_queues;
@@ -4620,6 +4765,7 @@ static int __maybe_unused macb_resume(struct device *dev)
struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
struct macb_queue *queue = bp->queues;
+ unsigned long flags;
unsigned int q;
if (!netif_running(netdev))
@@ -4628,10 +4774,20 @@ static int __maybe_unused macb_resume(struct device *dev)
if (!device_may_wakeup(dev))
pm_runtime_force_resume(dev);
- if (bp->wol & MACB_WOL_ENABLED) {
+ if (device_may_wakeup(&bp->dev->dev)) {
+ spin_lock_irqsave(&bp->lock, flags);
macb_writel(bp, IDR, MACB_BIT(WOL));
- macb_writel(bp, WOL, 0);
+ gem_writel(bp, WOL, 0);
+ /* Clear Q0 ISR as WOL was enabled on Q0 */
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ macb_writel(bp, ISR, -1);
disable_irq_wake(bp->queues[0].irq);
+ spin_unlock_irqrestore(&bp->lock, flags);
+ macb_writel(bp, NCR, MACB_BIT(MPE));
+ for (q = 0, queue = bp->queues; q < bp->num_queues;
+ ++q, ++queue)
+ napi_enable(&queue->napi);
+ netif_carrier_on(netdev);
} else {
macb_writel(bp, NCR, MACB_BIT(MPE));
@@ -4670,7 +4826,10 @@ static int __maybe_unused macb_runtime_suspend(struct device *dev)
clk_disable_unprepare(bp->pclk);
clk_disable_unprepare(bp->rx_clk);
}
- clk_disable_unprepare(bp->tsu_clk);
+
+ if (!(device_may_wakeup(&bp->dev->dev) &&
+ (bp->caps & MACB_CAPS_NEED_TSUCLK)))
+ clk_disable_unprepare(bp->tsu_clk);
return 0;
}
@@ -4686,7 +4845,10 @@ static int __maybe_unused macb_runtime_resume(struct device *dev)
clk_prepare_enable(bp->tx_clk);
clk_prepare_enable(bp->rx_clk);
}
- clk_prepare_enable(bp->tsu_clk);
+
+ if (!(device_may_wakeup(&bp->dev->dev) &&
+ (bp->caps & MACB_CAPS_NEED_TSUCLK)))
+ clk_prepare_enable(bp->tsu_clk);
return 0;
}
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index 43a3f0dbf857..9c751a7ba153 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -242,6 +242,7 @@ static int gem_hw_timestamp(struct macb *bp, u32 dma_desc_ts_1,
u32 dma_desc_ts_2, struct timespec64 *ts)
{
struct timespec64 tsu;
+ bool sec_rollover = false;
ts->tv_sec = (GEM_BFEXT(DMA_SECH, dma_desc_ts_2) << GEM_DMA_SECL_SIZE) |
GEM_BFEXT(DMA_SECL, dma_desc_ts_1);
@@ -259,9 +260,12 @@ static int gem_hw_timestamp(struct macb *bp, u32 dma_desc_ts_1,
*/
if ((ts->tv_sec & (GEM_DMA_SEC_TOP >> 1)) &&
!(tsu.tv_sec & (GEM_DMA_SEC_TOP >> 1)))
- ts->tv_sec -= GEM_DMA_SEC_TOP;
+ sec_rollover = true;
+
+ ts->tv_sec |= ((~GEM_DMA_SEC_MASK) & tsu.tv_sec);
- ts->tv_sec += ((~GEM_DMA_SEC_MASK) & tsu.tv_sec);
+ if (sec_rollover)
+ ts->tv_sec -= GEM_DMA_SEC_TOP;
return 0;
}
@@ -275,6 +279,12 @@ void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb,
if (GEM_BFEXT(DMA_RXVALID, desc->addr)) {
desc_ptp = macb_ptp_desc(bp, desc);
+ /* Unlikely but check */
+ if (!desc_ptp) {
+ dev_warn_ratelimited(&bp->pdev->dev,
+ "Timestamp not supported in BD\n");
+ return;
+ }
gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
@@ -307,8 +317,11 @@ int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb,
if (CIRC_SPACE(head, tail, PTP_TS_BUFFER_SIZE) == 0)
return -ENOMEM;
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
desc_ptp = macb_ptp_desc(queue->bp, desc);
+ /* Unlikely but check */
+ if (!desc_ptp)
+ return -EINVAL;
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_timestamp = &queue->tx_timestamps[head];
tx_timestamp->skb = skb;
/* ensure ts_1/ts_2 is loaded after ctrl (TX_USED check) */
@@ -385,11 +398,14 @@ void gem_ptp_init(struct net_device *dev)
void gem_ptp_remove(struct net_device *ndev)
{
struct macb *bp = netdev_priv(ndev);
+ unsigned long flags;
if (bp->ptp_clock)
ptp_clock_unregister(bp->ptp_clock);
+ spin_lock_irqsave(&bp->tsu_clk_lock, flags);
gem_ptp_clear_timer(bp);
+ spin_unlock_irqrestore(&bp->tsu_clk_lock, flags);
dev_info(&bp->pdev->dev, "%s ptp clock unregistered.\n",
GEM_PTP_TIMER_NAME);
@@ -462,7 +478,7 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
return -ERANGE;
/* fall through */
case HWTSTAMP_TX_ON:
- tx_bd_control = TSTAMP_ALL_FRAMES;
+ tx_bd_control = TSTAMP_ALL_PTP_FRAMES;
break;
default:
return -ERANGE;
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 0810af8193cb..f60617a7faba 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
#
-# Xilink device configuration
+# Xilinx device configuration
#
config NET_VENDOR_XILINX
@@ -18,17 +18,30 @@ if NET_VENDOR_XILINX
config XILINX_EMACLITE
tristate "Xilinx 10/100 Ethernet Lite support"
- depends on PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS
select PHYLIB
+ depends on HAS_IOMEM
---help---
This driver supports the 10/100 Ethernet Lite from Xilinx.
config XILINX_AXI_EMAC
tristate "Xilinx 10/100/1000 AXI Ethernet support"
- select PHYLINK
+ select PHYLIB
---help---
This driver supports the 10/100/1000 Ethernet from Xilinx for the
AXI bus interface used in Xilinx Virtex FPGAs and Soc's.
+config XILINX_AXI_EMAC_HWTSTAMP
+ bool "Generate hardware packet timestamps"
+ depends on XILINX_AXI_EMAC
+ select PTP_1588_CLOCK
+ default n
+ ---help---
+	  Generate hardware packet timestamps. This is to facilitate IEEE 1588.
+config AXIENET_HAS_MCDMA
+ bool "AXI Ethernet is configured with MCDMA"
+ depends on XILINX_AXI_EMAC
+ default n
+ ---help---
+	  When the hardware is generated with AXI Ethernet configured with MCDMA, select this option.
config XILINX_LL_TEMAC
tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
@@ -37,4 +50,54 @@ config XILINX_LL_TEMAC
This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
core used in Xilinx Spartan and Virtex FPGAs
+config XILINX_TSN
+ bool "Enable Xilinx's TSN IP"
+ default n
+ ---help---
+ Enable Xilinx's TSN IP.
+
+config XILINX_TSN_PTP
+ bool "Generate hardware packet timestamps using Xilinx's TSN IP"
+ depends on XILINX_TSN
+ select PTP_1588_CLOCK
+ default y
+ ---help---
+	  Generate hardware packet timestamps. This is to facilitate IEEE 1588.
+
+config XILINX_TSN_QBV
+ bool "Support Qbv protocol in TSN"
+ depends on XILINX_TSN_PTP
+ select PTP_1588_CLOCK
+ default y
+ ---help---
+ Enables TSN Qbv protocol.
+
+config XILINX_TSN_SWITCH
+ bool "Support TSN switch"
+ depends on XILINX_TSN
+ default y
+ ---help---
+ Enable Xilinx's TSN Switch support.
+
+config XILINX_TSN_QCI
+ bool "Support Qci protocol in TSN"
+ depends on XILINX_TSN_SWITCH
+ default y
+ ---help---
+ Enable TSN QCI protocol.
+
+config XILINX_TSN_CB
+ bool "Support CB protocol in TSN"
+ depends on XILINX_TSN_SWITCH
+ default y
+ ---help---
+ Enable TSN CB protocol support.
+
+config XILINX_TSN_QBR
+ bool "Support QBR protocol in TSN"
+ depends on XILINX_TSN_SWITCH
+ default y
+ ---help---
+ Enable TSN QBR protocol support.
+
endif # NET_VENDOR_XILINX
diff --git a/drivers/net/ethernet/xilinx/Makefile b/drivers/net/ethernet/xilinx/Makefile
index 7d7dc1771423..54de845ee4a1 100644
--- a/drivers/net/ethernet/xilinx/Makefile
+++ b/drivers/net/ethernet/xilinx/Makefile
@@ -1,10 +1,18 @@
# SPDX-License-Identifier: GPL-2.0
#
-# Makefile for the Xilink network device drivers.
+# Makefile for the Xilinx network device drivers.
#
ll_temac-objs := ll_temac_main.o ll_temac_mdio.o
obj-$(CONFIG_XILINX_LL_TEMAC) += ll_temac.o
obj-$(CONFIG_XILINX_EMACLITE) += xilinx_emaclite.o
-xilinx_emac-objs := xilinx_axienet_main.o xilinx_axienet_mdio.o
+obj-$(CONFIG_XILINX_TSN) += xilinx_tsn_ep.o
+obj-$(CONFIG_XILINX_TSN_PTP) += xilinx_tsn_ptp_xmit.o xilinx_tsn_ptp_clock.o
+obj-$(CONFIG_XILINX_TSN_QBV) += xilinx_tsn_shaper.o
+obj-$(CONFIG_XILINX_TSN_QCI) += xilinx_tsn_qci.o
+obj-$(CONFIG_XILINX_TSN_CB) += xilinx_tsn_cb.o
+obj-$(CONFIG_XILINX_TSN_SWITCH) += xilinx_tsn_switch.o
+xilinx_emac-objs := xilinx_axienet_main.o xilinx_axienet_mdio.o xilinx_axienet_dma.o
obj-$(CONFIG_XILINX_AXI_EMAC) += xilinx_emac.o
+obj-$(CONFIG_XILINX_TSN_QBR) += xilinx_tsn_preemption.o
+obj-$(CONFIG_AXIENET_HAS_MCDMA) += xilinx_axienet_mcdma.o
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index fbaf3c987d9c..9edcb46152d9 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -13,7 +13,9 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/if_vlan.h>
-#include <linux/phylink.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
+#include <linux/of_platform.h>
/* Packet size info */
#define XAE_HDR_SIZE 14 /* Size of Ethernet header */
@@ -25,29 +27,39 @@
#define XAE_MAX_VLAN_FRAME_SIZE (XAE_MTU + VLAN_ETH_HLEN + XAE_TRL_SIZE)
#define XAE_MAX_JUMBO_FRAME_SIZE (XAE_JUMBO_MTU + XAE_HDR_SIZE + XAE_TRL_SIZE)
+/* DMA address width min and max range */
+#define XAE_DMA_MASK_MIN 32
+#define XAE_DMA_MASK_MAX 64
+
+/* In AXI DMA Tx and Rx queue count is same */
+#define for_each_tx_dma_queue(lp, var) \
+ for ((var) = 0; (var) < (lp)->num_tx_queues; (var)++)
+
+#define for_each_rx_dma_queue(lp, var) \
+ for ((var) = 0; (var) < (lp)->num_rx_queues; (var)++)
/* Configuration options */
/* Accept all incoming packets. Default: disabled (cleared) */
-#define XAE_OPTION_PROMISC (1 << 0)
+#define XAE_OPTION_PROMISC BIT(0)
/* Jumbo frame support for Tx & Rx. Default: disabled (cleared) */
-#define XAE_OPTION_JUMBO (1 << 1)
+#define XAE_OPTION_JUMBO BIT(1)
/* VLAN Rx & Tx frame support. Default: disabled (cleared) */
-#define XAE_OPTION_VLAN (1 << 2)
+#define XAE_OPTION_VLAN BIT(2)
/* Enable recognition of flow control frames on Rx. Default: enabled (set) */
-#define XAE_OPTION_FLOW_CONTROL (1 << 4)
+#define XAE_OPTION_FLOW_CONTROL BIT(4)
/* Strip FCS and PAD from incoming frames. Note: PAD from VLAN frames is not
* stripped. Default: disabled (set)
*/
-#define XAE_OPTION_FCS_STRIP (1 << 5)
+#define XAE_OPTION_FCS_STRIP BIT(5)
/* Generate FCS field and add PAD automatically for outgoing frames.
* Default: enabled (set)
*/
-#define XAE_OPTION_FCS_INSERT (1 << 6)
+#define XAE_OPTION_FCS_INSERT BIT(6)
/* Enable Length/Type error checking for incoming frames. When this option is
* set, the MAC will filter frames that have a mismatched type/length field
@@ -55,13 +67,13 @@
* types of frames are encountered. When this option is cleared, the MAC will
* allow these types of frames to be received. Default: enabled (set)
*/
-#define XAE_OPTION_LENTYPE_ERR (1 << 7)
+#define XAE_OPTION_LENTYPE_ERR BIT(7)
/* Enable the transmitter. Default: enabled (set) */
-#define XAE_OPTION_TXEN (1 << 11)
+#define XAE_OPTION_TXEN BIT(11)
/* Enable the receiver. Default: enabled (set) */
-#define XAE_OPTION_RXEN (1 << 12)
+#define XAE_OPTION_RXEN BIT(12)
/* Default options set when device is initialized or reset */
#define XAE_OPTION_DEFAULTS \
@@ -122,7 +134,7 @@
/* Default TX/RX Threshold and waitbound values for SGDMA mode */
#define XAXIDMA_DFT_TX_THRESHOLD 24
#define XAXIDMA_DFT_TX_WAITBOUND 254
-#define XAXIDMA_DFT_RX_THRESHOLD 24
+#define XAXIDMA_DFT_RX_THRESHOLD 1
#define XAXIDMA_DFT_RX_WAITBOUND 254
#define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */
@@ -141,6 +153,22 @@
#define XAXIDMA_BD_MINIMUM_ALIGNMENT 0x40
+/* AXI Tx Timestamp Stream FIFO Register Definitions */
+#define XAXIFIFO_TXTS_ISR 0x00000000 /* Interrupt Status Register */
+#define XAXIFIFO_TXTS_TXFD 0x00000010 /* Tx Data Write Port */
+#define XAXIFIFO_TXTS_TLR 0x00000014 /* Transmit Length Register */
+#define XAXIFIFO_TXTS_RFO 0x0000001C /* Rx Fifo Occupancy */
+#define XAXIFIFO_TXTS_RDFR 0x00000018 /* Rx Fifo reset */
+#define XAXIFIFO_TXTS_RXFD 0x00000020 /* Rx Data Read Port */
+#define XAXIFIFO_TXTS_RLR 0x00000024 /* Receive Length Register */
+#define XAXIFIFO_TXTS_SRR 0x00000028 /* AXI4-Stream Reset */
+
+#define XAXIFIFO_TXTS_INT_RC_MASK 0x04000000
+#define XAXIFIFO_TXTS_RXFD_MASK 0x7FFFFFFF
+#define XAXIFIFO_TXTS_RESET_MASK 0x000000A5
+#define XAXIFIFO_TXTS_TAG_MASK 0xFFFF0000
+#define XAXIFIFO_TXTS_TAG_SHIFT 16
+
/* Axi Ethernet registers definition */
#define XAE_RAF_OFFSET 0x00000000 /* Reset and Address filter */
#define XAE_TPF_OFFSET 0x00000004 /* Tx Pause Frame */
@@ -159,16 +187,20 @@
#define XAE_RCW1_OFFSET 0x00000404 /* Rx Configuration Word 1 */
#define XAE_TC_OFFSET 0x00000408 /* Tx Configuration */
#define XAE_FCC_OFFSET 0x0000040C /* Flow Control Configuration */
-#define XAE_EMMC_OFFSET 0x00000410 /* EMAC mode configuration */
-#define XAE_PHYC_OFFSET 0x00000414 /* RGMII/SGMII configuration */
#define XAE_ID_OFFSET 0x000004F8 /* Identification register */
-#define XAE_MDIO_MC_OFFSET 0x00000500 /* MII Management Config */
-#define XAE_MDIO_MCR_OFFSET 0x00000504 /* MII Management Control */
-#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MII Management Write Data */
-#define XAE_MDIO_MRD_OFFSET 0x0000050C /* MII Management Read Data */
+#define XAE_EMMC_OFFSET 0x00000410 /* MAC speed configuration */
+#define XAE_RMFC_OFFSET 0x00000414 /* RX Max Frame Configuration */
+#define XAE_MDIO_MC_OFFSET 0x00000500 /* MDIO Setup */
+#define XAE_MDIO_MCR_OFFSET 0x00000504 /* MDIO Control */
+#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MDIO Write Data */
+#define XAE_MDIO_MRD_OFFSET 0x0000050C /* MDIO Read Data */
+#define XAE_TEMAC_IS_OFFSET 0x00000600 /* TEMAC Interrupt Status */
+#define XAE_TEMAC_IP_OFFSET 0x00000610 /* TEMAC Interrupt Pending Status */
+#define XAE_TEMAC_IE_OFFSET 0x00000620 /* TEMAC Interrupt Enable Status */
+#define XAE_TEMAC_IC_OFFSET 0x00000630 /* TEMAC Interrupt Clear Status */
#define XAE_UAW0_OFFSET 0x00000700 /* Unicast address word 0 */
#define XAE_UAW1_OFFSET 0x00000704 /* Unicast address word 1 */
-#define XAE_FMI_OFFSET 0x00000708 /* Filter Mask Index */
+#define XAE_FMC_OFFSET 0x00000708 /* Frame Filter Control */
#define XAE_AF0_OFFSET 0x00000710 /* Address Filter 0 */
#define XAE_AF1_OFFSET 0x00000714 /* Address Filter 1 */
@@ -229,6 +261,7 @@
#define XAE_TPID_3_MASK 0xFFFF0000 /* TPID 1 */
/* Bit masks for Axi Ethernet RCW1 register */
+#define XAE_RCW1_INBAND1588_MASK 0x00400000 /* Inband 1588 Enable */
#define XAE_RCW1_RST_MASK 0x80000000 /* Reset */
#define XAE_RCW1_JUM_MASK 0x40000000 /* Jumbo frame enable */
/* In-Band FCS enable (FCS not stripped) */
@@ -245,6 +278,7 @@
#define XAE_RCW1_PAUSEADDR_MASK 0x0000FFFF
/* Bit masks for Axi Ethernet TC register */
+#define XAE_TC_INBAND1588_MASK 0x00400000 /* Inband 1588 Enable */
#define XAE_TC_RST_MASK 0x80000000 /* Reset */
#define XAE_TC_JUM_MASK 0x40000000 /* Jumbo frame enable */
/* In-Band FCS enable (FCS not generated) */
@@ -269,18 +303,7 @@
#define XAE_EMMC_LINKSPD_10 0x00000000 /* Link Speed mask for 10 Mbit */
#define XAE_EMMC_LINKSPD_100 0x40000000 /* Link Speed mask for 100 Mbit */
#define XAE_EMMC_LINKSPD_1000 0x80000000 /* Link Speed mask for 1000 Mbit */
-
-/* Bit masks for Axi Ethernet PHYC register */
-#define XAE_PHYC_SGMIILINKSPEED_MASK 0xC0000000 /* SGMII link speed mask*/
-#define XAE_PHYC_RGMIILINKSPEED_MASK 0x0000000C /* RGMII link speed */
-#define XAE_PHYC_RGMIIHD_MASK 0x00000002 /* RGMII Half-duplex */
-#define XAE_PHYC_RGMIILINK_MASK 0x00000001 /* RGMII link status */
-#define XAE_PHYC_RGLINKSPD_10 0x00000000 /* RGMII link 10 Mbit */
-#define XAE_PHYC_RGLINKSPD_100 0x00000004 /* RGMII link 100 Mbit */
-#define XAE_PHYC_RGLINKSPD_1000 0x00000008 /* RGMII link 1000 Mbit */
-#define XAE_PHYC_SGLINKSPD_10 0x00000000 /* SGMII link 10 Mbit */
-#define XAE_PHYC_SGLINKSPD_100 0x40000000 /* SGMII link 100 Mbit */
-#define XAE_PHYC_SGLINKSPD_1000 0x80000000 /* SGMII link 1000 Mbit */
+#define XAE_EMMC_LINKSPD_2500 0x80000000 /* Link Speed mask for 2500 Mbit */
/* Bit masks for Axi Ethernet MDIO interface MC register */
#define XAE_MDIO_MC_MDIOEN_MASK 0x00000040 /* MII management enable */
@@ -298,38 +321,26 @@
#define XAE_MDIO_MCR_INITIATE_MASK 0x00000800 /* Ready Mask */
#define XAE_MDIO_MCR_READY_MASK 0x00000080 /* Ready Mask */
-/* Bit masks for Axi Ethernet MDIO interface MIS, MIP, MIE, MIC registers */
-#define XAE_MDIO_INT_MIIM_RDY_MASK 0x00000001 /* MIIM Interrupt */
-
/* Bit masks for Axi Ethernet UAW1 register */
/* Station address bits [47:32]; Station address
* bits [31:0] are stored in register UAW0
*/
#define XAE_UAW1_UNICASTADDR_MASK 0x0000FFFF
-/* Bit masks for Axi Ethernet FMI register */
-#define XAE_FMI_PM_MASK 0x80000000 /* Promis. mode enable */
-#define XAE_FMI_IND_MASK 0x00000003 /* Index Mask */
+/* Bit masks for Axi Ethernet FMC register */
+#define XAE_FMC_PM_MASK 0x80000000 /* Promis. mode enable */
+#define XAE_FMC_IND_MASK 0x00000003 /* Index Mask */
#define XAE_MDIO_DIV_DFT 29 /* Default MDIO clock divisor */
-/* Defines for different options for C_PHY_TYPE parameter in Axi Ethernet IP */
-#define XAE_PHY_TYPE_MII 0
-#define XAE_PHY_TYPE_GMII 1
-#define XAE_PHY_TYPE_RGMII_1_3 2
-#define XAE_PHY_TYPE_RGMII_2_0 3
-#define XAE_PHY_TYPE_SGMII 4
-#define XAE_PHY_TYPE_1000BASE_X 5
-
- /* Total number of entries in the hardware multicast table. */
+/* Total number of entries in the hardware multicast table. */
#define XAE_MULTICAST_CAM_TABLE_NUM 4
/* Axi Ethernet Synthesis features */
-#define XAE_FEATURE_PARTIAL_RX_CSUM (1 << 0)
-#define XAE_FEATURE_PARTIAL_TX_CSUM (1 << 1)
-#define XAE_FEATURE_FULL_RX_CSUM (1 << 2)
-#define XAE_FEATURE_FULL_TX_CSUM (1 << 3)
-#define XAE_FEATURE_DMA_64BIT (1 << 4)
+#define XAE_FEATURE_PARTIAL_RX_CSUM BIT(0)
+#define XAE_FEATURE_PARTIAL_TX_CSUM BIT(1)
+#define XAE_FEATURE_FULL_RX_CSUM BIT(2)
+#define XAE_FEATURE_FULL_TX_CSUM BIT(3)
#define XAE_NO_CSUM_OFFLOAD 0
@@ -339,12 +350,142 @@
#define DELAY_OF_ONE_MILLISEC 1000
+#define XAXIENET_NAPI_WEIGHT 64
+
+/* Definition of 1588 PTP in Axi Ethernet IP */
+#define TX_TS_OP_NOOP 0x0
+#define TX_TS_OP_ONESTEP 0x1
+#define TX_TS_OP_TWOSTEP 0x2
+#define TX_TS_CSUM_UPDATE 0x1
+#define TX_PTP_CSUM_OFFSET 0x28
+#define TX_PTP_TS_OFFSET 0x4C
+
+/* XXV MAC Register Definitions */
+#define XXV_GT_RESET_OFFSET 0x00000000
+#define XXV_TC_OFFSET 0x0000000C
+#define XXV_RCW1_OFFSET 0x00000014
+#define XXV_JUM_OFFSET 0x00000018
+#define XXV_TICKREG_OFFSET 0x00000020
+#define XXV_STATRX_BLKLCK_OFFSET 0x0000040C
+#define XXV_USXGMII_AN_OFFSET 0x000000C8
+#define XXV_USXGMII_AN_STS_OFFSET 0x00000458
+
+/* XXV MAC Register Mask Definitions */
+#define XXV_GT_RESET_MASK BIT(0)
+#define XXV_TC_TX_MASK BIT(0)
+#define XXV_RCW1_RX_MASK BIT(0)
+#define XXV_RCW1_FCS_MASK BIT(1)
+#define XXV_TC_FCS_MASK BIT(1)
+#define XXV_MIN_JUM_MASK GENMASK(7, 0)
+#define XXV_MAX_JUM_MASK GENMASK(10, 8)
+#define XXV_RX_BLKLCK_MASK BIT(0)
+#define XXV_TICKREG_STATEN_MASK BIT(0)
+#define XXV_MAC_MIN_PKT_LEN 64
+
+/* USXGMII Register Mask Definitions */
+#define USXGMII_AN_EN BIT(5)
+#define USXGMII_AN_RESET BIT(6)
+#define USXGMII_AN_RESTART BIT(7)
+#define USXGMII_EN BIT(16)
+#define USXGMII_RATE_MASK 0x0E000700
+#define USXGMII_RATE_1G 0x04000200
+#define USXGMII_RATE_2G5 0x08000400
+#define USXGMII_RATE_10M 0x0
+#define USXGMII_RATE_100M 0x02000100
+#define USXGMII_RATE_5G 0x0A000500
+#define USXGMII_RATE_10G 0x06000300
+#define USXGMII_FD BIT(28)
+#define USXGMII_LINK_STS BIT(31)
+
+/* USXGMII AN STS register mask definitions */
+#define USXGMII_AN_STS_COMP_MASK BIT(16)
+
+/* MCDMA Register Definitions */
+#define XMCDMA_CR_OFFSET 0x00
+#define XMCDMA_SR_OFFSET 0x04
+#define XMCDMA_CHEN_OFFSET 0x08
+#define XMCDMA_CHSER_OFFSET 0x0C
+#define XMCDMA_ERR_OFFSET 0x10
+#define XMCDMA_PKTDROP_OFFSET 0x14
+#define XMCDMA_TXWEIGHT0_OFFSET 0x18
+#define XMCDMA_TXWEIGHT1_OFFSET 0x1C
+#define XMCDMA_RXINT_SER_OFFSET 0x20
+#define XMCDMA_TXINT_SER_OFFSET 0x28
+
+#define XMCDMA_CHOBS1_OFFSET 0x440
+#define XMCDMA_CHOBS2_OFFSET 0x444
+#define XMCDMA_CHOBS3_OFFSET 0x448
+#define XMCDMA_CHOBS4_OFFSET 0x44C
+#define XMCDMA_CHOBS5_OFFSET 0x450
+#define XMCDMA_CHOBS6_OFFSET 0x454
+
+#define XMCDMA_CHAN_RX_OFFSET 0x500
+
+/* Per Channel Registers */
+#define XMCDMA_CHAN_CR_OFFSET(chan_id) (0x40 + ((chan_id) - 1) * 0x40)
+#define XMCDMA_CHAN_SR_OFFSET(chan_id) (0x44 + ((chan_id) - 1) * 0x40)
+#define XMCDMA_CHAN_CURDESC_OFFSET(chan_id) (0x48 + ((chan_id) - 1) * 0x40)
+#define XMCDMA_CHAN_TAILDESC_OFFSET(chan_id) (0x50 + ((chan_id) - 1) * 0x40)
+#define XMCDMA_CHAN_PKTDROP_OFFSET(chan_id) (0x58 + ((chan_id) - 1) * 0x40)
+
+#define XMCDMA_RX_OFFSET 0x500
+
+/* MCDMA Mask registers */
+#define XMCDMA_CR_RUNSTOP_MASK BIT(0) /* Start/stop DMA channel */
+#define XMCDMA_CR_RESET_MASK BIT(2) /* Reset DMA engine */
+
+#define XMCDMA_SR_HALTED_MASK BIT(0)
+#define XMCDMA_SR_IDLE_MASK BIT(1)
+
+#define XMCDMA_IRQ_ERRON_OTHERQ_MASK BIT(3)
+#define XMCDMA_IRQ_PKTDROP_MASK BIT(4)
+#define XMCDMA_IRQ_IOC_MASK BIT(5)
+#define XMCDMA_IRQ_DELAY_MASK BIT(6)
+#define XMCDMA_IRQ_ERR_MASK BIT(7)
+#define XMCDMA_IRQ_ALL_MASK GENMASK(7, 5)
+#define XMCDMA_PKTDROP_COALESCE_MASK GENMASK(15, 8)
+#define XMCDMA_COALESCE_MASK GENMASK(23, 16)
+#define XMCDMA_DELAY_MASK GENMASK(31, 24)
+
+#define XMCDMA_CHEN_MASK GENMASK(7, 0)
+#define XMCDMA_CHID_MASK GENMASK(7, 0)
+
+#define XMCDMA_ERR_INTERNAL_MASK BIT(0)
+#define XMCDMA_ERR_SLAVE_MASK BIT(1)
+#define XMCDMA_ERR_DECODE_MASK BIT(2)
+#define XMCDMA_ERR_SG_INT_MASK BIT(4)
+#define XMCDMA_ERR_SG_SLV_MASK BIT(5)
+#define XMCDMA_ERR_SG_DEC_MASK BIT(6)
+
+#define XMCDMA_PKTDROP_CNT_MASK GENMASK(31, 0)
+
+#define XMCDMA_BD_CTRL_TXSOF_MASK 0x80000000 /* First tx packet */
+#define XMCDMA_BD_CTRL_TXEOF_MASK 0x40000000 /* Last tx packet */
+#define XMCDMA_BD_CTRL_ALL_MASK 0xC0000000 /* All control bits */
+#define XMCDMA_BD_STS_ALL_MASK 0xF0000000 /* All status bits */
+
+#define XMCDMA_COALESCE_SHIFT 16
+#define XMCDMA_DELAY_SHIFT 24
+#define XMCDMA_DFT_TX_THRESHOLD 1
+
+#define XMCDMA_TXWEIGHT_CH_MASK(chan_id) GENMASK(((chan_id) * 4 + 3), \
+ (chan_id) * 4)
+#define XMCDMA_TXWEIGHT_CH_SHIFT(chan_id) ((chan_id) * 4)
+
+/* PTP Packet length */
+#define XAE_TX_PTP_LEN 16
+#define XXV_TX_PTP_LEN 12
+
+/* Macros used when AXI DMA h/w is configured without DRE */
+#define XAE_TX_BUFFERS 64
+#define XAE_MAX_PKT_LEN 8192
+
/**
* struct axidma_bd - Axi Dma buffer descriptor layout
* @next: MM2S/S2MM Next Descriptor Pointer
- * @next_msb: MM2S/S2MM Next Descriptor Pointer (high 32 bits)
+ * @reserved1: Reserved and not used for 32-bit
* @phys: MM2S/S2MM Buffer Address
- * @phys_msb: MM2S/S2MM Buffer Address (high 32 bits)
+ * @reserved2: Reserved and not used for 32-bit
* @reserved3: Reserved and not used
* @reserved4: Reserved and not used
* @cntrl: MM2S/S2MM Control value
@@ -354,12 +495,23 @@
* @app2: MM2S/S2MM User Application Field 2.
* @app3: MM2S/S2MM User Application Field 3.
* @app4: MM2S/S2MM User Application Field 4.
+ * @sw_id_offset: MM2S/S2MM Sw ID
+ * @ptp_tx_skb: If timestamping is enabled used for timestamping skb
+ * Otherwise reserved.
+ * @ptp_tx_ts_tag: Tag value of 2 step timestamping if timestamping is enabled
+ * Otherwise reserved.
+ * @tx_skb: Transmit skb address
+ * @tx_desc_mapping: Tx Descriptor DMA mapping type.
*/
struct axidma_bd {
- u32 next; /* Physical address of next buffer descriptor */
- u32 next_msb; /* high 32 bits for IP >= v7.1, reserved on older IP */
- u32 phys;
- u32 phys_msb; /* for IP >= v7.1, reserved for older IP */
+ phys_addr_t next; /* Physical address of next buffer descriptor */
+#ifndef CONFIG_PHYS_ADDR_T_64BIT
+ u32 reserved1;
+#endif
+ phys_addr_t phys;
+#ifndef CONFIG_PHYS_ADDR_T_64BIT
+ u32 reserved2;
+#endif
u32 reserved3;
u32 reserved4;
u32 cntrl;
@@ -368,10 +520,92 @@ struct axidma_bd {
u32 app1; /* TX start << 16 | insert */
u32 app2; /* TX csum seed */
u32 app3;
- u32 app4; /* Last field used by HW */
- struct sk_buff *skb;
+ u32 app4;
+ phys_addr_t sw_id_offset; /* first unused field by h/w */
+ phys_addr_t ptp_tx_skb;
+ u32 ptp_tx_ts_tag;
+ phys_addr_t tx_skb;
+ u32 tx_desc_mapping;
+} __aligned(XAXIDMA_BD_MINIMUM_ALIGNMENT);
+/**
+ * struct aximcdma_bd - Axi MCDMA buffer descriptor layout
+ * @next: MM2S/S2MM Next Descriptor Pointer
+ * @reserved1: Reserved and not used for 32-bit
+ * @phys: MM2S/S2MM Buffer Address
+ * @reserved2: Reserved and not used for 32-bit
+ * @reserved3: Reserved and not used
+ * @cntrl: MM2S/S2MM Control value
+ * @status: S2MM Status value
+ * @sband_stats: S2MM Sideband Status value
+ * MM2S Status value
+ * @app0: MM2S/S2MM User Application Field 0.
+ * @app1: MM2S/S2MM User Application Field 1.
+ * @app2: MM2S/S2MM User Application Field 2.
+ * @app3: MM2S/S2MM User Application Field 3.
+ * @app4: MM2S/S2MM User Application Field 4.
+ * @sw_id_offset: MM2S/S2MM Sw ID
+ * @ptp_tx_skb: If timestamping is enabled used for timestamping skb
+ * Otherwise reserved.
+ * @ptp_tx_ts_tag: Tag value of 2 step timestamping if timestamping is enabled
+ * Otherwise reserved.
+ * @tx_skb: Transmit skb address
+ * @tx_desc_mapping: Tx Descriptor DMA mapping type.
+ */
+struct aximcdma_bd {
+ phys_addr_t next; /* Physical address of next buffer descriptor */
+#ifndef CONFIG_PHYS_ADDR_T_64BIT
+ u32 reserved1;
+#endif
+ phys_addr_t phys;
+#ifndef CONFIG_PHYS_ADDR_T_64BIT
+ u32 reserved2;
+#endif
+ u32 reserved3;
+ u32 cntrl;
+ u32 status;
+ u32 sband_stats;
+ u32 app0;
+ u32 app1; /* TX start << 16 | insert */
+ u32 app2; /* TX csum seed */
+ u32 app3;
+ u32 app4;
+ phys_addr_t sw_id_offset; /* first unused field by h/w */
+ phys_addr_t ptp_tx_skb;
+ u32 ptp_tx_ts_tag;
+ phys_addr_t tx_skb;
+ u32 tx_desc_mapping;
} __aligned(XAXIDMA_BD_MINIMUM_ALIGNMENT);
+#define DESC_DMA_MAP_SINGLE 0
+#define DESC_DMA_MAP_PAGE 1
+
+#if defined(CONFIG_XILINX_TSN)
+#define XAE_MAX_QUEUES 5
+#elif defined(CONFIG_AXIENET_HAS_MCDMA)
+#define XAE_MAX_QUEUES 16
+#else
+#define XAE_MAX_QUEUES 1
+#endif
+
+#ifdef CONFIG_XILINX_TSN
+/* TSN queues range is 2 to 5. For eg: for num_tc = 2 minimum queues = 2;
+ * for num_tc = 3 with sideband signalling maximum queues = 5
+ */
+#define XAE_MAX_TSN_TC 3
+#define XAE_TSN_MIN_QUEUES 2
+#endif
+
+enum axienet_tsn_ioctl {
+ SIOCCHIOCTL = SIOCDEVPRIVATE,
+ SIOC_GET_SCHED,
+ SIOC_PREEMPTION_CFG,
+ SIOC_PREEMPTION_CTRL,
+ SIOC_PREEMPTION_STS,
+ SIOC_PREEMPTION_COUNTER,
+ SIOC_QBU_USER_OVERRIDE,
+ SIOC_QBU_STS,
+};
+
/**
* struct axienet_local - axienet private per device data
* @ndev: Pointer for net_device to which it will be attached.
@@ -380,25 +614,31 @@ struct axidma_bd {
* @mii_bus: Pointer to MII bus structure
* @regs_start: Resource start for axienet device addresses
* @regs: Base address for the axienet_local device address space
- * @dma_regs: Base address for the axidma device address space
- * @dma_err_tasklet: Tasklet structure to process Axi DMA errors
- * @tx_irq: Axidma TX IRQ number
- * @rx_irq: Axidma RX IRQ number
+ * @mcdma_regs: Base address for the aximcdma device address space
+ * @napi: Napi Structure array for all dma queues
+ * @num_tx_queues: Total number of Tx DMA queues
+ * @num_rx_queues: Total number of Rx DMA queues
+ * @dq: DMA queues data
* @phy_mode: Phy type to identify between MII/GMII/RGMII/SGMII/1000 Base-X
+ * @is_tsn: Denotes a tsn port
+ * @temac_no: Denotes the port number in TSN IP
+ * @num_tc: Total number of TSN Traffic classes
+ * @timer_priv: PTP timer private data pointer
+ * @ptp_tx_irq: PTP tx irq
+ * @ptp_rx_irq: PTP rx irq
+ * @rtc_irq: PTP RTC irq
+ * @qbv_irq: QBV sched irq
+ * @ptp_ts_type: ptp time stamp type - 1 or 2 step mode
+ * @ptp_rx_hw_pointer: ptp rx hw pointer
+ * @ptp_rx_sw_pointer: ptp rx sw pointer
+ * @ptp_txq: PTP tx queue header
+ * @tx_tstamp_work: PTP timestamping work queue
+ * @ptp_tx_lock: PTP tx lock
+ * @dma_err_tasklet: Tasklet structure to process Axi DMA errors
+ * @eth_irq: Axi Ethernet IRQ number
* @options: AxiEthernet option word
* @last_link: Phy link state in which the PHY was negotiated earlier
* @features: Stores the extended features supported by the axienet hw
- * @tx_bd_v: Virtual address of the TX buffer descriptor ring
- * @tx_bd_p: Physical address(start address) of the TX buffer descr. ring
- * @rx_bd_v: Virtual address of the RX buffer descriptor ring
- * @rx_bd_p: Physical address(start address) of the RX buffer descr. ring
- * @tx_bd_ci: Stores the index of the Tx buffer descriptor in the ring being
- * accessed currently. Used while alloc. BDs before a TX starts
- * @tx_bd_tail: Stores the index of the Tx buffer descriptor in the ring being
- * accessed currently. Used while processing BDs after the TX
- * completed.
- * @rx_bd_ci: Stores the index of the Rx buffer descriptor in the ring being
- * accessed currently.
* @max_frm_size: Stores the maximum size of the frame that can be that
* Txed/Rxed in the existing hardware. If jumbo option is
* supported, the maximum frame size would be 9k. Else it is
@@ -408,6 +648,28 @@ struct axidma_bd {
* @csum_offload_on_rx_path: Stores the checksum selection on RX side.
* @coalesce_count_rx: Store the irq coalesce on RX side.
* @coalesce_count_tx: Store the irq coalesce on TX side.
+ * @phy_interface: Phy interface type.
+ * @phy_flags: Phy interface flags.
+ * @eth_hasnobuf: Ethernet is configured in Non buf mode.
+ * @eth_hasptp: Ethernet is configured for ptp.
+ * @axienet_config: Ethernet config structure
+ * @tx_ts_regs: Base address for the axififo device address space.
+ * @rx_ts_regs: Base address for the rx axififo device address space.
+ * @tstamp_config: Hardware timestamp config structure.
+ * @tx_ptpheader: Stores the tx ptp header.
+ * @aclk: AXI4-Lite clock for ethernet and dma.
+ * @eth_sclk: AXI4-Stream interface clock.
+ * @eth_refclk: Stable clock used by signal delay primitives and transceivers.
+ * @eth_dclk: Dynamic Reconfiguration Port(DRP) clock.
+ * @dma_sg_clk: DMA Scatter Gather Clock.
+ * @dma_rx_clk: DMA S2MM Primary Clock.
+ * @dma_tx_clk: DMA MM2S Primary Clock.
+ * @qnum: Axi Ethernet queue number to be operated on.
+ * @chan_num: MCDMA Channel number to be operated on.
+ * @chan_id: MCDMA Channel id used in conjunction with weight parameter.
+ * @weight: MCDMA Channel weight value to be configured for.
+ * @dma_mask: Specify the width of the DMA address space.
+ * @usxgmii_rate: USXGMII PHY speed.
*/
struct axienet_local {
struct net_device *ndev;
@@ -416,9 +678,6 @@ struct axienet_local {
/* Connection to PHY device */
struct device_node *phy_node;
- struct phylink *phylink;
- struct phylink_config phylink_config;
-
/* Clock for AXI bus */
struct clk *clk;
@@ -428,28 +687,45 @@ struct axienet_local {
/* IO registers, dma functions and IRQs */
resource_size_t regs_start;
void __iomem *regs;
- void __iomem *dma_regs;
+ void __iomem *mcdma_regs;
- struct work_struct dma_err_task;
+ struct tasklet_struct dma_err_tasklet[XAE_MAX_QUEUES];
+ struct napi_struct napi[XAE_MAX_QUEUES]; /* NAPI Structure */
+
+ #define XAE_TEMAC1 0
+ #define XAE_TEMAC2 1
+ u8 temac_no;
+ u16 num_tx_queues; /* Number of TX DMA queues */
+ u16 num_rx_queues; /* Number of RX DMA queues */
+ struct axienet_dma_q *dq[XAE_MAX_QUEUES]; /* DMA queue data*/
- int tx_irq;
- int rx_irq;
- int eth_irq;
phy_interface_t phy_mode;
+ bool is_tsn;
+#ifdef CONFIG_XILINX_TSN
+ u16 num_tc;
+#ifdef CONFIG_XILINX_TSN_PTP
+ void *timer_priv;
+ int ptp_tx_irq;
+ int ptp_rx_irq;
+ int rtc_irq;
+ int qbv_irq;
+ int ptp_ts_type;
+ u8 ptp_rx_hw_pointer;
+ u8 ptp_rx_sw_pointer;
+ struct sk_buff_head ptp_txq;
+ struct work_struct tx_tstamp_work;
+ spinlock_t ptp_tx_lock; /* TSN PTP tx lock*/
+#endif
+#endif
+ int eth_irq;
+
u32 options; /* Current options word */
+ u32 last_link;
u32 features;
- /* Buffer descriptors */
- struct axidma_bd *tx_bd_v;
- dma_addr_t tx_bd_p;
u32 tx_bd_num;
- struct axidma_bd *rx_bd_v;
- dma_addr_t rx_bd_p;
u32 rx_bd_num;
- u32 tx_bd_ci;
- u32 tx_bd_tail;
- u32 rx_bd_ci;
u32 max_frm_size;
u32 rxmem;
@@ -459,6 +735,134 @@ struct axienet_local {
u32 coalesce_count_rx;
u32 coalesce_count_tx;
+ u32 phy_interface;
+ u32 phy_flags;
+ bool eth_hasnobuf;
+ bool eth_hasptp;
+ const struct axienet_config *axienet_config;
+
+#if defined(CONFIG_XILINX_AXI_EMAC_HWTSTAMP) || defined(CONFIG_XILINX_TSN_PTP)
+ void __iomem *tx_ts_regs;
+ void __iomem *rx_ts_regs;
+ struct hwtstamp_config tstamp_config;
+ u8 *tx_ptpheader;
+#endif
+ struct clk *aclk;
+ struct clk *eth_sclk;
+ struct clk *eth_refclk;
+ struct clk *eth_dclk;
+ struct clk *dma_sg_clk;
+ struct clk *dma_rx_clk;
+ struct clk *dma_tx_clk;
+
+ /* MCDMA Fields */
+ int qnum[XAE_MAX_QUEUES];
+ int chan_num[XAE_MAX_QUEUES];
+ /* WRR Fields */
+ u16 chan_id;
+ u16 weight;
+
+ u8 dma_mask;
+ u32 usxgmii_rate;
+};
+
+/**
+ * struct axienet_dma_q - axienet private per dma queue data
+ * @lp: Parent pointer
+ * @dma_regs: Base address for the axidma device address space
+ * @tx_irq: Axidma TX IRQ number
+ * @rx_irq: Axidma RX IRQ number
+ * @tx_lock: Spin lock for tx path
+ * @rx_lock:	Spin lock for rx path
+ * @tx_bd_v: Virtual address of the TX buffer descriptor ring
+ * @tx_bd_p: Physical address(start address) of the TX buffer descr. ring
+ * @rx_bd_v: Virtual address of the RX buffer descriptor ring
+ * @rx_bd_p: Physical address(start address) of the RX buffer descr. ring
+ * @tx_buf: Virtual address of the Tx buffer pool used by the driver when
+ * DMA h/w is configured without DRE.
+ * @tx_bufs:	Virtual address of the Tx buffer address.
+ * @tx_bufs_dma: Physical address of the Tx buffer address used by the driver
+ * when DMA h/w is configured without DRE.
+ * @eth_hasdre: Tells whether DMA h/w is configured with dre or not.
+ * @tx_bd_ci: Stores the index of the Tx buffer descriptor in the ring being
+ * accessed currently. Used while alloc. BDs before a TX starts
+ * @tx_bd_tail: Stores the index of the Tx buffer descriptor in the ring being
+ * accessed currently. Used while processing BDs after the TX
+ * completed.
+ * @rx_bd_ci: Stores the index of the Rx buffer descriptor in the ring being
+ * accessed currently.
+ * @chan_id: MCDMA channel to operate on.
+ * @rx_offset: MCDMA S2MM channel starting offset.
+ * @txq_bd_v: Virtual address of the MCDMA TX buffer descriptor ring
+ * @rxq_bd_v: Virtual address of the MCDMA RX buffer descriptor ring
+ * @tx_packets: Number of transmit packets processed by the dma queue.
+ * @tx_bytes: Number of transmit bytes processed by the dma queue.
+ * @rx_packets: Number of receive packets processed by the dma queue.
+ * @rx_bytes: Number of receive bytes processed by the dma queue.
+ */
+struct axienet_dma_q {
+ struct axienet_local *lp; /* parent */
+ void __iomem *dma_regs;
+
+ int tx_irq;
+ int rx_irq;
+
+ spinlock_t tx_lock; /* tx lock */
+ spinlock_t rx_lock; /* rx lock */
+
+ /* Buffer descriptors */
+ struct axidma_bd *tx_bd_v;
+ struct axidma_bd *rx_bd_v;
+ dma_addr_t rx_bd_p;
+ dma_addr_t tx_bd_p;
+
+ unsigned char *tx_buf[XAE_TX_BUFFERS];
+ unsigned char *tx_bufs;
+ dma_addr_t tx_bufs_dma;
+ bool eth_hasdre;
+
+ u32 tx_bd_ci;
+ u32 rx_bd_ci;
+ u32 tx_bd_tail;
+
+ /* MCDMA fields */
+ u16 chan_id;
+ u32 rx_offset;
+ struct aximcdma_bd *txq_bd_v;
+ struct aximcdma_bd *rxq_bd_v;
+
+ unsigned long tx_packets;
+ unsigned long tx_bytes;
+ unsigned long rx_packets;
+ unsigned long rx_bytes;
+};
+
+#define AXIENET_TX_SSTATS_LEN(lp) ((lp)->num_tx_queues * 2)
+#define AXIENET_RX_SSTATS_LEN(lp) ((lp)->num_rx_queues * 2)
+
+/**
+ * enum axienet_ip_type - AXIENET IP/MAC type.
+ *
+ * @XAXIENET_1G:	 IP type is 1G MAC.
+ * @XAXIENET_2_5G: IP type is 2.5G MAC.
+ * @XAXIENET_LEGACY_10G: IP type is legacy 10G MAC.
+ * @XAXIENET_10G_25G: IP type is 10G/25G MAC(XXV MAC).
+ *
+ */
+enum axienet_ip_type {
+ XAXIENET_1G = 0,
+ XAXIENET_2_5G,
+ XAXIENET_LEGACY_10G,
+ XAXIENET_10G_25G,
+};
+
+struct axienet_config {
+ enum axienet_ip_type mactype;
+ void (*setoptions)(struct net_device *ndev, u32 options);
+ int (*clk_init)(struct platform_device *pdev, struct clk **axi_aclk,
+ struct clk **axis_clk, struct clk **ref_clk,
+ struct clk **dclk);
+ u32 tx_ptplen;
};
/**
@@ -473,6 +877,12 @@ struct axienet_option {
u32 m_or;
};
+struct xxvenet_option {
+ u32 opt;
+ u32 reg;
+ u32 m_or;
+};
+
/**
* axienet_ior - Memory mapped Axi Ethernet register read
* @lp: Pointer to axienet local structure
@@ -507,10 +917,183 @@ static inline void axienet_iow(struct axienet_local *lp, off_t offset,
iowrite32(value, lp->regs + offset);
}
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
+/**
+ * axienet_txts_ior - Memory mapped AXI FIFO MM S register read
+ * @lp: Pointer to axienet_local structure
+ * @reg: Address offset from the base address of AXI FIFO MM S
+ * core
+ *
+ * Return: the contents of the AXI FIFO MM S register
+ */
+
+static inline u32 axienet_txts_ior(struct axienet_local *lp, off_t reg)
+{
+ return ioread32(lp->tx_ts_regs + reg);
+}
+
+/**
+ * axienet_txts_iow - Memory mapped AXI FIFO MM S register write
+ * @lp: Pointer to axienet_local structure
+ * @reg: Address offset from the base address of AXI FIFO MM S
+ * core.
+ * @value: Value to be written into the AXI FIFO MM S register
+ */
+static inline void axienet_txts_iow(struct axienet_local *lp, off_t reg,
+ u32 value)
+{
+ iowrite32(value, (lp->tx_ts_regs + reg));
+}
+
+/**
+ * axienet_rxts_ior - Memory mapped AXI FIFO MM S register read
+ * @lp: Pointer to axienet_local structure
+ * @reg: Address offset from the base address of AXI FIFO MM S
+ * core
+ *
+ * Return: the contents of the AXI FIFO MM S register
+ */
+
+static inline u32 axienet_rxts_ior(struct axienet_local *lp, off_t reg)
+{
+ return ioread32(lp->rx_ts_regs + reg);
+}
+
+/**
+ * axienet_rxts_iow - Memory mapped AXI FIFO MM S register write
+ * @lp: Pointer to axienet_local structure
+ * @reg: Address offset from the base address of AXI FIFO MM S
+ * core.
+ * @value: Value to be written into the AXI FIFO MM S register
+ */
+static inline void axienet_rxts_iow(struct axienet_local *lp, off_t reg,
+ u32 value)
+{
+ iowrite32(value, (lp->rx_ts_regs + reg));
+}
+#endif
+
+/**
+ * axienet_dma_in32 - Memory mapped Axi DMA register read
+ * @q: Pointer to DMA queue structure
+ * @reg: Address offset from the base address of the Axi DMA core
+ *
+ * Return: The contents of the Axi DMA register
+ *
+ * This function returns the contents of the corresponding Axi DMA register.
+ */
+static inline u32 axienet_dma_in32(struct axienet_dma_q *q, off_t reg)
+{
+ return ioread32(q->dma_regs + reg);
+}
+
+/**
+ * axienet_dma_out32 - Memory mapped Axi DMA register write.
+ * @q: Pointer to DMA queue structure
+ * @reg: Address offset from the base address of the Axi DMA core
+ * @value: Value to be written into the Axi DMA register
+ *
+ * This function writes the desired value into the corresponding Axi DMA
+ * register.
+ */
+static inline void axienet_dma_out32(struct axienet_dma_q *q,
+ off_t reg, u32 value)
+{
+ iowrite32(value, q->dma_regs + reg);
+}
+
+/**
+ * axienet_dma_bdout - Memory mapped Axi DMA register Buffer Descriptor write.
+ * @q: Pointer to DMA queue structure
+ * @reg: Address offset from the base address of the Axi DMA core
+ * @value: Value to be written into the Axi DMA register
+ *
+ * This function writes the desired value into the corresponding Axi DMA
+ * register.
+ */
+static inline void axienet_dma_bdout(struct axienet_dma_q *q,
+ off_t reg, dma_addr_t value)
+{
+#if defined(CONFIG_PHYS_ADDR_T_64BIT)
+ writeq(value, (q->dma_regs + reg));
+#else
+ writel(value, (q->dma_regs + reg));
+#endif
+}
/* Function prototypes visible in xilinx_axienet_mdio.c for other files */
int axienet_mdio_enable(struct axienet_local *lp);
void axienet_mdio_disable(struct axienet_local *lp);
int axienet_mdio_setup(struct axienet_local *lp);
void axienet_mdio_teardown(struct axienet_local *lp);
+#ifdef CONFIG_XILINX_TSN_PTP
+void axienet_tx_tstamp(struct work_struct *work);
+#endif
+#ifdef CONFIG_XILINX_TSN_QBV
+int axienet_qbv_init(struct net_device *ndev);
+void axienet_qbv_remove(struct net_device *ndev);
+int axienet_set_schedule(struct net_device *ndev, void __user *useraddr);
+int axienet_get_schedule(struct net_device *ndev, void __user *useraddr);
+#endif
+
+#ifdef CONFIG_XILINX_TSN_QBR
+int axienet_preemption(struct net_device *ndev, void __user *useraddr);
+int axienet_preemption_ctrl(struct net_device *ndev, void __user *useraddr);
+int axienet_preemption_sts(struct net_device *ndev, void __user *useraddr);
+int axienet_preemption_cnt(struct net_device *ndev, void __user *useraddr);
+#ifdef CONFIG_XILINX_TSN_QBV
+int axienet_qbu_user_override(struct net_device *ndev, void __user *useraddr);
+int axienet_qbu_sts(struct net_device *ndev, void __user *useraddr);
+#endif
+#endif
+
+int axienet_mdio_wait_until_ready(struct axienet_local *lp);
+void __maybe_unused axienet_bd_free(struct net_device *ndev,
+ struct axienet_dma_q *q);
+int __maybe_unused axienet_dma_q_init(struct net_device *ndev,
+ struct axienet_dma_q *q);
+void axienet_dma_err_handler(unsigned long data);
+irqreturn_t __maybe_unused axienet_tx_irq(int irq, void *_ndev);
+irqreturn_t __maybe_unused axienet_rx_irq(int irq, void *_ndev);
+void axienet_start_xmit_done(struct net_device *ndev, struct axienet_dma_q *q);
+void axienet_dma_bd_release(struct net_device *ndev);
+int __axienet_device_reset(struct axienet_dma_q *q);
+void axienet_set_mac_address(struct net_device *ndev, const void *address);
+void axienet_set_multicast_list(struct net_device *ndev);
+int xaxienet_rx_poll(struct napi_struct *napi, int quota);
+
+#if defined(CONFIG_AXIENET_HAS_MCDMA)
+int __maybe_unused axienet_mcdma_rx_q_init(struct net_device *ndev,
+ struct axienet_dma_q *q);
+int __maybe_unused axienet_mcdma_tx_q_init(struct net_device *ndev,
+ struct axienet_dma_q *q);
+void __maybe_unused axienet_mcdma_tx_bd_free(struct net_device *ndev,
+ struct axienet_dma_q *q);
+void __maybe_unused axienet_mcdma_rx_bd_free(struct net_device *ndev,
+ struct axienet_dma_q *q);
+irqreturn_t __maybe_unused axienet_mcdma_tx_irq(int irq, void *_ndev);
+irqreturn_t __maybe_unused axienet_mcdma_rx_irq(int irq, void *_ndev);
+void __maybe_unused axienet_mcdma_err_handler(unsigned long data);
+void axienet_strings(struct net_device *ndev, u32 sset, u8 *data);
+int axienet_sset_count(struct net_device *ndev, int sset);
+void axienet_get_stats(struct net_device *ndev,
+ struct ethtool_stats *stats,
+ u64 *data);
+int axeinet_mcdma_create_sysfs(struct kobject *kobj);
+void axeinet_mcdma_remove_sysfs(struct kobject *kobj);
+int __maybe_unused axienet_mcdma_tx_probe(struct platform_device *pdev,
+ struct device_node *np,
+ struct axienet_local *lp);
+int __maybe_unused axienet_mcdma_rx_probe(struct platform_device *pdev,
+ struct axienet_local *lp,
+ struct net_device *ndev);
+#endif
+
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+void axienet_tx_hwtstamp(struct axienet_local *lp,
+ struct aximcdma_bd *cur_p);
+#else
+void axienet_tx_hwtstamp(struct axienet_local *lp,
+ struct axidma_bd *cur_p);
+#endif
#endif /* XILINX_AXI_ENET_H */
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_dma.c b/drivers/net/ethernet/xilinx/xilinx_axienet_dma.c
new file mode 100644
index 000000000000..902d88ec8a6d
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_dma.c
@@ -0,0 +1,504 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Xilinx AXI Ethernet (DMA programming)
+ *
+ * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
+ * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
+ * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
+ * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
+ * Copyright (c) 2010 - 2011 PetaLogix
+ * Copyright (c) 2010 - 2012 Xilinx, Inc.
+ * Copyright (C) 2018 Xilinx, Inc. All rights reserved.
+ *
+ * This file contains helper functions for AXI DMA TX and RX programming.
+ */
+
+#include "xilinx_axienet.h"
+
+/**
+ * axienet_bd_free - Release buffer descriptor rings for individual dma queue
+ * @ndev: Pointer to the net_device structure
+ * @q: Pointer to DMA queue structure
+ *
+ * This function is helper function to axienet_dma_bd_release.
+ */
+
+void __maybe_unused axienet_bd_free(struct net_device *ndev,
+ struct axienet_dma_q *q)
+{
+ int i;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ for (i = 0; i < lp->rx_bd_num; i++) {
+ dma_unmap_single(ndev->dev.parent, q->rx_bd_v[i].phys,
+ lp->max_frm_size, DMA_FROM_DEVICE);
+ dev_kfree_skb((struct sk_buff *)
+ (q->rx_bd_v[i].sw_id_offset));
+ }
+
+ if (q->rx_bd_v) {
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(*q->rx_bd_v) * lp->rx_bd_num,
+ q->rx_bd_v,
+ q->rx_bd_p);
+ }
+ if (q->tx_bd_v) {
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(*q->tx_bd_v) * lp->tx_bd_num,
+ q->tx_bd_v,
+ q->tx_bd_p);
+ }
+ if (q->tx_bufs) {
+ dma_free_coherent(ndev->dev.parent,
+ XAE_MAX_PKT_LEN * lp->tx_bd_num,
+ q->tx_bufs,
+ q->tx_bufs_dma);
+ }
+}
+
+/**
+ * __dma_txq_init - Setup buffer descriptor rings for individual Axi DMA-Tx
+ * @ndev: Pointer to the net_device structure
+ * @q: Pointer to DMA queue structure
+ *
+ * Return: 0, on success -ENOMEM, on failure
+ *
+ * This function is helper function to axienet_dma_q_init
+ */
+static int __dma_txq_init(struct net_device *ndev, struct axienet_dma_q *q)
+{
+ int i;
+ u32 cr;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ q->tx_bd_ci = 0;
+ q->tx_bd_tail = 0;
+
+ q->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*q->tx_bd_v) * lp->tx_bd_num,
+ &q->tx_bd_p, GFP_KERNEL);
+ if (!q->tx_bd_v)
+ goto out;
+
+ for (i = 0; i < lp->tx_bd_num; i++) {
+ q->tx_bd_v[i].next = q->tx_bd_p +
+ sizeof(*q->tx_bd_v) *
+ ((i + 1) % lp->tx_bd_num);
+ }
+
+ if (!q->eth_hasdre) {
+ q->tx_bufs = dma_alloc_coherent(ndev->dev.parent,
+ XAE_MAX_PKT_LEN * lp->tx_bd_num,
+ &q->tx_bufs_dma,
+ GFP_KERNEL);
+ if (!q->tx_bufs)
+ goto out;
+
+ for (i = 0; i < lp->tx_bd_num; i++)
+ q->tx_buf[i] = &q->tx_bufs[i * XAE_MAX_PKT_LEN];
+ }
+
+ /* Start updating the Tx channel control register */
+ cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
+ /* Update the interrupt coalesce count */
+ cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
+ ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
+ (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XAXIDMA_IRQ_ALL_MASK;
+ /* Write to the Tx channel control register */
+ axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET, cr);
+
+ /* Write to the RS (Run-stop) bit in the Tx channel control register.
+ * Tx channel is now ready to run. But only after we write to the
+ * tail pointer register that the Tx channel will start transmitting.
+ */
+ axienet_dma_bdout(q, XAXIDMA_TX_CDESC_OFFSET, q->tx_bd_p);
+ cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
+ axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET,
+ cr | XAXIDMA_CR_RUNSTOP_MASK);
+ return 0;
+out:
+ return -ENOMEM;
+}
+
+/**
+ * __dma_rxq_init - Setup buffer descriptor rings for individual Axi DMA-Rx
+ * @ndev: Pointer to the net_device structure
+ * @q: Pointer to DMA queue structure
+ *
+ * Return: 0, on success -ENOMEM, on failure
+ *
+ * This function is helper function to axienet_dma_q_init
+ */
+static int __dma_rxq_init(struct net_device *ndev,
+ struct axienet_dma_q *q)
+{
+ int i;
+ u32 cr;
+ struct sk_buff *skb;
+ struct axienet_local *lp = netdev_priv(ndev);
+ /* Reset the indexes which are used for accessing the BDs */
+ q->rx_bd_ci = 0;
+
+ /* Allocate the Rx buffer descriptors. */
+ q->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*q->rx_bd_v) * lp->rx_bd_num,
+ &q->rx_bd_p, GFP_KERNEL);
+ if (!q->rx_bd_v)
+ goto out;
+
+ for (i = 0; i < lp->rx_bd_num; i++) {
+ q->rx_bd_v[i].next = q->rx_bd_p +
+ sizeof(*q->rx_bd_v) *
+ ((i + 1) % lp->rx_bd_num);
+
+ skb = netdev_alloc_skb(ndev, lp->max_frm_size);
+ if (!skb)
+ goto out;
+
+ /* Ensure that the skb is completely updated
+		 * prior to mapping the DMA
+ */
+ wmb();
+
+ q->rx_bd_v[i].sw_id_offset = (phys_addr_t)skb;
+ q->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
+ skb->data,
+ lp->max_frm_size,
+ DMA_FROM_DEVICE);
+ q->rx_bd_v[i].cntrl = lp->max_frm_size;
+ }
+
+ /* Start updating the Rx channel control register */
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ /* Update the interrupt coalesce count */
+ cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
+ ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = ((cr & ~XAXIDMA_DELAY_MASK) |
+ (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XAXIDMA_IRQ_ALL_MASK;
+ /* Write to the Rx channel control register */
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);
+
+ /* Populate the tail pointer and bring the Rx Axi DMA engine out of
+ * halted state. This will make the Rx side ready for reception.
+ */
+ axienet_dma_bdout(q, XAXIDMA_RX_CDESC_OFFSET, q->rx_bd_p);
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET,
+ cr | XAXIDMA_CR_RUNSTOP_MASK);
+ axienet_dma_bdout(q, XAXIDMA_RX_TDESC_OFFSET, q->rx_bd_p +
+ (sizeof(*q->rx_bd_v) * (lp->rx_bd_num - 1)));
+
+ return 0;
+out:
+ return -ENOMEM;
+}
+
+/**
+ * axienet_dma_q_init - Setup buffer descriptor rings for individual Axi DMA
+ * @ndev: Pointer to the net_device structure
+ * @q: Pointer to DMA queue structure
+ *
+ * Return: 0, on success -ENOMEM, on failure
+ *
+ * This function is helper function to axienet_dma_bd_init
+ */
+int __maybe_unused axienet_dma_q_init(struct net_device *ndev,
+ struct axienet_dma_q *q)
+{
+ if (__dma_txq_init(ndev, q))
+ goto out;
+
+ if (__dma_rxq_init(ndev, q))
+ goto out;
+
+ return 0;
+out:
+ axienet_dma_bd_release(ndev);
+ return -ENOMEM;
+}
+
+/**
+ * map_dma_q_irq - Map dma q based on interrupt number.
+ * @irq: irq number
+ * @lp: axienet local structure
+ *
+ * Return: DMA queue.
+ *
+ * This returns the DMA number on which interrupt has occurred.
+ */
+static int map_dma_q_irq(int irq, struct axienet_local *lp)
+{
+ int i;
+
+ for_each_rx_dma_queue(lp, i) {
+ if (irq == lp->dq[i]->tx_irq || irq == lp->dq[i]->rx_irq)
+ return i;
+ }
+ pr_err("Error mapping DMA irq\n");
+ return -ENODEV;
+}
+
+/**
+ * axienet_tx_irq - Tx Done Isr.
+ * @irq: irq number
+ * @_ndev: net_device pointer
+ *
+ * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
+ *
+ * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
+ * to complete the BD processing.
+ */
+irqreturn_t __maybe_unused axienet_tx_irq(int irq, void *_ndev)
+{
+ u32 cr;
+ unsigned int status;
+ struct net_device *ndev = _ndev;
+ struct axienet_local *lp = netdev_priv(ndev);
+ int i = map_dma_q_irq(irq, lp);
+ struct axienet_dma_q *q;
+
+ if (i < 0)
+ return IRQ_NONE;
+
+ q = lp->dq[i];
+
+ status = axienet_dma_in32(q, XAXIDMA_TX_SR_OFFSET);
+ if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
+ axienet_dma_out32(q, XAXIDMA_TX_SR_OFFSET, status);
+ axienet_start_xmit_done(lp->ndev, q);
+ goto out;
+ }
+
+ if (!(status & XAXIDMA_IRQ_ALL_MASK))
+ return IRQ_NONE;
+
+ if (status & XAXIDMA_IRQ_ERROR_MASK) {
+ dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
+ dev_err(&ndev->dev, "Current BD is at: %pa\n",
+ &q->tx_bd_v[q->tx_bd_ci].phys);
+
+ cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XAXIDMA_IRQ_ALL_MASK);
+ /* Write to the Tx channel control register */
+ axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET, cr);
+
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XAXIDMA_IRQ_ALL_MASK);
+ /* Write to the Rx channel control register */
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);
+
+ tasklet_schedule(&lp->dma_err_tasklet[i]);
+ axienet_dma_out32(q, XAXIDMA_TX_SR_OFFSET, status);
+ }
+out:
+ return IRQ_HANDLED;
+}
+
+/**
+ * axienet_rx_irq - Rx Isr.
+ * @irq: irq number
+ * @_ndev: net_device pointer
+ *
+ * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
+ *
+ * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
+ * processing.
+ */
+irqreturn_t __maybe_unused axienet_rx_irq(int irq, void *_ndev)
+{
+ u32 cr;
+ unsigned int status;
+ struct net_device *ndev = _ndev;
+ struct axienet_local *lp = netdev_priv(ndev);
+ int i = map_dma_q_irq(irq, lp);
+ struct axienet_dma_q *q;
+
+ if (i < 0)
+ return IRQ_NONE;
+
+ q = lp->dq[i];
+
+ status = axienet_dma_in32(q, XAXIDMA_RX_SR_OFFSET);
+ if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);
+ napi_schedule(&lp->napi[i]);
+ }
+
+ if (!(status & XAXIDMA_IRQ_ALL_MASK))
+ return IRQ_NONE;
+
+ if (status & XAXIDMA_IRQ_ERROR_MASK) {
+ dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
+ dev_err(&ndev->dev, "Current BD is at: %pa\n",
+ &q->rx_bd_v[q->rx_bd_ci].phys);
+
+ cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XAXIDMA_IRQ_ALL_MASK);
+ /* Finally write to the Tx channel control register */
+ axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET, cr);
+
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XAXIDMA_IRQ_ALL_MASK);
+ /* write to the Rx channel control register */
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);
+
+ tasklet_schedule(&lp->dma_err_tasklet[i]);
+ axienet_dma_out32(q, XAXIDMA_RX_SR_OFFSET, status);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * axienet_dma_err_handler - Tasklet handler for Axi DMA Error
+ * @data: Data passed
+ *
+ * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
+ * Tx/Rx BDs.
+ */
+void __maybe_unused axienet_dma_err_handler(unsigned long data)
+{
+ u32 axienet_status;
+ u32 cr, i;
+ struct axienet_dma_q *q = (struct axienet_dma_q *)data;
+ struct axienet_local *lp = q->lp;
+ struct net_device *ndev = lp->ndev;
+ struct axidma_bd *cur_p;
+
+ lp->axienet_config->setoptions(ndev, lp->options &
+ ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
+ mutex_lock(&lp->mii_bus->mdio_lock);
+ axienet_mdio_disable(lp);
+ axienet_mdio_wait_until_ready(lp);
+		/* Disable the MDIO interface till Axi Ethernet Reset is
+		 * completed. When we do an Axi Ethernet reset, it resets the
+		 * complete core including the MDIO. So if MDIO is not disabled
+		 * when the reset process is started,
+		 * MDIO will be broken afterwards.
+		 */
+ */
+ }
+
+ __axienet_device_reset(q);
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
+ axienet_mdio_enable(lp);
+ axienet_mdio_wait_until_ready(lp);
+ mutex_unlock(&lp->mii_bus->mdio_lock);
+ }
+
+ for (i = 0; i < lp->tx_bd_num; i++) {
+ cur_p = &q->tx_bd_v[i];
+ if (cur_p->phys)
+ dma_unmap_single(ndev->dev.parent, cur_p->phys,
+ (cur_p->cntrl &
+ XAXIDMA_BD_CTRL_LENGTH_MASK),
+ DMA_TO_DEVICE);
+ if (cur_p->tx_skb)
+ dev_kfree_skb_irq((struct sk_buff *)cur_p->tx_skb);
+ cur_p->phys = 0;
+ cur_p->cntrl = 0;
+ cur_p->status = 0;
+ cur_p->app0 = 0;
+ cur_p->app1 = 0;
+ cur_p->app2 = 0;
+ cur_p->app3 = 0;
+ cur_p->app4 = 0;
+ cur_p->sw_id_offset = 0;
+ cur_p->tx_skb = 0;
+ }
+
+ for (i = 0; i < lp->rx_bd_num; i++) {
+ cur_p = &q->rx_bd_v[i];
+ cur_p->status = 0;
+ cur_p->app0 = 0;
+ cur_p->app1 = 0;
+ cur_p->app2 = 0;
+ cur_p->app3 = 0;
+ cur_p->app4 = 0;
+ }
+
+ q->tx_bd_ci = 0;
+ q->tx_bd_tail = 0;
+ q->rx_bd_ci = 0;
+
+ /* Start updating the Rx channel control register */
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ /* Update the interrupt coalesce count */
+ cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
+ (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = ((cr & ~XAXIDMA_DELAY_MASK) |
+ (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XAXIDMA_IRQ_ALL_MASK;
+ /* Finally write to the Rx channel control register */
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);
+
+ /* Start updating the Tx channel control register */
+ cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
+ /* Update the interrupt coalesce count */
+ cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
+ (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
+ (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XAXIDMA_IRQ_ALL_MASK;
+ /* Finally write to the Tx channel control register */
+ axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET, cr);
+
+ /* Populate the tail pointer and bring the Rx Axi DMA engine out of
+ * halted state. This will make the Rx side ready for reception.
+ */
+ axienet_dma_bdout(q, XAXIDMA_RX_CDESC_OFFSET, q->rx_bd_p);
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET,
+ cr | XAXIDMA_CR_RUNSTOP_MASK);
+ axienet_dma_bdout(q, XAXIDMA_RX_TDESC_OFFSET, q->rx_bd_p +
+ (sizeof(*q->rx_bd_v) * (lp->rx_bd_num - 1)));
+
+ /* Write to the RS (Run-stop) bit in the Tx channel control register.
+ * Tx channel is now ready to run. But only after we write to the
+ * tail pointer register that the Tx channel will start transmitting
+ */
+ axienet_dma_bdout(q, XAXIDMA_TX_CDESC_OFFSET, q->tx_bd_p);
+ cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
+ axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET,
+ cr | XAXIDMA_CR_RUNSTOP_MASK);
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
+ axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
+ axienet_status &= ~XAE_RCW1_RX_MASK;
+ axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
+ }
+
+ if (lp->axienet_config->mactype == XAXIENET_1G && !lp->eth_hasnobuf) {
+ axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
+ if (axienet_status & XAE_INT_RXRJECT_MASK)
+ axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
+ }
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G)
+ axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
+
+ lp->axienet_config->setoptions(ndev, lp->options &
+ ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
+ axienet_set_mac_address(ndev, NULL);
+ axienet_set_multicast_list(ndev);
+ lp->axienet_config->setoptions(ndev, lp->options);
+}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index fa5dc2993520..0d71dad20739 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -23,6 +23,7 @@
*/
#include <linux/clk.h>
+#include <linux/circ_buf.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
@@ -37,9 +38,20 @@
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
+#include <linux/iopoll.h>
+#include <linux/ptp_classify.h>
+#include <linux/net_tstamp.h>
+#include <linux/random.h>
+#include <net/sock.h>
+#include <linux/xilinx_phy.h>
+#include <linux/clk.h>
#include "xilinx_axienet.h"
+#ifdef CONFIG_XILINX_TSN_PTP
+#include "xilinx_tsn_ptp.h"
+#include "xilinx_tsn_timer.h"
+#endif
/* Descriptors defines for Tx and Rx DMA */
#define TX_BD_NUM_DEFAULT 64
#define RX_BD_NUM_DEFAULT 1024
@@ -52,16 +64,14 @@
#define DRIVER_VERSION "1.00a"
#define AXIENET_REGS_N 40
+#define AXIENET_TS_HEADER_LEN 8
+#define XXVENET_TS_HEADER_LEN 4
+#define NS_PER_SEC 1000000000ULL /* Nanoseconds per second */
-/* Match table for of_platform binding */
-static const struct of_device_id axienet_of_match[] = {
- { .compatible = "xlnx,axi-ethernet-1.00.a", },
- { .compatible = "xlnx,axi-ethernet-1.01.a", },
- { .compatible = "xlnx,axi-ethernet-2.01.a", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, axienet_of_match);
+#ifdef CONFIG_XILINX_TSN_PTP
+int axienet_phc_index = -1;
+EXPORT_SYMBOL(axienet_phc_index);
+#endif
/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
@@ -104,8 +114,8 @@ static struct axienet_option axienet_options[] = {
.m_or = XAE_FCC_FCTX_MASK,
}, { /* Turn on promiscuous frame filtering */
.opt = XAE_OPTION_PROMISC,
- .reg = XAE_FMI_OFFSET,
- .m_or = XAE_FMI_PM_MASK,
+ .reg = XAE_FMC_OFFSET,
+ .m_or = XAE_FMC_PM_MASK,
}, { /* Enable transmitter */
.opt = XAE_OPTION_TXEN,
.reg = XAE_TC_OFFSET,
@@ -118,62 +128,27 @@ static struct axienet_option axienet_options[] = {
{}
};
-/**
- * axienet_dma_in32 - Memory mapped Axi DMA register read
- * @lp: Pointer to axienet local structure
- * @reg: Address offset from the base address of the Axi DMA core
- *
- * Return: The contents of the Axi DMA register
- *
- * This function returns the contents of the corresponding Axi DMA register.
- */
-static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
-{
- return ioread32(lp->dma_regs + reg);
-}
-
-/**
- * axienet_dma_out32 - Memory mapped Axi DMA register write.
- * @lp: Pointer to axienet local structure
- * @reg: Address offset from the base address of the Axi DMA core
- * @value: Value to be written into the Axi DMA register
- *
- * This function writes the desired value into the corresponding Axi DMA
- * register.
- */
-static inline void axienet_dma_out32(struct axienet_local *lp,
- off_t reg, u32 value)
-{
- iowrite32(value, lp->dma_regs + reg);
-}
-
-static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
- dma_addr_t addr)
-{
- axienet_dma_out32(lp, reg, lower_32_bits(addr));
-
- if (lp->features & XAE_FEATURE_DMA_64BIT)
- axienet_dma_out32(lp, reg + 4, upper_32_bits(addr));
-}
-
-static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
- struct axidma_bd *desc)
-{
- desc->phys = lower_32_bits(addr);
- if (lp->features & XAE_FEATURE_DMA_64BIT)
- desc->phys_msb = upper_32_bits(addr);
-}
-
-static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
- struct axidma_bd *desc)
-{
- dma_addr_t ret = desc->phys;
-
- if (lp->features & XAE_FEATURE_DMA_64BIT)
- ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
-
- return ret;
-}
+/* Option table for setting up Axi Ethernet hardware options */
+static struct xxvenet_option xxvenet_options[] = {
+ { /* Turn on FCS stripping on receive packets */
+ .opt = XAE_OPTION_FCS_STRIP,
+ .reg = XXV_RCW1_OFFSET,
+ .m_or = XXV_RCW1_FCS_MASK,
+ }, { /* Turn on FCS insertion on transmit packets */
+ .opt = XAE_OPTION_FCS_INSERT,
+ .reg = XXV_TC_OFFSET,
+ .m_or = XXV_TC_FCS_MASK,
+ }, { /* Enable transmitter */
+ .opt = XAE_OPTION_TXEN,
+ .reg = XXV_TC_OFFSET,
+ .m_or = XXV_TC_TX_MASK,
+ }, { /* Enable receiver */
+ .opt = XAE_OPTION_RXEN,
+ .reg = XXV_RCW1_OFFSET,
+ .m_or = XXV_RCW1_RX_MASK,
+ },
+ {}
+};
/**
* axienet_dma_bd_release - Release buffer descriptor rings
@@ -183,46 +158,23 @@ static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
* axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
* driver stop api is called.
*/
-static void axienet_dma_bd_release(struct net_device *ndev)
+void axienet_dma_bd_release(struct net_device *ndev)
{
int i;
struct axienet_local *lp = netdev_priv(ndev);
- /* If we end up here, tx_bd_v must have been DMA allocated. */
- dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
- lp->tx_bd_v,
- lp->tx_bd_p);
-
- if (!lp->rx_bd_v)
- return;
-
- for (i = 0; i < lp->rx_bd_num; i++) {
- dma_addr_t phys;
-
- /* A NULL skb means this descriptor has not been initialised
- * at all.
- */
- if (!lp->rx_bd_v[i].skb)
- break;
-
- dev_kfree_skb(lp->rx_bd_v[i].skb);
-
- /* For each descriptor, we programmed cntrl with the (non-zero)
- * descriptor size, after it had been successfully allocated.
- * So a non-zero value in there means we need to unmap it.
- */
- if (lp->rx_bd_v[i].cntrl) {
- phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
- dma_unmap_single(ndev->dev.parent, phys,
- lp->max_frm_size, DMA_FROM_DEVICE);
- }
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ for_each_tx_dma_queue(lp, i) {
+ axienet_mcdma_tx_bd_free(ndev, lp->dq[i]);
+ }
+#endif
+ for_each_rx_dma_queue(lp, i) {
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ axienet_mcdma_rx_bd_free(ndev, lp->dq[i]);
+#else
+ axienet_bd_free(ndev, lp->dq[i]);
+#endif
}
-
- dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
- lp->rx_bd_v,
- lp->rx_bd_p);
}
/**
@@ -237,113 +189,29 @@ static void axienet_dma_bd_release(struct net_device *ndev)
*/
static int axienet_dma_bd_init(struct net_device *ndev)
{
- u32 cr;
- int i;
- struct sk_buff *skb;
+ int i, ret;
struct axienet_local *lp = netdev_priv(ndev);
- /* Reset the indexes which are used for accessing the BDs */
- lp->tx_bd_ci = 0;
- lp->tx_bd_tail = 0;
- lp->rx_bd_ci = 0;
-
- /* Allocate the Tx and Rx buffer descriptors. */
- lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
- &lp->tx_bd_p, GFP_KERNEL);
- if (!lp->tx_bd_v)
- return -ENOMEM;
-
- lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
- &lp->rx_bd_p, GFP_KERNEL);
- if (!lp->rx_bd_v)
- goto out;
-
- for (i = 0; i < lp->tx_bd_num; i++) {
- dma_addr_t addr = lp->tx_bd_p +
- sizeof(*lp->tx_bd_v) *
- ((i + 1) % lp->tx_bd_num);
-
- lp->tx_bd_v[i].next = lower_32_bits(addr);
- if (lp->features & XAE_FEATURE_DMA_64BIT)
- lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ for_each_tx_dma_queue(lp, i) {
+ ret = axienet_mcdma_tx_q_init(ndev, lp->dq[i]);
+ if (ret != 0)
+ break;
}
-
- for (i = 0; i < lp->rx_bd_num; i++) {
- dma_addr_t addr;
-
- addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
- ((i + 1) % lp->rx_bd_num);
- lp->rx_bd_v[i].next = lower_32_bits(addr);
- if (lp->features & XAE_FEATURE_DMA_64BIT)
- lp->rx_bd_v[i].next_msb = upper_32_bits(addr);
-
- skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
- if (!skb)
- goto out;
-
- lp->rx_bd_v[i].skb = skb;
- addr = dma_map_single(ndev->dev.parent, skb->data,
- lp->max_frm_size, DMA_FROM_DEVICE);
- if (dma_mapping_error(ndev->dev.parent, addr)) {
- netdev_err(ndev, "DMA mapping error\n");
- goto out;
+#endif
+ for_each_rx_dma_queue(lp, i) {
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ ret = axienet_mcdma_rx_q_init(ndev, lp->dq[i]);
+#else
+ ret = axienet_dma_q_init(ndev, lp->dq[i]);
+#endif
+ if (ret != 0) {
+ netdev_err(ndev, "%s: Failed to init DMA buf\n", __func__);
+ break;
}
- desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
-
- lp->rx_bd_v[i].cntrl = lp->max_frm_size;
- }
-
- /* Start updating the Rx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
- ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = ((cr & ~XAXIDMA_DELAY_MASK) |
- (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
- /* Start updating the Tx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
- ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
- (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
-
- /* Populate the tail pointer and bring the Rx Axi DMA engine out of
- * halted state. This will make the Rx side ready for reception.
- */
- axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
- axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
-
- /* Write to the RS (Run-stop) bit in the Tx channel control register.
- * Tx channel is now ready to run. But only after we write to the
- * tail pointer register that the Tx channel will start transmitting.
- */
- axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
-
- return 0;
-out:
- axienet_dma_bd_release(ndev);
- return -ENOMEM;
+ }
+
+ return ret;
}
/**
@@ -354,16 +222,20 @@ out:
* This function is called to initialize the MAC address of the Axi Ethernet
* core. It writes to the UAW0 and UAW1 registers of the core.
*/
-static void axienet_set_mac_address(struct net_device *ndev,
- const void *address)
+void axienet_set_mac_address(struct net_device *ndev,
+ const void *address)
{
struct axienet_local *lp = netdev_priv(ndev);
if (address)
- memcpy(ndev->dev_addr, address, ETH_ALEN);
+ ether_addr_copy(ndev->dev_addr, address);
if (!is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
+ if (lp->axienet_config->mactype != XAXIENET_1G &&
+ lp->axienet_config->mactype != XAXIENET_2_5G)
+ return;
+
/* Set up unicast MAC address filter set its mac address */
axienet_iow(lp, XAE_UAW0_OFFSET,
(ndev->dev_addr[0]) |
@@ -391,6 +263,7 @@ static void axienet_set_mac_address(struct net_device *ndev,
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
struct sockaddr *addr = p;
+
axienet_set_mac_address(ndev, addr->sa_data);
return 0;
}
@@ -406,12 +279,15 @@ static int netdev_set_mac_address(struct net_device *ndev, void *p)
* means whenever the multicast table entries need to be updated this
* function gets called.
*/
-static void axienet_set_multicast_list(struct net_device *ndev)
+void axienet_set_multicast_list(struct net_device *ndev)
{
int i;
u32 reg, af0reg, af1reg;
struct axienet_local *lp = netdev_priv(ndev);
+ if ((lp->axienet_config->mactype != XAXIENET_1G) || lp->eth_hasnobuf)
+ return;
+
if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
/* We must make the kernel realize we had to move into
@@ -419,9 +295,9 @@ static void axienet_set_multicast_list(struct net_device *ndev)
* the flag is already set. If not we set it.
*/
ndev->flags |= IFF_PROMISC;
- reg = axienet_ior(lp, XAE_FMI_OFFSET);
- reg |= XAE_FMI_PM_MASK;
- axienet_iow(lp, XAE_FMI_OFFSET, reg);
+ reg = axienet_ior(lp, XAE_FMC_OFFSET);
+ reg |= XAE_FMC_PM_MASK;
+ axienet_iow(lp, XAE_FMC_OFFSET, reg);
dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
} else if (!netdev_mc_empty(ndev)) {
struct netdev_hw_addr *ha;
@@ -439,25 +315,25 @@ static void axienet_set_multicast_list(struct net_device *ndev)
af1reg = (ha->addr[4]);
af1reg |= (ha->addr[5] << 8);
- reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
+ reg = axienet_ior(lp, XAE_FMC_OFFSET) & 0xFFFFFF00;
reg |= i;
- axienet_iow(lp, XAE_FMI_OFFSET, reg);
+ axienet_iow(lp, XAE_FMC_OFFSET, reg);
axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
i++;
}
} else {
- reg = axienet_ior(lp, XAE_FMI_OFFSET);
- reg &= ~XAE_FMI_PM_MASK;
+ reg = axienet_ior(lp, XAE_FMC_OFFSET);
+ reg &= ~XAE_FMC_PM_MASK;
- axienet_iow(lp, XAE_FMI_OFFSET, reg);
+ axienet_iow(lp, XAE_FMC_OFFSET, reg);
for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
- reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
+ reg = axienet_ior(lp, XAE_FMC_OFFSET) & 0xFFFFFF00;
reg |= i;
- axienet_iow(lp, XAE_FMI_OFFSET, reg);
+ axienet_iow(lp, XAE_FMC_OFFSET, reg);
axienet_iow(lp, XAE_AF0_OFFSET, 0);
axienet_iow(lp, XAE_AF1_OFFSET, 0);
}
@@ -494,7 +370,24 @@ static void axienet_setoptions(struct net_device *ndev, u32 options)
lp->options |= options;
}
-static int __axienet_device_reset(struct axienet_local *lp)
+static void xxvenet_setoptions(struct net_device *ndev, u32 options)
+{
+ int reg;
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct xxvenet_option *tp = &xxvenet_options[0];
+
+ while (tp->opt) {
+ reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
+ if (options & tp->opt)
+ reg |= tp->m_or;
+ axienet_iow(lp, tp->reg, reg);
+ tp++;
+ }
+
+ lp->options |= options;
+}
+
+int __axienet_device_reset(struct axienet_dma_q *q)
{
u32 timeout;
@@ -505,13 +398,13 @@ static int __axienet_device_reset(struct axienet_local *lp)
* Note that even though both TX and RX have their own reset register,
* they both reset the entire DMA core, so only one needs to be used.
*/
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
+ axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
timeout = DELAY_OF_ONE_MILLISEC;
- while (axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET) &
+ while (axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET) &
XAXIDMA_CR_RESET_MASK) {
udelay(1);
if (--timeout == 0) {
- netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
+ netdev_err(q->lp->ndev, "%s: DMA reset timeout!\n",
__func__);
return -ETIMEDOUT;
}
@@ -536,52 +429,107 @@ static int axienet_device_reset(struct net_device *ndev)
{
u32 axienet_status;
struct axienet_local *lp = netdev_priv(ndev);
- int ret;
+ u32 err, val;
+
+ struct axienet_dma_q *q;
+ u32 i;
+
+ if (lp->axienet_config->mactype == XAXIENET_10G_25G) {
+ /* Reset the XXV MAC */
+ val = axienet_ior(lp, XXV_GT_RESET_OFFSET);
+ val |= XXV_GT_RESET_MASK;
+ axienet_iow(lp, XXV_GT_RESET_OFFSET, val);
+ /* Wait for 1ms for GT reset to complete as per spec */
+ mdelay(1);
+ val = axienet_ior(lp, XXV_GT_RESET_OFFSET);
+ val &= ~XXV_GT_RESET_MASK;
+ axienet_iow(lp, XXV_GT_RESET_OFFSET, val);
+ }
- ret = __axienet_device_reset(lp);
- if (ret)
- return ret;
+ if (!lp->is_tsn || lp->temac_no == XAE_TEMAC1) {
+ for_each_rx_dma_queue(lp, i) {
+ q = lp->dq[i];
+ __axienet_device_reset(q);
+#ifndef CONFIG_AXIENET_HAS_MCDMA
+ __axienet_device_reset(q);
+#endif
+ }
+ }
lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
- lp->options |= XAE_OPTION_VLAN;
- lp->options &= (~XAE_OPTION_JUMBO);
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
+ lp->options |= XAE_OPTION_VLAN;
+ lp->options &= (~XAE_OPTION_JUMBO);
+ }
- if ((ndev->mtu > XAE_MTU) &&
- (ndev->mtu <= XAE_JUMBO_MTU)) {
+ if ((ndev->mtu > XAE_MTU) && (ndev->mtu <= XAE_JUMBO_MTU)) {
lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
XAE_TRL_SIZE;
-
- if (lp->max_frm_size <= lp->rxmem)
+ if (lp->max_frm_size <= lp->rxmem &&
+ (lp->axienet_config->mactype != XAXIENET_10G_25G))
lp->options |= XAE_OPTION_JUMBO;
}
- ret = axienet_dma_bd_init(ndev);
- if (ret) {
- netdev_err(ndev, "%s: descriptor allocation failed\n",
- __func__);
- return ret;
+ if (!lp->is_tsn || lp->temac_no == XAE_TEMAC1) {
+ if (axienet_dma_bd_init(ndev)) {
+ netdev_err(ndev, "%s: descriptor allocation failed\n",
+ __func__);
+ }
+ }
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
+ axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
+ axienet_status &= ~XAE_RCW1_RX_MASK;
+ axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
}
- axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
- axienet_status &= ~XAE_RCW1_RX_MASK;
- axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
+ if (lp->axienet_config->mactype == XAXIENET_10G_25G) {
+ /* Check for block lock bit got set or not
+ * This ensures that 10G ethernet IP
+ * is functioning normally or not.
+ */
+ err = readl_poll_timeout(lp->regs + XXV_STATRX_BLKLCK_OFFSET,
+ val, (val & XXV_RX_BLKLCK_MASK),
+ 10, DELAY_OF_ONE_MILLISEC);
+ if (err) {
+ netdev_err(ndev, "XXV MAC block lock not complete! Cross-check the MAC ref clock configuration\n");
+ }
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
+ if (!lp->is_tsn) {
+ axienet_rxts_iow(lp, XAXIFIFO_TXTS_RDFR,
+ XAXIFIFO_TXTS_RESET_MASK);
+ axienet_rxts_iow(lp, XAXIFIFO_TXTS_SRR,
+ XAXIFIFO_TXTS_RESET_MASK);
+ axienet_txts_iow(lp, XAXIFIFO_TXTS_RDFR,
+ XAXIFIFO_TXTS_RESET_MASK);
+ axienet_txts_iow(lp, XAXIFIFO_TXTS_SRR,
+ XAXIFIFO_TXTS_RESET_MASK);
+ }
+#endif
+ }
- axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
- if (axienet_status & XAE_INT_RXRJECT_MASK)
- axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
- axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
- XAE_INT_RECV_ERROR_MASK : 0);
+ if ((lp->axienet_config->mactype == XAXIENET_1G) &&
+ !lp->eth_hasnobuf) {
+ axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
+ if (axienet_status & XAE_INT_RXRJECT_MASK)
+ axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
+ /* Enable receive errors */
+ axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
+ XAE_INT_RECV_ERROR_MASK : 0);
+ }
- axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
+ if (lp->axienet_config->mactype == XAXIENET_10G_25G) {
+ lp->options |= XAE_OPTION_FCS_STRIP;
+ lp->options |= XAE_OPTION_FCS_INSERT;
+ } else {
+ axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
+ }
+ lp->axienet_config->setoptions(ndev, lp->options &
+ ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- /* Sync default options with HW but leave receiver and
- * transmitter disabled.
- */
- axienet_setoptions(ndev, lp->options &
- ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
axienet_set_mac_address(ndev, NULL);
axienet_set_multicast_list(ndev);
- axienet_setoptions(ndev, lp->options);
+ lp->axienet_config->setoptions(ndev, lp->options);
netif_trans_update(ndev);
@@ -601,35 +549,53 @@ static int axienet_device_reset(struct net_device *ndev)
* Returns the number of descriptors handled.
*/
static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
- int nr_bds, u32 *sizep)
+ int nr_bds, u32 *sizep, struct axienet_dma_q *q)
{
struct axienet_local *lp = netdev_priv(ndev);
+
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ struct aximcdma_bd *cur_p;
+#else
struct axidma_bd *cur_p;
+#endif
int max_bds = nr_bds;
unsigned int status;
- dma_addr_t phys;
int i;
if (max_bds == -1)
max_bds = lp->tx_bd_num;
for (i = 0; i < max_bds; i++) {
- cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p = &q->txq_bd_v[q->tx_bd_ci];
+ status = cur_p->sband_stats;
+#else
+ cur_p = &q->tx_bd_v[q->tx_bd_ci];
status = cur_p->status;
-
+#endif
/* If no number is given, clean up *all* descriptors that have
* been completed by the MAC.
*/
if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
break;
- phys = desc_get_phys_addr(lp, cur_p);
- dma_unmap_single(ndev->dev.parent, phys,
- (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
- DMA_TO_DEVICE);
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
+ if (cur_p->ptp_tx_skb)
+ axienet_tx_hwtstamp(lp, cur_p);
+#endif
+ if (cur_p->tx_desc_mapping == DESC_DMA_MAP_PAGE)
+ dma_unmap_page(ndev->dev.parent, cur_p->phys,
+ cur_p->cntrl &
+ XAXIDMA_BD_CTRL_LENGTH_MASK,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(ndev->dev.parent, cur_p->phys,
+ cur_p->cntrl &
+ XAXIDMA_BD_CTRL_LENGTH_MASK,
+ DMA_TO_DEVICE);
- if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
- dev_consume_skb_irq(cur_p->skb);
+ if (cur_p->tx_skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
+ dev_kfree_skb_irq((struct sk_buff *)cur_p->tx_skb);
cur_p->cntrl = 0;
cur_p->app0 = 0;
@@ -637,7 +603,10 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
cur_p->app2 = 0;
cur_p->app4 = 0;
cur_p->status = 0;
- cur_p->skb = NULL;
+ cur_p->tx_skb = 0;
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p->sband_stats = 0;
+#endif
if (sizep)
*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
@@ -647,9 +616,201 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
}
/**
+ * axienet_adjust_link - Adjust the PHY link speed/duplex.
+ * @ndev: Pointer to the net_device structure
+ *
+ * This function is called to change the speed and duplex setting after
+ * auto negotiation is done by the PHY. This is the function that gets
+ * registered with the PHY interface through the "of_phy_connect" call.
+ */
+static void axienet_adjust_link(struct net_device *ndev)
+{
+ u32 emmc_reg;
+ u32 link_state;
+ u32 setspeed = 1;
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct phy_device *phy = ndev->phydev;
+
+ link_state = phy->speed | (phy->duplex << 1) | phy->link;
+ if (lp->last_link != link_state) {
+ if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) {
+ if (lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX)
+ setspeed = 0;
+ } else {
+ if ((phy->speed == SPEED_1000) &&
+ (lp->phy_mode == PHY_INTERFACE_MODE_MII))
+ setspeed = 0;
+ }
+
+ if (setspeed == 1) {
+ emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
+ emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
+
+ switch (phy->speed) {
+ case SPEED_2500:
+ emmc_reg |= XAE_EMMC_LINKSPD_2500;
+ break;
+ case SPEED_1000:
+ emmc_reg |= XAE_EMMC_LINKSPD_1000;
+ break;
+ case SPEED_100:
+ emmc_reg |= XAE_EMMC_LINKSPD_100;
+ break;
+ case SPEED_10:
+ emmc_reg |= XAE_EMMC_LINKSPD_10;
+ break;
+ default:
+ dev_err(&ndev->dev, "Speed other than 10, 100 ");
+ dev_err(&ndev->dev, "or 1Gbps is not supported\n");
+ break;
+ }
+
+ axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
+ phy_print_status(phy);
+ } else {
+ netdev_err(ndev,
+ "Error setting Axi Ethernet mac speed\n");
+ }
+
+ lp->last_link = link_state;
+ }
+}
+
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
+/**
+ * axienet_tx_hwtstamp - Read tx timestamp from hw and update it to the skbuff
+ * @lp: Pointer to axienet local structure
+ * @cur_p: Pointer to the axi_dma/axi_mcdma current bd
+ *
+ * Return: None.
+ */
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+void axienet_tx_hwtstamp(struct axienet_local *lp,
+ struct aximcdma_bd *cur_p)
+#else
+void axienet_tx_hwtstamp(struct axienet_local *lp,
+ struct axidma_bd *cur_p)
+#endif
+{
+ u32 sec = 0, nsec = 0, val;
+ u64 time64;
+ int err = 0;
+ u32 count, len = lp->axienet_config->tx_ptplen;
+ struct skb_shared_hwtstamps *shhwtstamps =
+ skb_hwtstamps((struct sk_buff *)cur_p->ptp_tx_skb);
+
+ val = axienet_txts_ior(lp, XAXIFIFO_TXTS_ISR);
+ if (unlikely(!(val & XAXIFIFO_TXTS_INT_RC_MASK)))
+ dev_info(lp->dev, "Did't get FIFO tx interrupt %d\n", val);
+
+ /* If the FIFO is configured in cut-through mode we will get the Rx
+ * complete interrupt even if only one byte is in the FIFO; wait for
+ * the full packet */
+ err = readl_poll_timeout_atomic(lp->tx_ts_regs + XAXIFIFO_TXTS_RLR, val,
+ ((val & XAXIFIFO_TXTS_RXFD_MASK) >=
+ len), 0, 1000000);
+ if (err)
+ netdev_err(lp->ndev, "%s: Didn't get the full timestamp packet",
+ __func__);
+
+ nsec = axienet_txts_ior(lp, XAXIFIFO_TXTS_RXFD);
+ sec = axienet_txts_ior(lp, XAXIFIFO_TXTS_RXFD);
+ val = axienet_txts_ior(lp, XAXIFIFO_TXTS_RXFD);
+ val = ((val & XAXIFIFO_TXTS_TAG_MASK) >> XAXIFIFO_TXTS_TAG_SHIFT);
+ dev_dbg(lp->dev, "tx_stamp:[%04x] %04x %u %9u\n",
+ cur_p->ptp_tx_ts_tag, val, sec, nsec);
+
+ if (val != cur_p->ptp_tx_ts_tag) {
+ count = axienet_txts_ior(lp, XAXIFIFO_TXTS_RFO);
+ while (count) {
+ nsec = axienet_txts_ior(lp, XAXIFIFO_TXTS_RXFD);
+ sec = axienet_txts_ior(lp, XAXIFIFO_TXTS_RXFD);
+ val = axienet_txts_ior(lp, XAXIFIFO_TXTS_RXFD);
+ val = ((val & XAXIFIFO_TXTS_TAG_MASK) >>
+ XAXIFIFO_TXTS_TAG_SHIFT);
+
+ dev_dbg(lp->dev, "tx_stamp:[%04x] %04x %u %9u\n",
+ cur_p->ptp_tx_ts_tag, val, sec, nsec);
+ if (val == cur_p->ptp_tx_ts_tag)
+ break;
+ count = axienet_txts_ior(lp, XAXIFIFO_TXTS_RFO);
+ }
+ if (val != cur_p->ptp_tx_ts_tag) {
+ dev_info(lp->dev, "Mismatching 2-step tag. Got %x",
+ val);
+ dev_info(lp->dev, "Expected %x\n",
+ cur_p->ptp_tx_ts_tag);
+ }
+ }
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G)
+ val = axienet_txts_ior(lp, XAXIFIFO_TXTS_RXFD);
+
+ time64 = sec * NS_PER_SEC + nsec;
+ memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamps->hwtstamp = ns_to_ktime(time64);
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G)
+ skb_pull((struct sk_buff *)cur_p->ptp_tx_skb,
+ AXIENET_TS_HEADER_LEN);
+
+ skb_tstamp_tx((struct sk_buff *)cur_p->ptp_tx_skb, shhwtstamps);
+ dev_kfree_skb_any((struct sk_buff *)cur_p->ptp_tx_skb);
+ cur_p->ptp_tx_skb = 0;
+}
+
+/**
+ * axienet_rx_hwtstamp - Read rx timestamp from hw and update it to the skbuff
+ * @lp: Pointer to axienet local structure
+ * @skb: Pointer to the sk_buff structure
+ *
+ * Return: None.
+ */
+static void axienet_rx_hwtstamp(struct axienet_local *lp,
+ struct sk_buff *skb)
+{
+ u32 sec = 0, nsec = 0, val;
+ u64 time64;
+ int err = 0;
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+
+ val = axienet_rxts_ior(lp, XAXIFIFO_TXTS_ISR);
+ if (unlikely(!(val & XAXIFIFO_TXTS_INT_RC_MASK))) {
+ dev_info(lp->dev, "Did't get FIFO rx interrupt %d\n", val);
+ return;
+ }
+
+ val = axienet_rxts_ior(lp, XAXIFIFO_TXTS_RFO);
+ if (!val)
+ return;
+
+ /* If the FIFO is configured in cut-through mode we will get the Rx
+ * complete interrupt even if only one byte is in the FIFO; wait for
+ * the full packet */
+ err = readl_poll_timeout_atomic(lp->rx_ts_regs + XAXIFIFO_TXTS_RLR, val,
+ ((val & XAXIFIFO_TXTS_RXFD_MASK) >= 12),
+ 0, 1000000);
+ if (err) {
+ netdev_err(lp->ndev, "%s: Didn't get the full timestamp packet",
+ __func__);
+ return;
+ }
+
+ nsec = axienet_rxts_ior(lp, XAXIFIFO_TXTS_RXFD);
+ sec = axienet_rxts_ior(lp, XAXIFIFO_TXTS_RXFD);
+ val = axienet_rxts_ior(lp, XAXIFIFO_TXTS_RXFD);
+
+ if (lp->tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) {
+ time64 = sec * NS_PER_SEC + nsec;
+ shhwtstamps->hwtstamp = ns_to_ktime(time64);
+ }
+}
+#endif
+
+/**
* axienet_start_xmit_done - Invoked once a transmit is completed by the
* Axi DMA Tx channel.
* @ndev: Pointer to the net_device structure
+ * @q: Pointer to DMA queue structure
*
* This function is invoked from the Axi DMA Tx isr to notify the completion
* of transmit operation. It clears fields in the corresponding Tx BDs and
@@ -657,17 +818,18 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
* buffer. It finally invokes "netif_wake_queue" to restart transmission if
* required.
*/
-static void axienet_start_xmit_done(struct net_device *ndev)
+void axienet_start_xmit_done(struct net_device *ndev,
+ struct axienet_dma_q *q)
{
struct axienet_local *lp = netdev_priv(ndev);
u32 packets = 0;
u32 size = 0;
- packets = axienet_free_tx_chain(ndev, lp->tx_bd_ci, -1, &size);
+ packets = axienet_free_tx_chain(ndev, q->tx_bd_ci, -1, &size, q);
- lp->tx_bd_ci += packets;
- if (lp->tx_bd_ci >= lp->tx_bd_num)
- lp->tx_bd_ci -= lp->tx_bd_num;
+ q->tx_bd_ci += packets;
+ if (q->tx_bd_ci >= lp->tx_bd_num)
+ q->tx_bd_ci -= lp->tx_bd_num;
ndev->stats.tx_packets += packets;
ndev->stats.tx_bytes += size;
@@ -675,12 +837,17 @@ static void axienet_start_xmit_done(struct net_device *ndev)
/* Matches barrier in axienet_start_xmit */
smp_mb();
- netif_wake_queue(ndev);
+ /* Fixme: With the existing multiqueue implementation
+ * in the driver it is difficult to get the exact queue info.
+ * We should wake only the particular queue
+ * instead of waking all ndev queues.
+ */
+ netif_tx_wake_all_queues(ndev);
}
/**
* axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
- * @lp: Pointer to the axienet_local structure
+ * @q: Pointer to DMA queue structure
* @num_frag: The number of BDs to check for
*
* Return: 0, on success
@@ -691,48 +858,286 @@ static void axienet_start_xmit_done(struct net_device *ndev)
* transmission. If the BD or any of the BDs are not free the function
* returns a busy status. This is invoked from axienet_start_xmit.
*/
-static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
+static inline int axienet_check_tx_bd_space(struct axienet_dma_q *q,
int num_frag)
{
+ struct axienet_local *lp = q->lp;
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ struct aximcdma_bd *cur_p;
+
+ if (CIRC_SPACE(q->tx_bd_tail, q->tx_bd_ci, lp->tx_bd_num) < (num_frag + 1))
+ return NETDEV_TX_BUSY;
+
+ cur_p = &q->txq_bd_v[(q->tx_bd_tail + num_frag) % lp->tx_bd_num];
+ if (cur_p->sband_stats & XMCDMA_BD_STS_ALL_MASK)
+ return NETDEV_TX_BUSY;
+#else
struct axidma_bd *cur_p;
- cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
+
+ if (CIRC_SPACE(q->tx_bd_tail, q->tx_bd_ci, lp->tx_bd_num) < (num_frag + 1))
+ return NETDEV_TX_BUSY;
+
+ cur_p = &q->tx_bd_v[(q->tx_bd_tail + num_frag) % lp->tx_bd_num];
if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
return NETDEV_TX_BUSY;
+#endif
return 0;
}
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
/**
- * axienet_start_xmit - Starts the transmission.
- * @skb: sk_buff pointer that contains data to be Txed.
- * @ndev: Pointer to net_device structure.
- *
- * Return: NETDEV_TX_OK, on success
- * NETDEV_TX_BUSY, if any of the descriptors are not free
+ * axienet_create_tsheader - Create timestamp header for tx
+ * @q: Pointer to DMA queue structure
+ * @buf: Pointer to the buf to copy timestamp header
+ * @msg_type: PTP message type
*
- * This function is invoked from upper layers to initiate transmission. The
- * function uses the next available free BDs and populates their fields to
- * start the transmission. Additionally if checksum offloading is supported,
- * it populates AXI Stream Control fields with appropriate values.
+ * Return: None.
*/
-static netdev_tx_t
-axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static void axienet_create_tsheader(u8 *buf, u8 msg_type,
+ struct axienet_dma_q *q)
+{
+ struct axienet_local *lp = q->lp;
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ struct aximcdma_bd *cur_p;
+#else
+ struct axidma_bd *cur_p;
+#endif
+ u64 val;
+ u32 tmp;
+
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p = &q->txq_bd_v[q->tx_bd_tail];
+#else
+ cur_p = &q->tx_bd_v[q->tx_bd_tail];
+#endif
+
+ if (msg_type == TX_TS_OP_NOOP) {
+ buf[0] = TX_TS_OP_NOOP;
+ } else if (msg_type == TX_TS_OP_ONESTEP) {
+ buf[0] = TX_TS_OP_ONESTEP;
+ buf[1] = TX_TS_CSUM_UPDATE;
+ buf[4] = TX_PTP_TS_OFFSET;
+ buf[6] = TX_PTP_CSUM_OFFSET;
+ } else {
+ buf[0] = TX_TS_OP_TWOSTEP;
+ buf[2] = cur_p->ptp_tx_ts_tag & 0xFF;
+ buf[3] = (cur_p->ptp_tx_ts_tag >> 8) & 0xFF;
+ }
+
+ if (lp->axienet_config->mactype == XAXIENET_1G ||
+ lp->axienet_config->mactype == XAXIENET_2_5G) {
+ memcpy(&val, buf, AXIENET_TS_HEADER_LEN);
+ swab64s(&val);
+ memcpy(buf, &val, AXIENET_TS_HEADER_LEN);
+ } else if (lp->axienet_config->mactype == XAXIENET_10G_25G) {
+ memcpy(&tmp, buf, XXVENET_TS_HEADER_LEN);
+ axienet_txts_iow(lp, XAXIFIFO_TXTS_TXFD, tmp);
+ axienet_txts_iow(lp, XAXIFIFO_TXTS_TLR, XXVENET_TS_HEADER_LEN);
+ }
+}
+#endif
+
+#ifdef CONFIG_XILINX_TSN
+static inline u16 get_tsn_queue(u8 pcp, u16 num_tc)
+{
+ u16 queue = 0;
+
+ /* For 3 queue system, RE queue is 1 and ST queue is 2
+ * For 2 queue system, ST queue is 1. BE queue is always 0
+ */
+ if (pcp == 4) {
+ if (num_tc == 2)
+ queue = 1;
+ else
+ queue = 2;
+ } else if ((num_tc == 3) && (pcp == 2 || pcp == 3)) {
+ queue = 1;
+ }
+
+ return queue;
+}
+
+static inline u16 tsn_queue_mapping(const struct sk_buff *skb, u16 num_tc)
+{
+ int queue = 0;
+ u16 vlan_tci;
+ u8 pcp;
+
+ struct ethhdr *hdr = (struct ethhdr *)skb->data;
+ u16 ether_type = ntohs(hdr->h_proto);
+
+ if (unlikely(ether_type == ETH_P_8021Q)) {
+ struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)skb->data;
+
+ /* ether_type = ntohs(vhdr->h_vlan_encapsulated_proto); */
+
+ vlan_tci = ntohs(vhdr->h_vlan_TCI);
+
+ pcp = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ pr_debug("vlan_tci: %x\n", vlan_tci);
+ pr_debug("pcp: %d\n", pcp);
+
+ queue = get_tsn_queue(pcp, num_tc);
+ }
+ pr_debug("selected queue: %d\n", queue);
+ return queue;
+}
+#endif
+
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
+static int axienet_skb_tstsmp(struct sk_buff **__skb, struct axienet_dma_q *q,
+ struct net_device *ndev)
+{
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ struct aximcdma_bd *cur_p;
+#else
+ struct axidma_bd *cur_p;
+#endif
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct sk_buff *old_skb = *__skb;
+ struct sk_buff *skb = *__skb;
+
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p = &q->txq_bd_v[q->tx_bd_tail];
+#else
+ cur_p = &q->tx_bd_v[q->tx_bd_tail];
+#endif
+
+ if ((((lp->tstamp_config.tx_type == HWTSTAMP_TX_ONESTEP_SYNC) ||
+ (lp->tstamp_config.tx_type == HWTSTAMP_TX_ON)) ||
+ lp->eth_hasptp) && (lp->axienet_config->mactype !=
+ XAXIENET_10G_25G)) {
+ u8 *tmp;
+ struct sk_buff *new_skb;
+
+ if (skb_headroom(old_skb) < AXIENET_TS_HEADER_LEN) {
+ new_skb =
+ skb_realloc_headroom(old_skb,
+ AXIENET_TS_HEADER_LEN);
+ if (!new_skb) {
+ dev_err(&ndev->dev, "failed to allocate new socket buffer\n");
+ dev_kfree_skb_any(old_skb);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Transfer the ownership to the
+ * new socket buffer if required
+ */
+ if (old_skb->sk)
+ skb_set_owner_w(new_skb, old_skb->sk);
+ dev_kfree_skb_any(old_skb);
+ *__skb = new_skb;
+ skb = new_skb;
+ }
+
+ tmp = skb_push(skb, AXIENET_TS_HEADER_LEN);
+ memset(tmp, 0, AXIENET_TS_HEADER_LEN);
+ cur_p->ptp_tx_ts_tag++;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ if (lp->tstamp_config.tx_type ==
+ HWTSTAMP_TX_ONESTEP_SYNC) {
+ axienet_create_tsheader(tmp,
+ TX_TS_OP_ONESTEP
+ , q);
+ } else {
+ axienet_create_tsheader(tmp,
+ TX_TS_OP_TWOSTEP
+ , q);
+ skb_shinfo(skb)->tx_flags
+ |= SKBTX_IN_PROGRESS;
+ cur_p->ptp_tx_skb =
+ (unsigned long)skb_get(skb);
+ }
+ }
+ } else if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ (lp->axienet_config->mactype == XAXIENET_10G_25G)) {
+ cur_p->ptp_tx_ts_tag = (prandom_u32() &
+ ~XAXIFIFO_TXTS_TAG_MASK) + 1;
+ dev_dbg(lp->dev, "tx_tag:[%04x]\n",
+ cur_p->ptp_tx_ts_tag);
+ if (lp->tstamp_config.tx_type == HWTSTAMP_TX_ONESTEP_SYNC) {
+ axienet_create_tsheader(lp->tx_ptpheader,
+ TX_TS_OP_ONESTEP, q);
+ } else {
+ axienet_create_tsheader(lp->tx_ptpheader,
+ TX_TS_OP_TWOSTEP, q);
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ cur_p->ptp_tx_skb = (phys_addr_t)skb_get(skb);
+ }
+ } else if (lp->axienet_config->mactype == XAXIENET_10G_25G) {
+ dev_dbg(lp->dev, "tx_tag:NOOP\n");
+ axienet_create_tsheader(lp->tx_ptpheader,
+ TX_TS_OP_NOOP, q);
+ }
+
+ return NETDEV_TX_OK;
+}
+#endif
+
+static int axienet_queue_xmit(struct sk_buff *skb,
+ struct net_device *ndev, u16 map)
{
u32 ii;
u32 num_frag;
u32 csum_start_off;
u32 csum_index_off;
- skb_frag_t *frag;
- dma_addr_t tail_p, phys;
+ dma_addr_t tail_p;
struct axienet_local *lp = netdev_priv(ndev);
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ struct aximcdma_bd *cur_p;
+#else
struct axidma_bd *cur_p;
- u32 orig_tail_ptr = lp->tx_bd_tail;
+#endif
+ unsigned long flags;
+ struct axienet_dma_q *q;
+
+ if (lp->axienet_config->mactype == XAXIENET_10G_25G) {
+ /* Need to manually pad the small frames in case of XXV MAC
+ * because the pad field is not added by the IP. We must present
+ * a packet that meets the minimum length to the IP core.
+ * When the IP core is configured to calculate and add the FCS
+ * to the packet the minimum packet length is 60 bytes.
+ */
+ if (eth_skb_pad(skb)) {
+ ndev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+ }
+#ifdef CONFIG_XILINX_TSN
+ if (unlikely(lp->is_tsn)) {
+ map = tsn_queue_mapping(skb, lp->num_tc);
+#ifdef CONFIG_XILINX_TSN_PTP
+ const struct ethhdr *eth;
+
+ eth = (struct ethhdr *)skb->data;
+ /* check if skb is a PTP frame ? */
+ if (eth->h_proto == htons(ETH_P_1588))
+ return axienet_ptp_xmit(skb, ndev);
+#endif
+ if (lp->temac_no == XAE_TEMAC2) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ }
+#endif
num_frag = skb_shinfo(skb)->nr_frags;
- cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
- if (axienet_check_tx_bd_space(lp, num_frag)) {
- if (netif_queue_stopped(ndev))
+ q = lp->dq[map];
+
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p = &q->txq_bd_v[q->tx_bd_tail];
+#else
+ cur_p = &q->tx_bd_v[q->tx_bd_tail];
+#endif
+
+ spin_lock_irqsave(&q->tx_lock, flags);
+ if (axienet_check_tx_bd_space(q, num_frag)) {
+ if (netif_queue_stopped(ndev)) {
+ spin_unlock_irqrestore(&q->tx_lock, flags);
return NETDEV_TX_BUSY;
+ }
netif_stop_queue(ndev);
@@ -740,13 +1145,23 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
smp_mb();
/* Space might have just been freed - check again */
- if (axienet_check_tx_bd_space(lp, num_frag))
+ if (axienet_check_tx_bd_space(q, num_frag)) {
+ spin_unlock_irqrestore(&q->tx_lock, flags);
return NETDEV_TX_BUSY;
+ }
netif_wake_queue(ndev);
}
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
+ if (axienet_skb_tstsmp(&skb, q, ndev)) {
+ spin_unlock_irqrestore(&q->tx_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+#endif
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL && !lp->eth_hasnobuf &&
+ (lp->axienet_config->mactype == XAXIENET_1G)) {
if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
/* Tx Full Checksum Offload Enabled */
cur_p->app0 |= 2;
@@ -757,66 +1172,120 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->app0 |= 1;
cur_p->app1 = (csum_start_off << 16) | csum_index_off;
}
- } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ } else if (skb->ip_summed == CHECKSUM_UNNECESSARY &&
+ !lp->eth_hasnobuf &&
+ (lp->axienet_config->mactype == XAXIENET_1G)) {
cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
}
- phys = dma_map_single(ndev->dev.parent, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
- if (net_ratelimit())
- netdev_err(ndev, "TX DMA mapping error\n");
- ndev->stats.tx_dropped++;
- return NETDEV_TX_OK;
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p->cntrl = (skb_headlen(skb) | XMCDMA_BD_CTRL_TXSOF_MASK);
+#else
+ cur_p->cntrl = (skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK);
+#endif
+
+ if (!q->eth_hasdre &&
+ (((phys_addr_t)skb->data & 0x3) || num_frag > 0)) {
+ skb_copy_and_csum_dev(skb, q->tx_buf[q->tx_bd_tail]);
+
+ cur_p->phys = q->tx_bufs_dma +
+ (q->tx_buf[q->tx_bd_tail] - q->tx_bufs);
+
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p->cntrl = skb_pagelen(skb) | XMCDMA_BD_CTRL_TXSOF_MASK;
+#else
+ cur_p->cntrl = skb_pagelen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
+#endif
+ goto out;
+ } else {
+ cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
}
- desc_set_phys_addr(lp, phys, cur_p);
- cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
+ cur_p->tx_desc_mapping = DESC_DMA_MAP_SINGLE;
for (ii = 0; ii < num_frag; ii++) {
- if (++lp->tx_bd_tail >= lp->tx_bd_num)
- lp->tx_bd_tail = 0;
- cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+ u32 len;
+ skb_frag_t *frag;
+
+ if (++q->tx_bd_tail >= lp->tx_bd_num)
+ q->tx_bd_tail = 0;
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p = &q->txq_bd_v[q->tx_bd_tail];
+#else
+ cur_p = &q->tx_bd_v[q->tx_bd_tail];
+#endif
frag = &skb_shinfo(skb)->frags[ii];
- phys = dma_map_single(ndev->dev.parent,
- skb_frag_address(frag),
- skb_frag_size(frag),
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
- if (net_ratelimit())
- netdev_err(ndev, "TX DMA mapping error\n");
- ndev->stats.tx_dropped++;
- axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1,
- NULL);
- lp->tx_bd_tail = orig_tail_ptr;
-
- return NETDEV_TX_OK;
- }
- desc_set_phys_addr(lp, phys, cur_p);
- cur_p->cntrl = skb_frag_size(frag);
+ len = skb_frag_size(frag);
+ cur_p->phys = skb_frag_dma_map(ndev->dev.parent, frag, 0, len,
+ DMA_TO_DEVICE);
+ cur_p->cntrl = len;
+ cur_p->tx_desc_mapping = DESC_DMA_MAP_PAGE;
}
+out:
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p->cntrl |= XMCDMA_BD_CTRL_TXEOF_MASK;
+ tail_p = q->tx_bd_p + sizeof(*q->txq_bd_v) * q->tx_bd_tail;
+#else
cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
- cur_p->skb = skb;
+ tail_p = q->tx_bd_p + sizeof(*q->tx_bd_v) * q->tx_bd_tail;
+#endif
+ cur_p->tx_skb = (phys_addr_t)skb;
+ cur_p->tx_skb = (phys_addr_t)skb;
+
+ tail_p = q->tx_bd_p + sizeof(*q->tx_bd_v) * q->tx_bd_tail;
+ /* Ensure BD write before starting transfer */
+ wmb();
- tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
/* Start the transfer */
- axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
- if (++lp->tx_bd_tail >= lp->tx_bd_num)
- lp->tx_bd_tail = 0;
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ axienet_dma_bdout(q, XMCDMA_CHAN_TAILDESC_OFFSET(q->chan_id),
+ tail_p);
+#else
+ axienet_dma_bdout(q, XAXIDMA_TX_TDESC_OFFSET, tail_p);
+#endif
+ if (++q->tx_bd_tail >= lp->tx_bd_num)
+ q->tx_bd_tail = 0;
+
+ spin_unlock_irqrestore(&q->tx_lock, flags);
return NETDEV_TX_OK;
}
/**
+ * axienet_start_xmit - Starts the transmission.
+ * @skb: sk_buff pointer that contains data to be Txed.
+ * @ndev: Pointer to net_device structure.
+ *
+ * Return: NETDEV_TX_OK, on success
+ * NETDEV_TX_BUSY, if any of the descriptors are not free
+ *
+ * This function is invoked from upper layers to initiate transmission. The
+ * function uses the next available free BDs and populates their fields to
+ * start the transmission. Additionally if checksum offloading is supported,
+ * it populates AXI Stream Control fields with appropriate values.
+ */
+static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ u16 map = skb_get_queue_mapping(skb); /* Single dma queue default*/
+
+ return axienet_queue_xmit(skb, ndev, map);
+}
+
+/**
* axienet_recv - Is called from Axi DMA Rx Isr to complete the received
* BD processing.
* @ndev: Pointer to net_device structure.
+ * @budget: NAPI budget
+ * @q: Pointer to axienet DMA queue structure
*
- * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It
- * does minimal processing and invokes "netif_rx" to complete further
- * processing.
+ * This function is invoked from the Axi DMA Rx isr(poll) to process the Rx BDs
+ * It does minimal processing and invokes "netif_receive_skb" to complete
+ * further processing.
+ * Return: Number of BD's processed.
*/
-static void axienet_recv(struct net_device *ndev)
+static int axienet_recv(struct net_device *ndev, int budget,
+ struct axienet_dma_q *q)
{
u32 length;
u32 csumstatus;
@@ -825,30 +1294,87 @@ static void axienet_recv(struct net_device *ndev)
dma_addr_t tail_p = 0;
struct axienet_local *lp = netdev_priv(ndev);
struct sk_buff *skb, *new_skb;
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ struct aximcdma_bd *cur_p;
+#else
struct axidma_bd *cur_p;
+#endif
+ unsigned int numbdfree = 0;
+
+	/* Get relevant BD status value */
+ rmb();
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p = &q->rxq_bd_v[q->rx_bd_ci];
+#else
+ cur_p = &q->rx_bd_v[q->rx_bd_ci];
+#endif
- cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
-
- while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
- dma_addr_t phys;
-
- tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
+ while ((numbdfree < budget) &&
+ (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
+ new_skb = netdev_alloc_skb(ndev, lp->max_frm_size);
+ if (!new_skb) {
+ dev_err(lp->dev, "No memory for new_skb\n");
+ break;
+ }
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ tail_p = q->rx_bd_p + sizeof(*q->rxq_bd_v) * q->rx_bd_ci;
+#else
+ tail_p = q->rx_bd_p + sizeof(*q->rx_bd_v) * q->rx_bd_ci;
+#endif
- phys = desc_get_phys_addr(lp, cur_p);
- dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
+ dma_unmap_single(ndev->dev.parent, cur_p->phys,
+ lp->max_frm_size,
DMA_FROM_DEVICE);
- skb = cur_p->skb;
- cur_p->skb = NULL;
- length = cur_p->app4 & 0x0000FFFF;
+ skb = (struct sk_buff *)(cur_p->sw_id_offset);
+
+ if (lp->eth_hasnobuf ||
+ (lp->axienet_config->mactype != XAXIENET_1G))
+ length = cur_p->status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
+ else
+ length = cur_p->app4 & 0x0000FFFF;
skb_put(skb, length);
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
+ if (!lp->is_tsn) {
+ if ((lp->tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL ||
+ lp->eth_hasptp) && (lp->axienet_config->mactype != XAXIENET_10G_25G)) {
+ u32 sec, nsec;
+ u64 time64;
+ struct skb_shared_hwtstamps *shhwtstamps;
+
+ if (lp->axienet_config->mactype == XAXIENET_1G ||
+ lp->axienet_config->mactype == XAXIENET_2_5G) {
+ /* The first 8 bytes will be the timestamp */
+ memcpy(&sec, &skb->data[0], 4);
+ memcpy(&nsec, &skb->data[4], 4);
+
+ sec = cpu_to_be32(sec);
+ nsec = cpu_to_be32(nsec);
+ } else {
+ /* The first 8 bytes will be the timestamp */
+ memcpy(&nsec, &skb->data[0], 4);
+ memcpy(&sec, &skb->data[4], 4);
+ }
+
+ /* Remove these 8 bytes from the buffer */
+ skb_pull(skb, 8);
+ time64 = sec * NS_PER_SEC + nsec;
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp = ns_to_ktime(time64);
+ } else if (lp->axienet_config->mactype == XAXIENET_10G_25G) {
+ axienet_rx_hwtstamp(lp, skb);
+ }
+ }
+#endif
skb->protocol = eth_type_trans(skb, ndev);
/*skb_checksum_none_assert(skb);*/
skb->ip_summed = CHECKSUM_NONE;
/* if we're doing Rx csum offload, set it up */
- if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
+ if (lp->features & XAE_FEATURE_FULL_RX_CSUM &&
+ (lp->axienet_config->mactype == XAXIENET_1G) &&
+ !lp->eth_hasnobuf) {
csumstatus = (cur_p->app2 &
XAE_FULL_CSUM_STATUS_MASK) >> 3;
if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
@@ -857,145 +1383,166 @@ static void axienet_recv(struct net_device *ndev)
}
} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
skb->protocol == htons(ETH_P_IP) &&
- skb->len > 64) {
+ skb->len > 64 && !lp->eth_hasnobuf &&
+ (lp->axienet_config->mactype == XAXIENET_1G)) {
skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
skb->ip_summed = CHECKSUM_COMPLETE;
}
- netif_rx(skb);
+ netif_receive_skb(skb);
size += length;
packets++;
- new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
- if (!new_skb)
- return;
+ /* Ensure that the skb is completely updated
+		 * prior to mapping the DMA
+ */
+ wmb();
- phys = dma_map_single(ndev->dev.parent, new_skb->data,
- lp->max_frm_size,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+ cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
+ lp->max_frm_size,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ndev->dev.parent, cur_p->phys))) {
if (net_ratelimit())
netdev_err(ndev, "RX DMA mapping error\n");
dev_kfree_skb(new_skb);
- return;
+ break;
}
- desc_set_phys_addr(lp, phys, cur_p);
cur_p->cntrl = lp->max_frm_size;
cur_p->status = 0;
- cur_p->skb = new_skb;
+ cur_p->sw_id_offset = (phys_addr_t)new_skb;
- if (++lp->rx_bd_ci >= lp->rx_bd_num)
- lp->rx_bd_ci = 0;
- cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
+ if (++q->rx_bd_ci >= lp->rx_bd_num)
+ q->rx_bd_ci = 0;
+
+		/* Get relevant BD status value */
+ rmb();
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ cur_p = &q->rxq_bd_v[q->rx_bd_ci];
+#else
+ cur_p = &q->rx_bd_v[q->rx_bd_ci];
+#endif
+ numbdfree++;
}
ndev->stats.rx_packets += packets;
ndev->stats.rx_bytes += size;
+ q->rx_packets += packets;
+ q->rx_bytes += size;
+
+ if (tail_p) {
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ axienet_dma_bdout(q, XMCDMA_CHAN_TAILDESC_OFFSET(q->chan_id) +
+ q->rx_offset, tail_p);
+#else
+ axienet_dma_bdout(q, XAXIDMA_RX_TDESC_OFFSET, tail_p);
+#endif
+ }
- if (tail_p)
- axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
+ return numbdfree;
}
/**
- * axienet_tx_irq - Tx Done Isr.
- * @irq: irq number
- * @_ndev: net_device pointer
+ * xaxienet_rx_poll - Poll routine for rx packets (NAPI)
+ * @napi: napi structure pointer
+ * @quota: Max number of rx packets to be processed.
*
- * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
+ * This is the poll routine for rx part.
+ * It will process packets up to the maximum quota value.
*
- * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
- * to complete the BD processing.
+ * Return: number of packets received
*/
-static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
+int xaxienet_rx_poll(struct napi_struct *napi, int quota)
{
- u32 cr;
- unsigned int status;
- struct net_device *ndev = _ndev;
+ struct net_device *ndev = napi->dev;
struct axienet_local *lp = netdev_priv(ndev);
-
- status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
- if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
- axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
- axienet_start_xmit_done(lp->ndev);
- goto out;
+ int work_done = 0;
+ unsigned int status, cr;
+
+ int map = napi - lp->napi;
+
+ struct axienet_dma_q *q = lp->dq[map];
+
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ spin_lock(&q->rx_lock);
+ status = axienet_dma_in32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id) +
+ q->rx_offset);
+ while ((status & (XMCDMA_IRQ_IOC_MASK | XMCDMA_IRQ_DELAY_MASK)) &&
+ (work_done < quota)) {
+ axienet_dma_out32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id) +
+ q->rx_offset, status);
+ if (status & XMCDMA_IRQ_ERR_MASK) {
+ dev_err(lp->dev, "Rx error 0x%x\n\r", status);
+ break;
+ }
+ work_done += axienet_recv(lp->ndev, quota - work_done, q);
+ status = axienet_dma_in32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id) +
+ q->rx_offset);
}
- if (!(status & XAXIDMA_IRQ_ALL_MASK))
- return IRQ_NONE;
- if (status & XAXIDMA_IRQ_ERROR_MASK) {
- dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
- dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
- (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
- (lp->tx_bd_v[lp->tx_bd_ci]).phys);
-
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* Write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
-
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* Write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
- schedule_work(&lp->dma_err_task);
- axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
+ spin_unlock(&q->rx_lock);
+#else
+ spin_lock(&q->rx_lock);
+
+ status = axienet_dma_in32(q, XAXIDMA_RX_SR_OFFSET);
+ while ((status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) &&
+ (work_done < quota)) {
+ axienet_dma_out32(q, XAXIDMA_RX_SR_OFFSET, status);
+ if (status & XAXIDMA_IRQ_ERROR_MASK) {
+ dev_err(lp->dev, "Rx error 0x%x\n\r", status);
+ break;
+ }
+ work_done += axienet_recv(lp->ndev, quota - work_done, q);
+ status = axienet_dma_in32(q, XAXIDMA_RX_SR_OFFSET);
}
-out:
- return IRQ_HANDLED;
+ spin_unlock(&q->rx_lock);
+#endif
+
+ if (work_done < quota) {
+ napi_complete(napi);
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ /* Enable the interrupts again */
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ XMCDMA_RX_OFFSET);
+ cr |= (XMCDMA_IRQ_IOC_MASK | XMCDMA_IRQ_DELAY_MASK);
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ XMCDMA_RX_OFFSET, cr);
+#else
+ /* Enable the interrupts again */
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ cr |= (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);
+#endif
+ }
+
+ return work_done;
}
-/**
- * axienet_rx_irq - Rx Isr.
- * @irq: irq number
- * @_ndev: net_device pointer
- *
- * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
- *
- * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
- * processing.
- */
-static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
+static int axienet_mii_init(struct net_device *ndev)
{
- u32 cr;
- unsigned int status;
- struct net_device *ndev = _ndev;
struct axienet_local *lp = netdev_priv(ndev);
+ int ret;
- status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
- if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
- axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
- axienet_recv(lp->ndev);
- goto out;
- }
- if (!(status & XAXIDMA_IRQ_ALL_MASK))
- return IRQ_NONE;
- if (status & XAXIDMA_IRQ_ERROR_MASK) {
- dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
- dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
- (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
- (lp->rx_bd_v[lp->rx_bd_ci]).phys);
-
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* Finally write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
-
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
- schedule_work(&lp->dma_err_task);
- axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
- }
-out:
- return IRQ_HANDLED;
+ /* Disable the MDIO interface till Axi Ethernet Reset is completed.
+ * When we do an Axi Ethernet reset, it resets the complete core
+ * including the MDIO. MDIO must be disabled before resetting
+ * and re-enabled afterwards.
+ * Hold MDIO bus lock to avoid MDIO accesses during the reset.
+ */
+
+ mutex_lock(&lp->mii_bus->mdio_lock);
+ ret = axienet_mdio_wait_until_ready(lp);
+ if (ret < 0)
+ return ret;
+ axienet_mdio_disable(lp);
+ axienet_device_reset(ndev);
+ ret = axienet_mdio_enable(lp);
+ ret = axienet_mdio_wait_until_ready(lp);
+ mutex_unlock(&lp->mii_bus->mdio_lock);
+ if (ret < 0)
+ return ret;
+
+ return 0;
}
/**
@@ -1027,8 +1574,6 @@ static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
return IRQ_HANDLED;
}
-static void axienet_dma_err_handler(struct work_struct *work);
-
/**
* axienet_open - Driver open routine.
* @ndev: Pointer to net_device structure
@@ -1036,73 +1581,234 @@ static void axienet_dma_err_handler(struct work_struct *work);
* Return: 0, on success.
* non-zero error value on failure
*
- * This is the driver open routine. It calls phylink_start to start the
- * PHY device.
+ * This is the driver open routine. It calls phy_start to start the PHY device.
* It also allocates interrupt service routines, enables the interrupt lines
* and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
* descriptors are initialized.
*/
static int axienet_open(struct net_device *ndev)
{
- int ret;
+ int ret = 0, i = 0;
struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q;
+ u32 reg, err;
+ struct phy_device *phydev = NULL;
dev_dbg(&ndev->dev, "axienet_open()\n");
- /* Disable the MDIO interface till Axi Ethernet Reset is completed.
- * When we do an Axi Ethernet reset, it resets the complete core
- * including the MDIO. MDIO must be disabled before resetting
- * and re-enabled afterwards.
- * Hold MDIO bus lock to avoid MDIO accesses during the reset.
- */
- mutex_lock(&lp->mii_bus->mdio_lock);
- axienet_mdio_disable(lp);
- ret = axienet_device_reset(ndev);
- if (ret == 0)
- ret = axienet_mdio_enable(lp);
- mutex_unlock(&lp->mii_bus->mdio_lock);
+ if (lp->axienet_config->mactype == XAXIENET_10G_25G)
+ axienet_device_reset(ndev);
+ else
+ ret = axienet_mii_init(ndev);
+
if (ret < 0)
return ret;
- ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
- if (ret) {
- dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
- return ret;
+ if (lp->phy_node) {
+ if (lp->phy_mode == XAE_PHY_TYPE_GMII) {
+ phydev = of_phy_connect(lp->ndev, lp->phy_node,
+ axienet_adjust_link, 0,
+ PHY_INTERFACE_MODE_GMII);
+ } else if (lp->phy_mode == XAE_PHY_TYPE_RGMII_2_0) {
+ phydev = of_phy_connect(lp->ndev, lp->phy_node,
+ axienet_adjust_link, 0,
+ PHY_INTERFACE_MODE_RGMII_ID);
+ } else if ((lp->axienet_config->mactype == XAXIENET_1G) ||
+ (lp->axienet_config->mactype == XAXIENET_2_5G)) {
+ phydev = of_phy_connect(lp->ndev, lp->phy_node,
+ axienet_adjust_link,
+ lp->phy_flags,
+ lp->phy_interface);
+ }
+
+ if (!phydev)
+ dev_err(lp->dev, "of_phy_connect() failed\n");
+ else
+ phy_start(phydev);
}
- phylink_start(lp->phylink);
+ if (!lp->is_tsn || lp->temac_no == XAE_TEMAC1) {
+ /* Enable tasklets for Axi DMA error handling */
+ for_each_rx_dma_queue(lp, i) {
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ tasklet_init(&lp->dma_err_tasklet[i],
+ axienet_mcdma_err_handler,
+ (unsigned long)lp->dq[i]);
+#else
+ tasklet_init(&lp->dma_err_tasklet[i],
+ axienet_dma_err_handler,
+ (unsigned long)lp->dq[i]);
+#endif
- /* Enable worker thread for Axi DMA error handling */
- INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
+ /* Enable NAPI scheduling before enabling Axi DMA Rx IRQ, or you
+ * might run into a race condition; the RX ISR disables IRQ processing
+ * before scheduling the NAPI function to complete the processing.
+ * If NAPI scheduling is (still) disabled at that time, no more RX IRQs
+ * will be processed as only the NAPI function re-enables them!
+ */
+ napi_enable(&lp->napi[i]);
+ }
+ for_each_tx_dma_queue(lp, i) {
+ struct axienet_dma_q *q = lp->dq[i];
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ /* Enable interrupts for Axi MCDMA Tx */
+ ret = request_irq(q->tx_irq, axienet_mcdma_tx_irq,
+ IRQF_SHARED, ndev->name, ndev);
+ if (ret)
+ goto err_tx_irq;
+#else
+ /* Enable interrupts for Axi DMA Tx */
+ ret = request_irq(q->tx_irq, axienet_tx_irq,
+ 0, ndev->name, ndev);
+ if (ret)
+ goto err_tx_irq;
+#endif
+ }
+
+ for_each_rx_dma_queue(lp, i) {
+ struct axienet_dma_q *q = lp->dq[i];
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ /* Enable interrupts for Axi MCDMA Rx */
+ ret = request_irq(q->rx_irq, axienet_mcdma_rx_irq,
+ IRQF_SHARED, ndev->name, ndev);
+ if (ret)
+ goto err_rx_irq;
+#else
+ /* Enable interrupts for Axi DMA Rx */
+ ret = request_irq(q->rx_irq, axienet_rx_irq,
+ 0, ndev->name, ndev);
+ if (ret)
+ goto err_rx_irq;
+#endif
+ }
+ }
+#ifdef CONFIG_XILINX_TSN_PTP
+ if (lp->is_tsn) {
+ INIT_WORK(&lp->tx_tstamp_work, axienet_tx_tstamp);
+ skb_queue_head_init(&lp->ptp_txq);
+
+ lp->ptp_rx_hw_pointer = 0;
+ lp->ptp_rx_sw_pointer = 0xff;
+
+ axienet_iow(lp, PTP_RX_CONTROL_OFFSET, PTP_RX_PACKET_CLEAR);
+
+ ret = request_irq(lp->ptp_rx_irq, axienet_ptp_rx_irq,
+ 0, "ptp_rx", ndev);
+ if (ret)
+ goto err_ptp_rx_irq;
+
+ ret = request_irq(lp->ptp_tx_irq, axienet_ptp_tx_irq,
+ 0, "ptp_tx", ndev);
+ if (ret)
+ goto err_ptp_rx_irq;
+ }
+#endif
+
+ if (lp->phy_mode == XXE_PHY_TYPE_USXGMII) {
+ netdev_dbg(ndev, "RX reg: 0x%x\n",
+ axienet_ior(lp, XXV_RCW1_OFFSET));
+ /* USXGMII setup at selected speed */
+ reg = axienet_ior(lp, XXV_USXGMII_AN_OFFSET);
+ reg &= ~USXGMII_RATE_MASK;
+ netdev_dbg(ndev, "usxgmii_rate %d\n", lp->usxgmii_rate);
+ switch (lp->usxgmii_rate) {
+ case SPEED_1000:
+ reg |= USXGMII_RATE_1G;
+ break;
+ case SPEED_2500:
+ reg |= USXGMII_RATE_2G5;
+ break;
+ case SPEED_10:
+ reg |= USXGMII_RATE_10M;
+ break;
+ case SPEED_100:
+ reg |= USXGMII_RATE_100M;
+ break;
+ case SPEED_5000:
+ reg |= USXGMII_RATE_5G;
+ break;
+ case SPEED_10000:
+ reg |= USXGMII_RATE_10G;
+ break;
+ default:
+ reg |= USXGMII_RATE_1G;
+ }
+ reg |= USXGMII_FD;
+ reg |= (USXGMII_EN | USXGMII_LINK_STS);
+ axienet_iow(lp, XXV_USXGMII_AN_OFFSET, reg);
+ reg |= USXGMII_AN_EN;
+ axienet_iow(lp, XXV_USXGMII_AN_OFFSET, reg);
+ /* AN Restart bit should be reset, set and then reset as per
+ * spec with a 1 ms delay for a raising edge trigger
+ */
+ axienet_iow(lp, XXV_USXGMII_AN_OFFSET,
+ reg & ~USXGMII_AN_RESTART);
+ mdelay(1);
+ axienet_iow(lp, XXV_USXGMII_AN_OFFSET,
+ reg | USXGMII_AN_RESTART);
+ mdelay(1);
+ axienet_iow(lp, XXV_USXGMII_AN_OFFSET,
+ reg & ~USXGMII_AN_RESTART);
+
+ /* Check block lock bit to make sure RX path is ok with
+ * USXGMII initialization.
+ */
+ err = readl_poll_timeout(lp->regs + XXV_STATRX_BLKLCK_OFFSET,
+ reg, (reg & XXV_RX_BLKLCK_MASK),
+ 100, DELAY_OF_ONE_MILLISEC);
+ if (err) {
+ netdev_err(ndev, "%s: USXGMII Block lock bit not set",
+ __func__);
+ ret = -ENODEV;
+ goto err_eth_irq;
+ }
+
+ err = readl_poll_timeout(lp->regs + XXV_USXGMII_AN_STS_OFFSET,
+ reg, (reg & USXGMII_AN_STS_COMP_MASK),
+ 1000000, DELAY_OF_ONE_MILLISEC);
+ if (err) {
+ netdev_err(ndev, "%s: USXGMII AN not complete",
+ __func__);
+ ret = -ENODEV;
+ goto err_eth_irq;
+ }
+
+ netdev_info(ndev, "USXGMII setup at %d\n", lp->usxgmii_rate);
+ }
- /* Enable interrupts for Axi DMA Tx */
- ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
- ndev->name, ndev);
- if (ret)
- goto err_tx_irq;
- /* Enable interrupts for Axi DMA Rx */
- ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
- ndev->name, ndev);
- if (ret)
- goto err_rx_irq;
/* Enable interrupts for Axi Ethernet core (if defined) */
- if (lp->eth_irq > 0) {
+ if (!lp->eth_hasnobuf && (lp->axienet_config->mactype == XAXIENET_1G)) {
ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
ndev->name, ndev);
if (ret)
goto err_eth_irq;
}
+ netif_tx_start_all_queues(ndev);
return 0;
err_eth_irq:
- free_irq(lp->rx_irq, ndev);
+ while (i--) {
+ q = lp->dq[i];
+ free_irq(q->rx_irq, ndev);
+ }
+ i = lp->num_tx_queues;
err_rx_irq:
- free_irq(lp->tx_irq, ndev);
+ while (i--) {
+ q = lp->dq[i];
+ free_irq(q->tx_irq, ndev);
+ }
err_tx_irq:
- phylink_stop(lp->phylink);
- phylink_disconnect_phy(lp->phylink);
- cancel_work_sync(&lp->dma_err_task);
+ for_each_rx_dma_queue(lp, i)
+ napi_disable(&lp->napi[i]);
+ if (phydev)
+ phy_disconnect(phydev);
+ phydev = NULL;
+#ifdef CONFIG_XILINX_TSN_PTP
+err_ptp_rx_irq:
+#endif
+ for_each_rx_dma_queue(lp, i)
+ tasklet_kill(&lp->dma_err_tasklet[i]);
dev_err(lp->dev, "request_irq() failed\n");
return ret;
}
@@ -1113,7 +1819,7 @@ err_tx_irq:
*
* Return: 0, on success.
*
- * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
+ * This is the driver stop routine. It calls phy_disconnect to stop the PHY
* device. It also removes the interrupt handlers and disables the interrupts.
* The Axi DMA Tx/Rx BDs are released.
*/
@@ -1121,54 +1827,78 @@ static int axienet_stop(struct net_device *ndev)
{
u32 cr, sr;
int count;
+ u32 i;
struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q;
dev_dbg(&ndev->dev, "axienet_close()\n");
- phylink_stop(lp->phylink);
- phylink_disconnect_phy(lp->phylink);
-
- axienet_setoptions(ndev, lp->options &
+ lp->axienet_config->setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ if (!lp->is_tsn || lp->temac_no == XAE_TEMAC1) {
+ for_each_tx_dma_queue(lp, i) {
+ q = lp->dq[i];
+ cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
+ cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET, cr);
- axienet_iow(lp, XAE_IE_OFFSET, 0);
+ axienet_iow(lp, XAE_IE_OFFSET, 0);
- /* Give DMAs a chance to halt gracefully */
- sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
- for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
- msleep(20);
- sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
- }
+ /* Give DMAs a chance to halt gracefully */
+ sr = axienet_dma_in32(q, XAXIDMA_RX_SR_OFFSET);
+ for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
+ msleep(20);
+ sr = axienet_dma_in32(q, XAXIDMA_RX_SR_OFFSET);
+ }
- sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
- for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
- msleep(20);
- sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
- }
+ sr = axienet_dma_in32(q, XAXIDMA_TX_SR_OFFSET);
+ for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
+ msleep(20);
+ sr = axienet_dma_in32(q, XAXIDMA_TX_SR_OFFSET);
+ }
- /* Do a reset to ensure DMA is really stopped */
- mutex_lock(&lp->mii_bus->mdio_lock);
- axienet_mdio_disable(lp);
- __axienet_device_reset(lp);
- axienet_mdio_enable(lp);
- mutex_unlock(&lp->mii_bus->mdio_lock);
+ /* Do a reset to ensure DMA is really stopped */
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
+ mutex_lock(&lp->mii_bus->mdio_lock);
+ axienet_mdio_disable(lp);
+ }
- cancel_work_sync(&lp->dma_err_task);
+ __axienet_device_reset(q);
- if (lp->eth_irq > 0)
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
+ axienet_mdio_enable(lp);
+ mutex_unlock(&lp->mii_bus->mdio_lock);
+ }
+ free_irq(q->tx_irq, ndev);
+ }
+
+ for_each_rx_dma_queue(lp, i) {
+ q = lp->dq[i];
+ netif_stop_queue(ndev);
+ napi_disable(&lp->napi[i]);
+ tasklet_kill(&lp->dma_err_tasklet[i]);
+ free_irq(q->rx_irq, ndev);
+ }
+#ifdef CONFIG_XILINX_TSN_PTP
+ if (lp->is_tsn) {
+ free_irq(lp->ptp_tx_irq, ndev);
+ free_irq(lp->ptp_rx_irq, ndev);
+ }
+#endif
+ if ((lp->axienet_config->mactype == XAXIENET_1G) && !lp->eth_hasnobuf)
free_irq(lp->eth_irq, ndev);
- free_irq(lp->tx_irq, ndev);
- free_irq(lp->rx_irq, ndev);
- axienet_dma_bd_release(ndev);
+ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
+
+ if (lp->temac_no != XAE_TEMAC2)
+ axienet_dma_bd_release(ndev);
+ }
return 0;
}
@@ -1210,23 +1940,208 @@ static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
static void axienet_poll_controller(struct net_device *ndev)
{
struct axienet_local *lp = netdev_priv(ndev);
- disable_irq(lp->tx_irq);
- disable_irq(lp->rx_irq);
- axienet_rx_irq(lp->tx_irq, ndev);
- axienet_tx_irq(lp->rx_irq, ndev);
- enable_irq(lp->tx_irq);
- enable_irq(lp->rx_irq);
+ int i;
+
+ for_each_tx_dma_queue(lp, i)
+ disable_irq(lp->dq[i]->tx_irq);
+ for_each_rx_dma_queue(lp, i)
+ disable_irq(lp->dq[i]->rx_irq);
+
+ for_each_rx_dma_queue(lp, i)
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ axienet_mcdma_rx_irq(lp->dq[i]->rx_irq, ndev);
+#else
+ axienet_rx_irq(lp->dq[i]->rx_irq, ndev);
+#endif
+ for_each_tx_dma_queue(lp, i)
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ axienet_mcdma_tx_irq(lp->dq[i]->tx_irq, ndev);
+#else
+ axienet_tx_irq(lp->dq[i]->tx_irq, ndev);
+#endif
+ for_each_tx_dma_queue(lp, i)
+ enable_irq(lp->dq[i]->tx_irq);
+ for_each_rx_dma_queue(lp, i)
+ enable_irq(lp->dq[i]->rx_irq);
+}
+#endif
+
+#if defined(CONFIG_XILINX_AXI_EMAC_HWTSTAMP) || defined(CONFIG_XILINX_TSN_PTP)
+/**
+ * axienet_set_timestamp_mode - sets up the hardware for the requested mode
+ * @lp: Pointer to axienet local structure
+ * @config: the hwtstamp configuration requested
+ *
+ * Return: 0 on success, Negative value on errors
+ */
+static int axienet_set_timestamp_mode(struct axienet_local *lp,
+ struct hwtstamp_config *config)
+{
+ u32 regval;
+
+#ifdef CONFIG_XILINX_TSN_PTP
+ if (lp->is_tsn) {
+ /* reserved for future extensions */
+ if (config->flags)
+ return -EINVAL;
+
+ if (config->tx_type < HWTSTAMP_TX_OFF ||
+ config->tx_type > HWTSTAMP_TX_ONESTEP_SYNC)
+ return -ERANGE;
+
+ lp->ptp_ts_type = config->tx_type;
+
+ /* On RX always timestamp everything */
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ default:
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ }
+ return 0;
+ }
+#endif
+
+ /* reserved for future extensions */
+ if (config->flags)
+ return -EINVAL;
+
+ /* Read the current value in the MAC TX CTRL register */
+ regval = axienet_ior(lp, XAE_TC_OFFSET);
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ regval &= ~XAE_TC_INBAND1588_MASK;
+ break;
+ case HWTSTAMP_TX_ON:
+ config->tx_type = HWTSTAMP_TX_ON;
+ regval |= XAE_TC_INBAND1588_MASK;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ config->tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
+ regval |= XAE_TC_INBAND1588_MASK;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G)
+ axienet_iow(lp, XAE_TC_OFFSET, regval);
+
+ /* Read the current value in the MAC RX RCW1 register */
+ regval = axienet_ior(lp, XAE_RCW1_OFFSET);
+
+ /* On RX always timestamp everything */
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ regval &= ~XAE_RCW1_INBAND1588_MASK;
+ break;
+ default:
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ regval |= XAE_RCW1_INBAND1588_MASK;
+ }
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G)
+ axienet_iow(lp, XAE_RCW1_OFFSET, regval);
+
+ return 0;
+}
+
+/**
+ * axienet_set_ts_config - user entry point for timestamp mode
+ * @lp: Pointer to axienet local structure
+ * @ifr: ioctl data
+ *
+ * Set hardware to the requested more. If unsupported return an error
+ * with no changes. Otherwise, store the mode for future reference
+ *
+ * Return: 0 on success, Negative value on errors
+ */
+static int axienet_set_ts_config(struct axienet_local *lp, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ int err;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ err = axienet_set_timestamp_mode(lp, &config);
+ if (err)
+ return err;
+
+ /* save these settings for future reference */
+ memcpy(&lp->tstamp_config, &config, sizeof(lp->tstamp_config));
+
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(config)) ? -EFAULT : 0;
+}
+
+/**
+ * axienet_get_ts_config - return the current timestamp configuration
+ * to the user
+ * @lp: pointer to axienet local structure
+ * @ifr: ioctl data
+ *
+ * Return: 0 on success, Negative value on errors
+ */
+static int axienet_get_ts_config(struct axienet_local *lp, struct ifreq *ifr)
+{
+ struct hwtstamp_config *config = &lp->tstamp_config;
+
+ return copy_to_user(ifr->ifr_data, config,
+ sizeof(*config)) ? -EFAULT : 0;
}
#endif
+/* Ioctl MII Interface */
static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
+#if defined(CONFIG_XILINX_AXI_EMAC_HWTSTAMP) || defined(CONFIG_XILINX_TSN_PTP)
struct axienet_local *lp = netdev_priv(dev);
+#endif
if (!netif_running(dev))
return -EINVAL;
- return phylink_mii_ioctl(lp->phylink, rq, cmd);
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ if (!dev->phydev)
+ return -EOPNOTSUPP;
+ return phy_mii_ioctl(dev->phydev, rq, cmd);
+#if defined(CONFIG_XILINX_AXI_EMAC_HWTSTAMP) || defined(CONFIG_XILINX_TSN_PTP)
+ case SIOCSHWTSTAMP:
+ return axienet_set_ts_config(lp, rq);
+ case SIOCGHWTSTAMP:
+ return axienet_get_ts_config(lp, rq);
+#endif
+#ifdef CONFIG_XILINX_TSN_QBV
+ case SIOCCHIOCTL:
+ return axienet_set_schedule(dev, rq->ifr_data);
+ case SIOC_GET_SCHED:
+ return axienet_get_schedule(dev, rq->ifr_data);
+#endif
+#ifdef CONFIG_XILINX_TSN_QBR
+ case SIOC_PREEMPTION_CFG:
+ return axienet_preemption(dev, rq->ifr_data);
+ case SIOC_PREEMPTION_CTRL:
+ return axienet_preemption_ctrl(dev, rq->ifr_data);
+ case SIOC_PREEMPTION_STS:
+ return axienet_preemption_sts(dev, rq->ifr_data);
+ case SIOC_PREEMPTION_COUNTER:
+ return axienet_preemption_cnt(dev, rq->ifr_data);
+#ifdef CONFIG_XILINX_TSN_QBV
+ case SIOC_QBU_USER_OVERRIDE:
+ return axienet_qbu_user_override(dev, rq->ifr_data);
+ case SIOC_QBU_STS:
+ return axienet_qbu_sts(dev, rq->ifr_data);
+#endif
+#endif
+
+ default:
+ return -EOPNOTSUPP;
+ }
}
static const struct net_device_ops axienet_netdev_ops = {
@@ -1236,8 +2151,8 @@ static const struct net_device_ops axienet_netdev_ops = {
.ndo_change_mtu = axienet_change_mtu,
.ndo_set_mac_address = netdev_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = axienet_ioctl,
.ndo_set_rx_mode = axienet_set_multicast_list,
+ .ndo_do_ioctl = axienet_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = axienet_poll_controller,
#endif
@@ -1286,7 +2201,7 @@ static int axienet_ethtools_get_regs_len(struct net_device *ndev)
static void axienet_ethtools_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *ret)
{
- u32 *data = (u32 *) ret;
+ u32 *data = (u32 *)ret;
size_t len = sizeof(u32) * AXIENET_REGS_N;
struct axienet_local *lp = netdev_priv(ndev);
@@ -1312,24 +2227,29 @@ static void axienet_ethtools_get_regs(struct net_device *ndev,
data[15] = axienet_ior(lp, XAE_TC_OFFSET);
data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
- data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
+ data[18] = axienet_ior(lp, XAE_RMFC_OFFSET);
data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
+ data[23] = axienet_ior(lp, XAE_TEMAC_IS_OFFSET);
+ data[24] = axienet_ior(lp, XAE_TEMAC_IP_OFFSET);
+ data[25] = axienet_ior(lp, XAE_TEMAC_IE_OFFSET);
+ data[26] = axienet_ior(lp, XAE_TEMAC_IC_OFFSET);
data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
- data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
+ data[29] = axienet_ior(lp, XAE_FMC_OFFSET);
data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
- data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
- data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
- data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
- data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
- data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
- data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
+ /* Support only single DMA queue */
+ data[32] = axienet_dma_in32(lp->dq[0], XAXIDMA_TX_CR_OFFSET);
+ data[33] = axienet_dma_in32(lp->dq[0], XAXIDMA_TX_SR_OFFSET);
+ data[34] = axienet_dma_in32(lp->dq[0], XAXIDMA_TX_CDESC_OFFSET);
+ data[35] = axienet_dma_in32(lp->dq[0], XAXIDMA_TX_TDESC_OFFSET);
+ data[36] = axienet_dma_in32(lp->dq[0], XAXIDMA_RX_CR_OFFSET);
+ data[37] = axienet_dma_in32(lp->dq[0], XAXIDMA_RX_SR_OFFSET);
+ data[38] = axienet_dma_in32(lp->dq[0], XAXIDMA_RX_CDESC_OFFSET);
+ data[39] = axienet_dma_in32(lp->dq[0], XAXIDMA_RX_TDESC_OFFSET);
}
static void axienet_ethtools_get_ringparam(struct net_device *ndev,
@@ -1379,16 +2299,20 @@ static void
axienet_ethtools_get_pauseparam(struct net_device *ndev,
struct ethtool_pauseparam *epauseparm)
{
+ u32 regval;
struct axienet_local *lp = netdev_priv(ndev);
- phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
+ epauseparm->autoneg = 0;
+ regval = axienet_ior(lp, XAE_FCC_OFFSET);
+ epauseparm->tx_pause = regval & XAE_FCC_FCTX_MASK;
+ epauseparm->rx_pause = regval & XAE_FCC_FCRX_MASK;
}
/**
* axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
* settings.
* @ndev: Pointer to net_device structure
- * @epauseparm:Pointer to ethtool_pauseparam structure
+ * @epauseparm: Pointer to ethtool_pauseparam structure
*
* This implements ethtool command for enabling flow control on Rx and Tx
* paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
@@ -1400,9 +2324,27 @@ static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
struct ethtool_pauseparam *epauseparm)
{
+ u32 regval = 0;
struct axienet_local *lp = netdev_priv(ndev);
- return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
+ if (netif_running(ndev)) {
+ netdev_err(ndev,
+ "Please stop netif before applying configuration\n");
+ return -EFAULT;
+ }
+
+ regval = axienet_ior(lp, XAE_FCC_OFFSET);
+ if (epauseparm->tx_pause)
+ regval |= XAE_FCC_FCTX_MASK;
+ else
+ regval &= ~XAE_FCC_FCTX_MASK;
+ if (epauseparm->rx_pause)
+ regval |= XAE_FCC_FCRX_MASK;
+ else
+ regval &= ~XAE_FCC_FCRX_MASK;
+ axienet_iow(lp, XAE_FCC_OFFSET, regval);
+
+ return 0;
}
/**
@@ -1421,12 +2363,24 @@ static int axienet_ethtools_get_coalesce(struct net_device *ndev,
{
u32 regval = 0;
struct axienet_local *lp = netdev_priv(ndev);
- regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
- >> XAXIDMA_COALESCE_SHIFT;
- regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
- >> XAXIDMA_COALESCE_SHIFT;
+ struct axienet_dma_q *q;
+ int i;
+
+ for_each_rx_dma_queue(lp, i) {
+ q = lp->dq[i];
+
+ regval = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
+ ecoalesce->rx_max_coalesced_frames +=
+ (regval & XAXIDMA_COALESCE_MASK)
+ >> XAXIDMA_COALESCE_SHIFT;
+ }
+ for_each_tx_dma_queue(lp, i) {
+ q = lp->dq[i];
+ regval = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
+ ecoalesce->tx_max_coalesced_frames +=
+ (regval & XAXIDMA_COALESCE_MASK)
+ >> XAXIDMA_COALESCE_SHIFT;
+ }
return 0;
}
@@ -1460,23 +2414,31 @@ static int axienet_ethtools_set_coalesce(struct net_device *ndev,
return 0;
}
-static int
-axienet_ethtools_get_link_ksettings(struct net_device *ndev,
- struct ethtool_link_ksettings *cmd)
-{
- struct axienet_local *lp = netdev_priv(ndev);
-
- return phylink_ethtool_ksettings_get(lp->phylink, cmd);
-}
-
-static int
-axienet_ethtools_set_link_ksettings(struct net_device *ndev,
- const struct ethtool_link_ksettings *cmd)
+#if defined(CONFIG_XILINX_AXI_EMAC_HWTSTAMP) || defined(CONFIG_XILINX_TSN_PTP)
+/**
+ * axienet_ethtools_get_ts_info - Get h/w timestamping capabilities.
+ * @ndev: Pointer to net_device structure
+ * @info: Pointer to ethtool_ts_info structure
+ *
+ * Return: 0, on success, Non-zero error value on failure.
+ */
+static int axienet_ethtools_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
{
- struct axienet_local *lp = netdev_priv(ndev);
-
- return phylink_ethtool_ksettings_set(lp->phylink, cmd);
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+ info->phc_index = 0;
+
+#ifdef CONFIG_XILINX_TSN_PTP
+ info->phc_index = axienet_phc_index;
+#endif
+ return 0;
}
+#endif
static const struct ethtool_ops axienet_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
@@ -1485,281 +2447,479 @@ static const struct ethtool_ops axienet_ethtool_ops = {
.get_regs = axienet_ethtools_get_regs,
.get_link = ethtool_op_get_link,
.get_ringparam = axienet_ethtools_get_ringparam,
- .set_ringparam = axienet_ethtools_set_ringparam,
+ .set_ringparam = axienet_ethtools_set_ringparam,
.get_pauseparam = axienet_ethtools_get_pauseparam,
.set_pauseparam = axienet_ethtools_set_pauseparam,
.get_coalesce = axienet_ethtools_get_coalesce,
.set_coalesce = axienet_ethtools_set_coalesce,
- .get_link_ksettings = axienet_ethtools_get_link_ksettings,
- .set_link_ksettings = axienet_ethtools_set_link_ksettings,
+#if defined(CONFIG_XILINX_AXI_EMAC_HWTSTAMP) || defined(CONFIG_XILINX_TSN_PTP)
+ .get_ts_info = axienet_ethtools_get_ts_info,
+#endif
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ .get_sset_count = axienet_sset_count,
+ .get_ethtool_stats = axienet_get_stats,
+ .get_strings = axienet_strings,
+#endif
};
-static void axienet_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+static int __maybe_unused axienet_mcdma_probe(struct platform_device *pdev,
+ struct axienet_local *lp,
+ struct net_device *ndev)
{
- struct net_device *ndev = to_net_dev(config->dev);
- struct axienet_local *lp = netdev_priv(ndev);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- /* Only support the mode we are configured for */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != lp->phy_mode) {
- netdev_warn(ndev, "Cannot use PHY mode %s, supported: %s\n",
- phy_modes(state->interface),
- phy_modes(lp->phy_mode));
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
- return;
+ int i, ret = 0;
+ struct axienet_dma_q *q;
+ struct device_node *np;
+ struct resource dmares;
+ const char *str;
+
+ ret = of_property_count_strings(pdev->dev.of_node, "xlnx,channel-ids");
+ if (ret < 0)
+ return -EINVAL;
+
+ for_each_rx_dma_queue(lp, i) {
+ q = kzalloc(sizeof(*q), GFP_KERNEL);
+
+ /* parent */
+ q->lp = lp;
+ lp->dq[i] = q;
+ ret = of_property_read_string_index(pdev->dev.of_node,
+ "xlnx,channel-ids", i,
+ &str);
+ ret = kstrtou16(str, 16, &q->chan_id);
+ lp->qnum[i] = i;
+ lp->chan_num[i] = q->chan_id;
+ }
+
+ np = of_parse_phandle(pdev->dev.of_node, "axistream-connected",
+ 0);
+ if (IS_ERR(np)) {
+ dev_err(&pdev->dev, "could not find DMA node\n");
+ return ret;
+ }
+
+ ret = of_address_to_resource(np, 0, &dmares);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get DMA resource\n");
+ return ret;
}
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
+ ret = of_property_read_u8(np, "xlnx,addrwidth", (u8 *)&lp->dma_mask);
+ if (ret < 0 || lp->dma_mask < XAE_DMA_MASK_MIN ||
+ lp->dma_mask > XAE_DMA_MASK_MAX) {
+ dev_info(&pdev->dev, "missing/invalid xlnx,addrwidth property, using default\n");
+ lp->dma_mask = XAE_DMA_MASK_MIN;
+ }
- phylink_set(mask, Asym_Pause);
- phylink_set(mask, Pause);
- phylink_set(mask, 1000baseX_Full);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 1000baseT_Full);
+ lp->mcdma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
+ if (IS_ERR(lp->mcdma_regs)) {
+ dev_err(&pdev->dev, "iormeap failed for the dma\n");
+ ret = PTR_ERR(lp->mcdma_regs);
+ return ret;
+ }
- bitmap_and(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ axienet_mcdma_tx_probe(pdev, np, lp);
+ axienet_mcdma_rx_probe(pdev, lp, ndev);
+
+ return 0;
}
+#endif
-static void axienet_mac_pcs_get_state(struct phylink_config *config,
- struct phylink_link_state *state)
+static int __maybe_unused axienet_dma_probe(struct platform_device *pdev,
+ struct net_device *ndev)
{
- struct net_device *ndev = to_net_dev(config->dev);
+ int i, ret;
struct axienet_local *lp = netdev_priv(ndev);
- u32 emmc_reg, fcc_reg;
+ struct axienet_dma_q *q;
+ struct device_node *np = NULL;
+ struct resource dmares;
+#ifdef CONFIG_XILINX_TSN
+ char dma_name[10];
+#endif
- state->interface = lp->phy_mode;
+ for_each_rx_dma_queue(lp, i) {
+ q = kzalloc(sizeof(*q), GFP_KERNEL);
- emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
- if (emmc_reg & XAE_EMMC_LINKSPD_1000)
- state->speed = SPEED_1000;
- else if (emmc_reg & XAE_EMMC_LINKSPD_100)
- state->speed = SPEED_100;
- else
- state->speed = SPEED_10;
+ /* parent */
+ q->lp = lp;
- state->pause = 0;
- fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
- if (fcc_reg & XAE_FCC_FCTX_MASK)
- state->pause |= MLO_PAUSE_TX;
- if (fcc_reg & XAE_FCC_FCRX_MASK)
- state->pause |= MLO_PAUSE_RX;
+ lp->dq[i] = q;
+ }
- state->an_complete = 0;
- state->duplex = 1;
-}
+ /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
+ /* TODO handle error ret */
+ for_each_rx_dma_queue(lp, i) {
+ q = lp->dq[i];
+
+ np = of_parse_phandle(pdev->dev.of_node, "axistream-connected",
+ i);
+ if (np) {
+ ret = of_address_to_resource(np, 0, &dmares);
+ if (ret >= 0) {
+ q->dma_regs = devm_ioremap_resource(&pdev->dev,
+ &dmares);
+ } else {
+ dev_err(&pdev->dev, "unable to get DMA resource for %pOF\n",
+ np);
+ return -ENODEV;
+ }
+ q->eth_hasdre = of_property_read_bool(np,
+ "xlnx,include-dre");
+ ret = of_property_read_u8(np, "xlnx,addrwidth",
+ (u8 *)&lp->dma_mask);
+ if (ret < 0 || lp->dma_mask < XAE_DMA_MASK_MIN ||
+ lp->dma_mask > XAE_DMA_MASK_MAX) {
+ dev_info(&pdev->dev, "missing/invalid xlnx,addrwidth property, using default\n");
+ lp->dma_mask = XAE_DMA_MASK_MIN;
+ }
-static void axienet_mac_an_restart(struct phylink_config *config)
-{
- /* Unsupported, do nothing */
-}
+ } else {
+ dev_err(&pdev->dev, "missing axistream-connected property\n");
+ return -EINVAL;
+ }
+ }
-static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
- const struct phylink_link_state *state)
-{
- /* nothing meaningful to do */
+#ifdef CONFIG_XILINX_TSN
+ if (lp->is_tsn) {
+ for_each_rx_dma_queue(lp, i) {
+ sprintf(dma_name, "dma%d_tx", i);
+ lp->dq[i]->tx_irq = platform_get_irq_byname(pdev,
+ dma_name);
+ sprintf(dma_name, "dma%d_rx", i);
+ lp->dq[i]->rx_irq = platform_get_irq_byname(pdev,
+ dma_name);
+ pr_info("lp->dq[%d]->tx_irq %d\n", i,
+ lp->dq[i]->tx_irq);
+ pr_info("lp->dq[%d]->rx_irq %d\n", i,
+ lp->dq[i]->rx_irq);
+ }
+ } else {
+#endif /* This should be removed once the axienet device tree IRQ names comply with the DMA naming scheme */
+ for_each_rx_dma_queue(lp, i) {
+ lp->dq[i]->tx_irq = irq_of_parse_and_map(np, 0);
+ lp->dq[i]->rx_irq = irq_of_parse_and_map(np, 1);
+ }
+#ifdef CONFIG_XILINX_TSN
+ }
+#endif
+
+ of_node_put(np);
+
+ for_each_rx_dma_queue(lp, i) {
+ struct axienet_dma_q *q = lp->dq[i];
+
+ spin_lock_init(&q->tx_lock);
+ spin_lock_init(&q->rx_lock);
+ }
+
+ for_each_rx_dma_queue(lp, i) {
+ netif_napi_add(ndev, &lp->napi[i], xaxienet_rx_poll,
+ XAXIENET_NAPI_WEIGHT);
+ }
+
+ return 0;
}
-static void axienet_mac_link_down(struct phylink_config *config,
- unsigned int mode,
- phy_interface_t interface)
+static int axienet_clk_init(struct platform_device *pdev,
+ struct clk **axi_aclk, struct clk **axis_clk,
+ struct clk **ref_clk, struct clk **tmpclk)
{
- /* nothing meaningful to do */
+ int err;
+
+ *tmpclk = NULL;
+
+ /* The "ethernet_clk" is deprecated and will be removed sometime in
+ * the future. For proper clock usage check axiethernet binding
+ * documentation.
+ */
+ *axi_aclk = devm_clk_get(&pdev->dev, "ethernet_clk");
+ if (IS_ERR(*axi_aclk)) {
+ if (PTR_ERR(*axi_aclk) != -ENOENT) {
+ err = PTR_ERR(*axi_aclk);
+ return err;
+ }
+
+ *axi_aclk = devm_clk_get(&pdev->dev, "s_axi_lite_clk");
+ if (IS_ERR(*axi_aclk)) {
+ if (PTR_ERR(*axi_aclk) != -ENOENT) {
+ err = PTR_ERR(*axi_aclk);
+ return err;
+ }
+ *axi_aclk = NULL;
+ }
+
+ } else {
+ dev_warn(&pdev->dev, "ethernet_clk is deprecated and will be removed sometime in the future\n");
+ }
+
+ *axis_clk = devm_clk_get(&pdev->dev, "axis_clk");
+ if (IS_ERR(*axis_clk)) {
+ if (PTR_ERR(*axis_clk) != -ENOENT) {
+ err = PTR_ERR(*axis_clk);
+ return err;
+ }
+ *axis_clk = NULL;
+ }
+
+ *ref_clk = devm_clk_get(&pdev->dev, "ref_clk");
+ if (IS_ERR(*ref_clk)) {
+ if (PTR_ERR(*ref_clk) != -ENOENT) {
+ err = PTR_ERR(*ref_clk);
+ return err;
+ }
+ *ref_clk = NULL;
+ }
+
+ err = clk_prepare_enable(*axi_aclk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable axi_aclk/ethernet_clk (%d)\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(*axis_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable axis_clk (%d)\n", err);
+ goto err_disable_axi_aclk;
+ }
+
+ err = clk_prepare_enable(*ref_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable ref_clk (%d)\n", err);
+ goto err_disable_axis_clk;
+ }
+
+ return 0;
+
+err_disable_axis_clk:
+ clk_disable_unprepare(*axis_clk);
+err_disable_axi_aclk:
+ clk_disable_unprepare(*axi_aclk);
+
+ return err;
}
-static void axienet_mac_link_up(struct phylink_config *config,
- struct phy_device *phy,
- unsigned int mode, phy_interface_t interface,
- int speed, int duplex,
- bool tx_pause, bool rx_pause)
+static int axienet_dma_clk_init(struct platform_device *pdev)
{
- struct net_device *ndev = to_net_dev(config->dev);
+ int err;
+ struct net_device *ndev = platform_get_drvdata(pdev);
struct axienet_local *lp = netdev_priv(ndev);
- u32 emmc_reg, fcc_reg;
- emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
- emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
+ /* The "dma_clk" is deprecated and will be removed sometime in
+ * the future. For proper clock usage check axiethernet binding
+ * documentation.
+ */
+ lp->dma_tx_clk = devm_clk_get(&pdev->dev, "dma_clk");
+ if (IS_ERR(lp->dma_tx_clk)) {
+ if (PTR_ERR(lp->dma_tx_clk) != -ENOENT) {
+ err = PTR_ERR(lp->dma_tx_clk);
+ return err;
+ }
- switch (speed) {
- case SPEED_1000:
- emmc_reg |= XAE_EMMC_LINKSPD_1000;
- break;
- case SPEED_100:
- emmc_reg |= XAE_EMMC_LINKSPD_100;
- break;
- case SPEED_10:
- emmc_reg |= XAE_EMMC_LINKSPD_10;
- break;
- default:
- dev_err(&ndev->dev,
- "Speed other than 10, 100 or 1Gbps is not supported\n");
- break;
+ lp->dma_tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
+ if (IS_ERR(lp->dma_tx_clk)) {
+ if (PTR_ERR(lp->dma_tx_clk) != -ENOENT) {
+ err = PTR_ERR(lp->dma_tx_clk);
+ return err;
+ }
+ lp->dma_tx_clk = NULL;
+ }
+ } else {
+ dev_warn(&pdev->dev, "dma_clk is deprecated and will be removed sometime in the future\n");
}
- axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
+ lp->dma_rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
+ if (IS_ERR(lp->dma_rx_clk)) {
+ if (PTR_ERR(lp->dma_rx_clk) != -ENOENT) {
+ err = PTR_ERR(lp->dma_rx_clk);
+ return err;
+ }
+ lp->dma_rx_clk = NULL;
+ }
- fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
- if (tx_pause)
- fcc_reg |= XAE_FCC_FCTX_MASK;
- else
- fcc_reg &= ~XAE_FCC_FCTX_MASK;
- if (rx_pause)
- fcc_reg |= XAE_FCC_FCRX_MASK;
- else
- fcc_reg &= ~XAE_FCC_FCRX_MASK;
- axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
+ lp->dma_sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
+ if (IS_ERR(lp->dma_sg_clk)) {
+ if (PTR_ERR(lp->dma_sg_clk) != -ENOENT) {
+ err = PTR_ERR(lp->dma_sg_clk);
+ return err;
+ }
+ lp->dma_sg_clk = NULL;
+ }
+
+ err = clk_prepare_enable(lp->dma_tx_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable tx_clk/dma_clk (%d)\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(lp->dma_rx_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
+ goto err_disable_txclk;
+ }
+
+ err = clk_prepare_enable(lp->dma_sg_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
+ goto err_disable_rxclk;
+ }
+
+ return 0;
+
+err_disable_rxclk:
+ clk_disable_unprepare(lp->dma_rx_clk);
+err_disable_txclk:
+ clk_disable_unprepare(lp->dma_tx_clk);
+
+ return err;
}
-static const struct phylink_mac_ops axienet_phylink_ops = {
- .validate = axienet_validate,
- .mac_pcs_get_state = axienet_mac_pcs_get_state,
- .mac_an_restart = axienet_mac_an_restart,
- .mac_config = axienet_mac_config,
- .mac_link_down = axienet_mac_link_down,
- .mac_link_up = axienet_mac_link_up,
-};
+static void axienet_clk_disable(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct axienet_local *lp = netdev_priv(ndev);
-/**
- * axienet_dma_err_handler - Work queue task for Axi DMA Error
- * @work: pointer to work_struct
- *
- * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
- * Tx/Rx BDs.
- */
-static void axienet_dma_err_handler(struct work_struct *work)
+ clk_disable_unprepare(lp->dma_sg_clk);
+ clk_disable_unprepare(lp->dma_tx_clk);
+ clk_disable_unprepare(lp->dma_rx_clk);
+ clk_disable_unprepare(lp->eth_sclk);
+ clk_disable_unprepare(lp->eth_refclk);
+ clk_disable_unprepare(lp->eth_dclk);
+ clk_disable_unprepare(lp->aclk);
+}
+
+static int xxvenet_clk_init(struct platform_device *pdev,
+ struct clk **axi_aclk, struct clk **axis_clk,
+ struct clk **tmpclk, struct clk **dclk)
{
- u32 axienet_status;
- u32 cr, i;
- struct axienet_local *lp = container_of(work, struct axienet_local,
- dma_err_task);
- struct net_device *ndev = lp->ndev;
- struct axidma_bd *cur_p;
+ int err;
- axienet_setoptions(ndev, lp->options &
- ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- /* Disable the MDIO interface till Axi Ethernet Reset is completed.
- * When we do an Axi Ethernet reset, it resets the complete core
- * including the MDIO. MDIO must be disabled before resetting
- * and re-enabled afterwards.
- * Hold MDIO bus lock to avoid MDIO accesses during the reset.
+ *tmpclk = NULL;
+
+ /* The "ethernet_clk" is deprecated and will be removed sometime in
+ * the future. For proper clock usage check axiethernet binding
+ * documentation.
*/
- mutex_lock(&lp->mii_bus->mdio_lock);
- axienet_mdio_disable(lp);
- __axienet_device_reset(lp);
- axienet_mdio_enable(lp);
- mutex_unlock(&lp->mii_bus->mdio_lock);
+ *axi_aclk = devm_clk_get(&pdev->dev, "ethernet_clk");
+ if (IS_ERR(*axi_aclk)) {
+ if (PTR_ERR(*axi_aclk) != -ENOENT) {
+ err = PTR_ERR(*axi_aclk);
+ return err;
+ }
- for (i = 0; i < lp->tx_bd_num; i++) {
- cur_p = &lp->tx_bd_v[i];
- if (cur_p->cntrl) {
- dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
+ *axi_aclk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+ if (IS_ERR(*axi_aclk)) {
+ if (PTR_ERR(*axi_aclk) != -ENOENT) {
+ err = PTR_ERR(*axi_aclk);
+ return err;
+ }
+ *axi_aclk = NULL;
+ }
- dma_unmap_single(ndev->dev.parent, addr,
- (cur_p->cntrl &
- XAXIDMA_BD_CTRL_LENGTH_MASK),
- DMA_TO_DEVICE);
+ } else {
+ dev_warn(&pdev->dev, "ethernet_clk is deprecated and will be removed sometime in the future\n");
+ }
+
+ *axis_clk = devm_clk_get(&pdev->dev, "rx_core_clk");
+ if (IS_ERR(*axis_clk)) {
+ if (PTR_ERR(*axis_clk) != -ENOENT) {
+ err = PTR_ERR(*axis_clk);
+ return err;
}
- if (cur_p->skb)
- dev_kfree_skb_irq(cur_p->skb);
- cur_p->phys = 0;
- cur_p->phys_msb = 0;
- cur_p->cntrl = 0;
- cur_p->status = 0;
- cur_p->app0 = 0;
- cur_p->app1 = 0;
- cur_p->app2 = 0;
- cur_p->app3 = 0;
- cur_p->app4 = 0;
- cur_p->skb = NULL;
+ *axis_clk = NULL;
}
- for (i = 0; i < lp->rx_bd_num; i++) {
- cur_p = &lp->rx_bd_v[i];
- cur_p->status = 0;
- cur_p->app0 = 0;
- cur_p->app1 = 0;
- cur_p->app2 = 0;
- cur_p->app3 = 0;
- cur_p->app4 = 0;
+ *dclk = devm_clk_get(&pdev->dev, "dclk");
+ if (IS_ERR(*dclk)) {
+ if (PTR_ERR(*dclk) != -ENOENT) {
+ err = PTR_ERR(*dclk);
+ return err;
+ }
+ *dclk = NULL;
}
- lp->tx_bd_ci = 0;
- lp->tx_bd_tail = 0;
- lp->rx_bd_ci = 0;
-
- /* Start updating the Rx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
- (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = ((cr & ~XAXIDMA_DELAY_MASK) |
- (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Finally write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
- /* Start updating the Tx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
- (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
- (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Finally write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
-
- /* Populate the tail pointer and bring the Rx Axi DMA engine out of
- * halted state. This will make the Rx side ready for reception.
- */
- axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
- axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
-
- /* Write to the RS (Run-stop) bit in the Tx channel control register.
- * Tx channel is now ready to run. But only after we write to the
- * tail pointer register that the Tx channel will start transmitting
- */
- axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
-
- axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
- axienet_status &= ~XAE_RCW1_RX_MASK;
- axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
-
- axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
- if (axienet_status & XAE_INT_RXRJECT_MASK)
- axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
- axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
- XAE_INT_RECV_ERROR_MASK : 0);
- axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
-
- /* Sync default options with HW but leave receiver and
- * transmitter disabled.
- */
- axienet_setoptions(ndev, lp->options &
- ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- axienet_set_mac_address(ndev, NULL);
- axienet_set_multicast_list(ndev);
- axienet_setoptions(ndev, lp->options);
+ err = clk_prepare_enable(*axi_aclk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable axi_clk/ethernet_clk (%d)\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(*axis_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable axis_clk (%d)\n", err);
+ goto err_disable_axi_aclk;
+ }
+
+ err = clk_prepare_enable(*dclk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable dclk (%d)\n", err);
+ goto err_disable_axis_clk;
+ }
+
+ return 0;
+
+err_disable_axis_clk:
+ clk_disable_unprepare(*axis_clk);
+err_disable_axi_aclk:
+ clk_disable_unprepare(*axi_aclk);
+
+ return err;
}
+static const struct axienet_config axienet_1g_config = {
+ .mactype = XAXIENET_1G,
+ .setoptions = axienet_setoptions,
+ .clk_init = axienet_clk_init,
+ .tx_ptplen = XAE_TX_PTP_LEN,
+};
+
+static const struct axienet_config axienet_2_5g_config = {
+ .mactype = XAXIENET_2_5G,
+ .setoptions = axienet_setoptions,
+ .clk_init = axienet_clk_init,
+ .tx_ptplen = XAE_TX_PTP_LEN,
+};
+
+static const struct axienet_config axienet_10g_config = {
+ .mactype = XAXIENET_LEGACY_10G,
+ .setoptions = axienet_setoptions,
+ .clk_init = xxvenet_clk_init,
+ .tx_ptplen = XAE_TX_PTP_LEN,
+};
+
+static const struct axienet_config axienet_10g25g_config = {
+ .mactype = XAXIENET_10G_25G,
+ .setoptions = xxvenet_setoptions,
+ .clk_init = xxvenet_clk_init,
+ .tx_ptplen = XXV_TX_PTP_LEN,
+};
+
+static const struct axienet_config axienet_usxgmii_config = {
+ .mactype = XAXIENET_10G_25G,
+ .setoptions = xxvenet_setoptions,
+ .clk_init = xxvenet_clk_init,
+ .tx_ptplen = 0,
+};
+
+/* Match table for of_platform binding */
+static const struct of_device_id axienet_of_match[] = {
+ { .compatible = "xlnx,axi-ethernet-1.00.a", .data = &axienet_1g_config},
+ { .compatible = "xlnx,axi-ethernet-1.01.a", .data = &axienet_1g_config},
+ { .compatible = "xlnx,axi-ethernet-2.01.a", .data = &axienet_1g_config},
+ { .compatible = "xlnx,axi-2_5-gig-ethernet-1.0",
+ .data = &axienet_2_5g_config},
+ { .compatible = "xlnx,ten-gig-eth-mac", .data = &axienet_10g_config},
+ { .compatible = "xlnx,xxv-ethernet-1.0",
+ .data = &axienet_10g25g_config},
+ { .compatible = "xlnx,tsn-ethernet-1.00.a", .data = &axienet_1g_config},
+ { .compatible = "xlnx,xxv-usxgmii-ethernet-1.0",
+ .data = &axienet_usxgmii_config},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, axienet_of_match);
+
/**
* axienet_probe - Axi Ethernet probe function.
* @pdev: Pointer to platform device structure.
@@ -1774,16 +2934,40 @@ static void axienet_dma_err_handler(struct work_struct *work)
*/
static int axienet_probe(struct platform_device *pdev)
{
- int ret;
+ int (*axienet_clk_init)(struct platform_device *pdev,
+ struct clk **axi_aclk, struct clk **axis_clk,
+ struct clk **ref_clk, struct clk **tmpclk) =
+ axienet_clk_init;
+ int ret = 0;
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
struct device_node *np;
+#endif
struct axienet_local *lp;
struct net_device *ndev;
const void *mac_addr;
struct resource *ethres;
- int addr_width = 32;
u32 value;
+ u16 num_queues = XAE_MAX_QUEUES;
+ bool slave = false;
+ bool is_tsn = false;
- ndev = alloc_etherdev(sizeof(*lp));
+ is_tsn = of_property_read_bool(pdev->dev.of_node, "xlnx,tsn");
+ ret = of_property_read_u16(pdev->dev.of_node, "xlnx,num-queues",
+ &num_queues);
+ if (ret) {
+ if (!is_tsn) {
+#ifndef CONFIG_AXIENET_HAS_MCDMA
+ num_queues = 1;
+#endif
+ }
+ }
+#ifdef CONFIG_XILINX_TSN
+ if (is_tsn && (num_queues < XAE_TSN_MIN_QUEUES ||
+ num_queues > XAE_MAX_QUEUES))
+ num_queues = XAE_MAX_QUEUES;
+#endif
+
+ ndev = alloc_etherdev_mq(sizeof(*lp), num_queues);
if (!ndev)
return -ENOMEM;
@@ -1803,37 +2987,67 @@ static int axienet_probe(struct platform_device *pdev)
lp->ndev = ndev;
lp->dev = &pdev->dev;
lp->options = XAE_OPTION_DEFAULTS;
+ lp->num_tx_queues = num_queues;
+ lp->num_rx_queues = num_queues;
+ lp->is_tsn = is_tsn;
lp->rx_bd_num = RX_BD_NUM_DEFAULT;
lp->tx_bd_num = TX_BD_NUM_DEFAULT;
+
+#ifdef CONFIG_XILINX_TSN
+ ret = of_property_read_u16(pdev->dev.of_node, "xlnx,num-tc",
+ &lp->num_tc);
+ if (ret || (lp->num_tc != 2 && lp->num_tc != 3))
+ lp->num_tc = XAE_MAX_TSN_TC;
+#endif
+
/* Map device registers */
ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
if (IS_ERR(lp->regs)) {
- dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
ret = PTR_ERR(lp->regs);
goto free_netdev;
}
+#ifdef CONFIG_XILINX_TSN
+ slave = of_property_read_bool(pdev->dev.of_node,
+ "xlnx,tsn-slave");
+ if (slave)
+ lp->temac_no = XAE_TEMAC2;
+ else
+ lp->temac_no = XAE_TEMAC1;
+#endif
lp->regs_start = ethres->start;
/* Setup checksum offload, but default to off if not specified */
lp->features = 0;
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+
+ match = of_match_node(axienet_of_match, pdev->dev.of_node);
+ if (match && match->data) {
+ lp->axienet_config = match->data;
+ axienet_clk_init = lp->axienet_config->clk_init;
+ }
+ }
+
ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
if (!ret) {
+ dev_info(&pdev->dev, "TX_CSUM %d\n", value);
+
switch (value) {
case 1:
lp->csum_offload_on_tx_path =
XAE_FEATURE_PARTIAL_TX_CSUM;
lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
/* Can checksum TCP/UDP over IPv4. */
- ndev->features |= NETIF_F_IP_CSUM;
+ ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
break;
case 2:
lp->csum_offload_on_tx_path =
XAE_FEATURE_FULL_TX_CSUM;
lp->features |= XAE_FEATURE_FULL_TX_CSUM;
/* Can checksum TCP/UDP over IPv4. */
- ndev->features |= NETIF_F_IP_CSUM;
+ ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
break;
default:
lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
@@ -1841,6 +3055,8 @@ static int axienet_probe(struct platform_device *pdev)
}
ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
if (!ret) {
+ dev_info(&pdev->dev, "RX_CSUM %d\n", value);
+
switch (value) {
case 1:
lp->csum_offload_on_rx_path =
@@ -1864,104 +3080,121 @@ static int axienet_probe(struct platform_device *pdev)
*/
of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
- /* Start with the proprietary, and broken phy_type */
- ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
- if (!ret) {
- netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
- switch (value) {
- case XAE_PHY_TYPE_MII:
- lp->phy_mode = PHY_INTERFACE_MODE_MII;
- break;
- case XAE_PHY_TYPE_GMII:
- lp->phy_mode = PHY_INTERFACE_MODE_GMII;
- break;
- case XAE_PHY_TYPE_RGMII_2_0:
- lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
- break;
- case XAE_PHY_TYPE_SGMII:
- lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
- break;
- case XAE_PHY_TYPE_1000BASE_X:
- lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
- break;
- default:
- ret = -EINVAL;
+ /* The phy_mode is optional but when it is not specified it should not
+ * be a value that alters the driver behavior so set it to an invalid
+ * value as the default.
+ */
+ lp->phy_mode = ~0;
+ of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &lp->phy_mode);
+ /* Set default USXGMII rate */
+ lp->usxgmii_rate = SPEED_1000;
+ of_property_read_u32(pdev->dev.of_node, "xlnx,usxgmii-rate",
+ &lp->usxgmii_rate);
+
+ lp->eth_hasnobuf = of_property_read_bool(pdev->dev.of_node,
+ "xlnx,eth-hasnobuf");
+ lp->eth_hasptp = of_property_read_bool(pdev->dev.of_node,
+ "xlnx,eth-hasptp");
+
+ if ((lp->axienet_config->mactype == XAXIENET_1G) && !lp->eth_hasnobuf)
+ lp->eth_irq = platform_get_irq(pdev, 0);
+
+#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
+ if (!lp->is_tsn) {
+ struct resource txtsres, rxtsres;
+
+ /* Find AXI Stream FIFO */
+ np = of_parse_phandle(pdev->dev.of_node, "axififo-connected",
+ 0);
+ if (IS_ERR(np)) {
+ dev_err(&pdev->dev, "could not find TX Timestamp FIFO\n");
+ ret = PTR_ERR(np);
goto free_netdev;
}
- } else {
- ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
- if (ret)
- goto free_netdev;
- }
-
- /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
- np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
- if (np) {
- struct resource dmares;
- ret = of_address_to_resource(np, 0, &dmares);
+ ret = of_address_to_resource(np, 0, &txtsres);
if (ret) {
dev_err(&pdev->dev,
- "unable to get DMA resource\n");
- of_node_put(np);
+ "unable to get Tx Timestamp resource\n");
+ goto free_netdev;
+ }
+
+ lp->tx_ts_regs = devm_ioremap_resource(&pdev->dev, &txtsres);
+ if (IS_ERR(lp->tx_ts_regs)) {
+ dev_err(&pdev->dev, "could not map Tx Timestamp regs\n");
+ ret = PTR_ERR(lp->tx_ts_regs);
goto free_netdev;
}
- lp->dma_regs = devm_ioremap_resource(&pdev->dev,
- &dmares);
- lp->rx_irq = irq_of_parse_and_map(np, 1);
- lp->tx_irq = irq_of_parse_and_map(np, 0);
+
+ if (lp->axienet_config->mactype == XAXIENET_10G_25G) {
+ np = of_parse_phandle(pdev->dev.of_node,
+ "xlnx,rxtsfifo", 0);
+ if (IS_ERR(np)) {
+ dev_err(&pdev->dev,
+ "couldn't find rx-timestamp FIFO\n");
+ ret = PTR_ERR(np);
+ goto free_netdev;
+ }
+
+ ret = of_address_to_resource(np, 0, &rxtsres);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "unable to get rx-timestamp resource\n");
+ goto free_netdev;
+ }
+
+ lp->rx_ts_regs = devm_ioremap_resource(&pdev->dev,
+ &rxtsres);
+ if (IS_ERR(lp->rx_ts_regs)) {
+ dev_err(&pdev->dev,
+ "couldn't map rx-timestamp regs\n");
+ ret = PTR_ERR(lp->rx_ts_regs);
+ goto free_netdev;
+ }
+ lp->tx_ptpheader = devm_kzalloc(&pdev->dev,
+ XXVENET_TS_HEADER_LEN,
+ GFP_KERNEL);
+ }
+
of_node_put(np);
- lp->eth_irq = platform_get_irq_optional(pdev, 0);
- } else {
- /* Check for these resources directly on the Ethernet node. */
- struct resource *res = platform_get_resource(pdev,
- IORESOURCE_MEM, 1);
- lp->dma_regs = devm_ioremap_resource(&pdev->dev, res);
- lp->rx_irq = platform_get_irq(pdev, 1);
- lp->tx_irq = platform_get_irq(pdev, 0);
- lp->eth_irq = platform_get_irq_optional(pdev, 2);
- }
- if (IS_ERR(lp->dma_regs)) {
- dev_err(&pdev->dev, "could not map DMA regs\n");
- ret = PTR_ERR(lp->dma_regs);
- goto free_netdev;
- }
- if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
- dev_err(&pdev->dev, "could not determine irqs\n");
- ret = -ENOMEM;
- goto free_netdev;
}
+#endif
+ if (!slave) {
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ ret = axienet_mcdma_probe(pdev, lp, ndev);
+#else
+ ret = axienet_dma_probe(pdev, ndev);
+#endif
+ if (ret) {
+ pr_err("Getting DMA resource failed\n");
+ goto free_netdev;
+ }
- /* Autodetect the need for 64-bit DMA pointers.
- * When the IP is configured for a bus width bigger than 32 bits,
- * writing the MSB registers is mandatory, even if they are all 0.
- * We can detect this case by writing all 1's to one such register
- * and see if that sticks: when the IP is configured for 32 bits
- * only, those registers are RES0.
- * Those MSB registers were introduced in IP v7.1, which we check first.
- */
- if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
- void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
-
- iowrite32(0x0, desc);
- if (ioread32(desc) == 0) { /* sanity check */
- iowrite32(0xffffffff, desc);
- if (ioread32(desc) > 0) {
- lp->features |= XAE_FEATURE_DMA_64BIT;
- addr_width = 64;
- dev_info(&pdev->dev,
- "autodetected 64-bit DMA range\n");
+ if (dma_set_mask_and_coherent(lp->dev, DMA_BIT_MASK(lp->dma_mask)) != 0) {
+ dev_warn(&pdev->dev, "default to %d-bit dma mask\n", XAE_DMA_MASK_MIN);
+ if (dma_set_mask_and_coherent(lp->dev, DMA_BIT_MASK(XAE_DMA_MASK_MIN)) != 0) {
+ dev_err(&pdev->dev, "dma_set_mask_and_coherent failed, aborting\n");
+ goto free_netdev;
}
- iowrite32(0x0, desc);
+ }
+
+ ret = axienet_dma_clk_init(pdev);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "DMA clock init failed %d\n", ret);
+ goto free_netdev;
}
}
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
+ ret = axienet_clk_init(pdev, &lp->aclk, &lp->eth_sclk,
+ &lp->eth_refclk, &lp->eth_dclk);
if (ret) {
- dev_err(&pdev->dev, "No suitable DMA available\n");
- goto free_netdev;
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Ethernet clock init failed %d\n", ret);
+ goto err_disable_clk;
}
+ lp->eth_irq = platform_get_irq(pdev, 0);
/* Check for Ethernet core IRQ (optional) */
if (lp->eth_irq <= 0)
dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
@@ -1978,6 +3211,13 @@ static int axienet_probe(struct platform_device *pdev)
lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
+ ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
+ if (ret < 0)
+ dev_warn(&pdev->dev, "couldn't find phy i/f\n");
+ lp->phy_interface = ret;
+ if (lp->phy_mode == XAE_PHY_TYPE_1000BASE_X)
+ lp->phy_flags = XAE_PHY_TYPE_1000BASE_X;
+
lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
if (lp->phy_node) {
lp->clk = devm_clk_get(&pdev->dev, NULL);
@@ -2000,26 +3240,53 @@ static int axienet_probe(struct platform_device *pdev)
"error registering MDIO bus: %d\n", ret);
}
- lp->phylink_config.dev = &ndev->dev;
- lp->phylink_config.type = PHYLINK_NETDEV;
-
- lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
- lp->phy_mode,
- &axienet_phylink_ops);
- if (IS_ERR(lp->phylink)) {
- ret = PTR_ERR(lp->phylink);
- dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
- goto free_netdev;
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ /* Create sysfs file entries for the device */
+ ret = axeinet_mcdma_create_sysfs(&lp->dev->kobj);
+ if (ret < 0) {
+ dev_err(lp->dev, "unable to create sysfs entries\n");
+ return ret;
}
+#endif
ret = register_netdev(lp->ndev);
if (ret) {
dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
- goto free_netdev;
+ axienet_mdio_teardown(lp);
+ goto err_disable_clk;
}
+#ifdef CONFIG_XILINX_TSN_PTP
+ if (lp->is_tsn) {
+ lp->ptp_rx_irq = platform_get_irq_byname(pdev, "ptp_rx");
+
+ lp->ptp_tx_irq = platform_get_irq_byname(pdev, "ptp_tx");
+
+ lp->qbv_irq = platform_get_irq_byname(pdev, "qbv_irq");
+
+ pr_debug("ptp RX irq: %d\n", lp->ptp_rx_irq);
+ pr_debug("ptp TX irq: %d\n", lp->ptp_tx_irq);
+ pr_debug("qbv_irq: %d\n", lp->qbv_irq);
+
+ spin_lock_init(&lp->ptp_tx_lock);
+
+ if (lp->temac_no == XAE_TEMAC1) {
+ axienet_ptp_timer_probe((lp->regs + XAE_RTC_OFFSET),
+ pdev);
+
+ /* enable VLAN */
+ lp->options |= XAE_OPTION_VLAN;
+ axienet_setoptions(lp->ndev, lp->options);
+#ifdef CONFIG_XILINX_TSN_QBV
+ axienet_qbv_init(ndev);
+#endif
+ }
+ }
+#endif
return 0;
+err_disable_clk:
+ axienet_clk_disable(pdev);
free_netdev:
free_netdev(ndev);
@@ -2030,17 +3297,30 @@ static int axienet_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct axienet_local *lp = netdev_priv(ndev);
+ int i;
- unregister_netdev(ndev);
-
- if (lp->phylink)
- phylink_destroy(lp->phylink);
+ if (!lp->is_tsn || lp->temac_no == XAE_TEMAC1) {
+ for_each_rx_dma_queue(lp, i)
+ netif_napi_del(&lp->napi[i]);
+ }
+#ifdef CONFIG_XILINX_TSN_PTP
+ axienet_ptp_timer_remove(lp->timer_priv);
+#ifdef CONFIG_XILINX_TSN_QBV
+ axienet_qbv_remove(ndev);
+#endif
+#endif
+ unregister_netdev(ndev);
+ axienet_clk_disable(pdev);
- axienet_mdio_teardown(lp);
+ if (lp->mii_bus)
+ axienet_mdio_teardown(lp);
if (lp->clk)
clk_disable_unprepare(lp->clk);
+#ifdef CONFIG_AXIENET_HAS_MCDMA
+ axeinet_mcdma_remove_sysfs(&lp->dev->kobj);
+#endif
of_node_put(lp->phy_node);
lp->phy_node = NULL;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mcdma.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mcdma.c
new file mode 100644
index 000000000000..6c18e51ed515
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mcdma.c
@@ -0,0 +1,1043 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Xilinx AXI Ethernet (MCDMA programming)
+ *
+ * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
+ * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
+ * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
+ * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
+ * Copyright (c) 2010 - 2011 PetaLogix
+ * Copyright (c) 2010 - 2012 Xilinx, Inc.
+ * Copyright (C) 2018 Xilinx, Inc. All rights reserved.
+ *
+ * This file contains helper functions for AXI MCDMA TX and RX programming.
+ */
+
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_net.h>
+
+#include "xilinx_axienet.h"
+
+struct axienet_stat {
+ const char *name;
+};
+
+static struct axienet_stat axienet_get_tx_strings_stats[] = {
+ { "txq0_packets" },
+ { "txq0_bytes" },
+ { "txq1_packets" },
+ { "txq1_bytes" },
+ { "txq2_packets" },
+ { "txq2_bytes" },
+ { "txq3_packets" },
+ { "txq3_bytes" },
+ { "txq4_packets" },
+ { "txq4_bytes" },
+ { "txq5_packets" },
+ { "txq5_bytes" },
+ { "txq6_packets" },
+ { "txq6_bytes" },
+ { "txq7_packets" },
+ { "txq7_bytes" },
+ { "txq8_packets" },
+ { "txq8_bytes" },
+ { "txq9_packets" },
+ { "txq9_bytes" },
+ { "txq10_packets" },
+ { "txq10_bytes" },
+ { "txq11_packets" },
+ { "txq11_bytes" },
+ { "txq12_packets" },
+ { "txq12_bytes" },
+ { "txq13_packets" },
+ { "txq13_bytes" },
+ { "txq14_packets" },
+ { "txq14_bytes" },
+ { "txq15_packets" },
+ { "txq15_bytes" },
+};
+
+static struct axienet_stat axienet_get_rx_strings_stats[] = {
+ { "rxq0_packets" },
+ { "rxq0_bytes" },
+ { "rxq1_packets" },
+ { "rxq1_bytes" },
+ { "rxq2_packets" },
+ { "rxq2_bytes" },
+ { "rxq3_packets" },
+ { "rxq3_bytes" },
+ { "rxq4_packets" },
+ { "rxq4_bytes" },
+ { "rxq5_packets" },
+ { "rxq5_bytes" },
+ { "rxq6_packets" },
+ { "rxq6_bytes" },
+ { "rxq7_packets" },
+ { "rxq7_bytes" },
+ { "rxq8_packets" },
+ { "rxq8_bytes" },
+ { "rxq9_packets" },
+ { "rxq9_bytes" },
+ { "rxq10_packets" },
+ { "rxq10_bytes" },
+ { "rxq11_packets" },
+ { "rxq11_bytes" },
+ { "rxq12_packets" },
+ { "rxq12_bytes" },
+ { "rxq13_packets" },
+ { "rxq13_bytes" },
+ { "rxq14_packets" },
+ { "rxq14_bytes" },
+ { "rxq15_packets" },
+ { "rxq15_bytes" },
+};
+
+/**
+ * axienet_mcdma_tx_bd_free - Release MCDMA Tx buffer descriptor rings
+ * @ndev: Pointer to the net_device structure
+ * @q: Pointer to DMA queue structure
+ *
+ * This function is used to release the descriptors allocated in
+ * axienet_mcdma_tx_q_init.
+ */
+void __maybe_unused axienet_mcdma_tx_bd_free(struct net_device *ndev,
+ struct axienet_dma_q *q)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ if (q->txq_bd_v) {
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(*q->txq_bd_v) * lp->tx_bd_num,
+ q->txq_bd_v,
+ q->tx_bd_p);
+ }
+ if (q->tx_bufs) {
+ dma_free_coherent(ndev->dev.parent,
+ XAE_MAX_PKT_LEN * lp->tx_bd_num,
+ q->tx_bufs,
+ q->tx_bufs_dma);
+ }
+}
+
+/**
+ * axienet_mcdma_rx_bd_free - Release MCDMA Rx buffer descriptor rings
+ * @ndev: Pointer to the net_device structure
+ * @q: Pointer to DMA queue structure
+ *
+ * This function is used to release the descriptors allocated in
+ * axienet_mcdma_rx_q_init.
+ */
+void __maybe_unused axienet_mcdma_rx_bd_free(struct net_device *ndev,
+ struct axienet_dma_q *q)
+{
+ int i;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ for (i = 0; i < lp->rx_bd_num; i++) {
+ dma_unmap_single(ndev->dev.parent, q->rxq_bd_v[i].phys,
+ lp->max_frm_size, DMA_FROM_DEVICE);
+ dev_kfree_skb((struct sk_buff *)
+ (q->rxq_bd_v[i].sw_id_offset));
+ }
+
+ if (q->rxq_bd_v) {
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(*q->rxq_bd_v) * lp->rx_bd_num,
+ q->rxq_bd_v,
+ q->rx_bd_p);
+ }
+}
+
+/**
+ * axienet_mcdma_tx_q_init - Setup buffer descriptor rings for individual Axi
+ * MCDMA-Tx
+ * @ndev: Pointer to the net_device structure
+ * @q: Pointer to DMA queue structure
+ *
+ * Return: 0, on success -ENOMEM, on failure
+ *
+ * This function is helper function to axienet_dma_bd_init
+ */
+int __maybe_unused axienet_mcdma_tx_q_init(struct net_device *ndev,
+ struct axienet_dma_q *q)
+{
+ u32 cr, chan_en;
+ int i;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ q->tx_bd_ci = 0;
+ q->tx_bd_tail = 0;
+
+ q->txq_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*q->txq_bd_v) * lp->tx_bd_num,
+ &q->tx_bd_p, GFP_KERNEL);
+ if (!q->txq_bd_v)
+ goto out;
+
+ if (!q->eth_hasdre) {
+ q->tx_bufs = dma_alloc_coherent(ndev->dev.parent,
+ XAE_MAX_PKT_LEN * lp->tx_bd_num,
+ &q->tx_bufs_dma,
+ GFP_KERNEL);
+ if (!q->tx_bufs)
+ goto out;
+
+ for (i = 0; i < lp->tx_bd_num; i++)
+ q->tx_buf[i] = &q->tx_bufs[i * XAE_MAX_PKT_LEN];
+ }
+
+ for (i = 0; i < lp->tx_bd_num; i++) {
+ q->txq_bd_v[i].next = q->tx_bd_p +
+ sizeof(*q->txq_bd_v) *
+ ((i + 1) % lp->tx_bd_num);
+ }
+
+ /* Start updating the Tx channel control register */
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id));
+ /* Update the interrupt coalesce count */
+ cr = (((cr & ~XMCDMA_COALESCE_MASK)) |
+ ((lp->coalesce_count_tx) << XMCDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = (((cr & ~XMCDMA_DELAY_MASK)) |
+ (XAXIDMA_DFT_TX_WAITBOUND << XMCDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XMCDMA_IRQ_ALL_MASK;
+ /* Write to the Tx channel control register */
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id), cr);
+
+ /* Write to the RS (Run-stop) bit in the Tx channel control register.
+ * Tx channel is now ready to run. But only after we write to the
+ * tail pointer register that the Tx channel will start transmitting.
+ */
+ axienet_dma_bdout(q, XMCDMA_CHAN_CURDESC_OFFSET(q->chan_id),
+ q->tx_bd_p);
+ cr = axienet_dma_in32(q, XMCDMA_CR_OFFSET);
+ axienet_dma_out32(q, XMCDMA_CR_OFFSET,
+ cr | XMCDMA_CR_RUNSTOP_MASK);
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id));
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id),
+ cr | XMCDMA_CR_RUNSTOP_MASK);
+ chan_en = axienet_dma_in32(q, XMCDMA_CHEN_OFFSET);
+ chan_en |= (1 << (q->chan_id - 1));
+ axienet_dma_out32(q, XMCDMA_CHEN_OFFSET, chan_en);
+
+ return 0;
+out:
+ for_each_tx_dma_queue(lp, i) {
+ axienet_mcdma_tx_bd_free(ndev, lp->dq[i]);
+ }
+ return -ENOMEM;
+}
+
+/**
+ * axienet_mcdma_rx_q_init - Setup buffer descriptor rings for individual Axi
+ * MCDMA-Rx
+ * @ndev: Pointer to the net_device structure
+ * @q: Pointer to DMA queue structure
+ *
+ * Return: 0, on success -ENOMEM, on failure
+ *
+ * This function is helper function to axienet_dma_bd_init
+ */
+int __maybe_unused axienet_mcdma_rx_q_init(struct net_device *ndev,
+ struct axienet_dma_q *q)
+{
+ u32 cr, chan_en;
+ int i;
+ struct sk_buff *skb;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ q->rx_bd_ci = 0;
+ q->rx_offset = XMCDMA_CHAN_RX_OFFSET;
+
+ q->rxq_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(*q->rxq_bd_v) * lp->rx_bd_num,
+ &q->rx_bd_p, GFP_KERNEL);
+ if (!q->rxq_bd_v)
+ goto out;
+
+ for (i = 0; i < lp->rx_bd_num; i++) {
+ q->rxq_bd_v[i].next = q->rx_bd_p +
+ sizeof(*q->rxq_bd_v) *
+ ((i + 1) % lp->rx_bd_num);
+
+ skb = netdev_alloc_skb(ndev, lp->max_frm_size);
+ if (!skb)
+ goto out;
+
+ /* Ensure that the skb is completely updated
+ * prio to mapping the DMA
+ */
+ wmb();
+
+ q->rxq_bd_v[i].sw_id_offset = (phys_addr_t)skb;
+ q->rxq_bd_v[i].phys = dma_map_single(ndev->dev.parent,
+ skb->data,
+ lp->max_frm_size,
+ DMA_FROM_DEVICE);
+ q->rxq_bd_v[i].cntrl = lp->max_frm_size;
+ }
+
+ /* Start updating the Rx channel control register */
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset);
+ /* Update the interrupt coalesce count */
+ cr = ((cr & ~XMCDMA_COALESCE_MASK) |
+ ((lp->coalesce_count_rx) << XMCDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = ((cr & ~XMCDMA_DELAY_MASK) |
+ (XAXIDMA_DFT_RX_WAITBOUND << XMCDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XMCDMA_IRQ_ALL_MASK;
+ /* Write to the Rx channel control register */
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset, cr);
+
+ /* Populate the tail pointer and bring the Rx Axi DMA engine out of
+ * halted state. This will make the Rx side ready for reception.
+ */
+ axienet_dma_bdout(q, XMCDMA_CHAN_CURDESC_OFFSET(q->chan_id) +
+ q->rx_offset, q->rx_bd_p);
+ cr = axienet_dma_in32(q, XMCDMA_CR_OFFSET + q->rx_offset);
+ axienet_dma_out32(q, XMCDMA_CR_OFFSET + q->rx_offset,
+ cr | XMCDMA_CR_RUNSTOP_MASK);
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset);
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) + q->rx_offset,
+ cr | XMCDMA_CR_RUNSTOP_MASK);
+ axienet_dma_bdout(q, XMCDMA_CHAN_TAILDESC_OFFSET(q->chan_id) +
+ q->rx_offset, q->rx_bd_p + (sizeof(*q->rxq_bd_v) *
+ (lp->rx_bd_num - 1)));
+ chan_en = axienet_dma_in32(q, XMCDMA_CHEN_OFFSET + q->rx_offset);
+ chan_en |= (1 << (q->chan_id - 1));
+ axienet_dma_out32(q, XMCDMA_CHEN_OFFSET + q->rx_offset, chan_en);
+
+ return 0;
+
+out:
+ for_each_rx_dma_queue(lp, i) {
+ axienet_mcdma_rx_bd_free(ndev, lp->dq[i]);
+ }
+ return -ENOMEM;
+}
+
+static inline int get_mcdma_tx_q(struct axienet_local *lp, u32 chan_id)
+{
+ int i;
+
+ for_each_tx_dma_queue(lp, i) {
+ if (chan_id == lp->chan_num[i])
+ return lp->qnum[i];
+ }
+
+ return -ENODEV;
+}
+
+static inline int get_mcdma_rx_q(struct axienet_local *lp, u32 chan_id)
+{
+ int i;
+
+ for_each_rx_dma_queue(lp, i) {
+ if (chan_id == lp->chan_num[i])
+ return lp->qnum[i];
+ }
+
+ return -ENODEV;
+}
+
+static inline int map_dma_q_txirq(int irq, struct axienet_local *lp)
+{
+ int i, chan_sermask;
+ u16 chan_id = 1;
+ struct axienet_dma_q *q = lp->dq[0];
+
+ chan_sermask = axienet_dma_in32(q, XMCDMA_TXINT_SER_OFFSET);
+
+ for (i = 1, chan_id = 1; i != 0 && i <= chan_sermask;
+ i <<= 1, chan_id++) {
+ if (chan_sermask & i)
+ return chan_id;
+ }
+
+ return -ENODEV;
+}
+
+irqreturn_t __maybe_unused axienet_mcdma_tx_irq(int irq, void *_ndev)
+{
+ u32 cr;
+ unsigned int status;
+ struct net_device *ndev = _ndev;
+ struct axienet_local *lp = netdev_priv(ndev);
+ int i, j = map_dma_q_txirq(irq, lp);
+ struct axienet_dma_q *q;
+
+ if (j < 0)
+ return IRQ_NONE;
+
+ i = get_mcdma_tx_q(lp, j);
+ q = lp->dq[i];
+
+ status = axienet_dma_in32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id));
+ if (status & (XMCDMA_IRQ_IOC_MASK | XMCDMA_IRQ_DELAY_MASK)) {
+ axienet_dma_out32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id), status);
+ axienet_start_xmit_done(lp->ndev, q);
+ goto out;
+ }
+ if (!(status & XMCDMA_IRQ_ALL_MASK))
+ return IRQ_NONE;
+ if (status & XMCDMA_IRQ_ERR_MASK) {
+ dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
+ dev_err(&ndev->dev, "Current BD is at: %pa\n",
+ &q->txq_bd_v[q->tx_bd_ci].phys);
+
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id));
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XMCDMA_IRQ_ALL_MASK);
+ /* Finally write to the Tx channel control register */
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id), cr);
+
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XMCDMA_IRQ_ALL_MASK);
+ /* write to the Rx channel control register */
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset, cr);
+
+ tasklet_schedule(&lp->dma_err_tasklet[i]);
+ axienet_dma_out32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id) +
+ q->rx_offset, status);
+ }
+out:
+ return IRQ_HANDLED;
+}
+
+static inline int map_dma_q_rxirq(int irq, struct axienet_local *lp)
+{
+ int i, chan_sermask;
+ u16 chan_id = 1;
+ struct axienet_dma_q *q = lp->dq[0];
+
+ chan_sermask = axienet_dma_in32(q, XMCDMA_RXINT_SER_OFFSET +
+ q->rx_offset);
+
+ for (i = 1, chan_id = 1; i != 0 && i <= chan_sermask;
+ i <<= 1, chan_id++) {
+ if (chan_sermask & i)
+ return chan_id;
+ }
+
+ return -ENODEV;
+}
+
+irqreturn_t __maybe_unused axienet_mcdma_rx_irq(int irq, void *_ndev)
+{
+ u32 cr;
+ unsigned int status;
+ struct net_device *ndev = _ndev;
+ struct axienet_local *lp = netdev_priv(ndev);
+ int i, j = map_dma_q_rxirq(irq, lp);
+ struct axienet_dma_q *q;
+
+ if (j < 0)
+ return IRQ_NONE;
+
+ i = get_mcdma_rx_q(lp, j);
+ q = lp->dq[i];
+
+ status = axienet_dma_in32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id) +
+ q->rx_offset);
+ if (status & (XMCDMA_IRQ_IOC_MASK | XMCDMA_IRQ_DELAY_MASK)) {
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset);
+ cr &= ~(XMCDMA_IRQ_IOC_MASK | XMCDMA_IRQ_DELAY_MASK);
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset, cr);
+ napi_schedule(&lp->napi[i]);
+ }
+
+ if (!(status & XMCDMA_IRQ_ALL_MASK))
+ return IRQ_NONE;
+
+ if (status & XMCDMA_IRQ_ERR_MASK) {
+ dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
+ dev_err(&ndev->dev, "Current BD is at: %pa\n",
+ &q->rxq_bd_v[q->rx_bd_ci].phys);
+
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id));
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XMCDMA_IRQ_ALL_MASK);
+ /* Finally write to the Tx channel control register */
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id), cr);
+
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset);
+ /* Disable coalesce, delay timer and error interrupts */
+ cr &= (~XMCDMA_IRQ_ALL_MASK);
+ /* write to the Rx channel control register */
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset, cr);
+
+ tasklet_schedule(&lp->dma_err_tasklet[i]);
+ axienet_dma_out32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id) +
+ q->rx_offset, status);
+ }
+
+ return IRQ_HANDLED;
+}
+
+void axienet_strings(struct net_device *ndev, u32 sset, u8 *data)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q;
+ int i, j, k = 0;
+
+ for (i = 0, j = 0; i < AXIENET_TX_SSTATS_LEN(lp);) {
+ if (j >= lp->num_tx_queues)
+ break;
+ q = lp->dq[j];
+ if (i % 2 == 0)
+ k = (q->chan_id - 1) * 2;
+ if (sset == ETH_SS_STATS)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ axienet_get_tx_strings_stats[k].name,
+ ETH_GSTRING_LEN);
+ ++i;
+ k++;
+ if (i % 2 == 0)
+ ++j;
+ }
+ k = 0;
+ for (j = 0; i < AXIENET_TX_SSTATS_LEN(lp) +
+ AXIENET_RX_SSTATS_LEN(lp);) {
+ if (j >= lp->num_rx_queues)
+ break;
+ q = lp->dq[j];
+ if (i % 2 == 0)
+ k = (q->chan_id - 1) * 2;
+ if (sset == ETH_SS_STATS)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ axienet_get_rx_strings_stats[k].name,
+ ETH_GSTRING_LEN);
+ ++i;
+ k++;
+ if (i % 2 == 0)
+ ++j;
+ }
+}
+
+int axienet_sset_count(struct net_device *ndev, int sset)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return (AXIENET_TX_SSTATS_LEN(lp) + AXIENET_RX_SSTATS_LEN(lp));
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+void axienet_get_stats(struct net_device *ndev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ struct axienet_dma_q *q;
+ unsigned int i = 0, j;
+
+ for (i = 0, j = 0; i < AXIENET_TX_SSTATS_LEN(lp);) {
+ if (j >= lp->num_tx_queues)
+ break;
+
+ q = lp->dq[j];
+ data[i++] = q->tx_packets;
+ data[i++] = q->tx_bytes;
+ ++j;
+ }
+ for (j = 0; i < AXIENET_TX_SSTATS_LEN(lp) +
+ AXIENET_RX_SSTATS_LEN(lp);) {
+ if (j >= lp->num_rx_queues)
+ break;
+
+ q = lp->dq[j];
+ data[i++] = q->rx_packets;
+ data[i++] = q->rx_bytes;
+ ++j;
+ }
+}
+
+/**
+ * axienet_mcdma_err_handler - Tasklet handler for Axi MCDMA Error
+ * @data: Data passed
+ *
+ * Resets the Axi MCDMA and Axi Ethernet devices, and reconfigures the
+ * Tx/Rx BDs.
+ */
+void __maybe_unused axienet_mcdma_err_handler(unsigned long data)
+{
+ u32 axienet_status;
+ u32 cr, i, chan_en;
+ struct axienet_dma_q *q = (struct axienet_dma_q *)data;
+ struct axienet_local *lp = q->lp;
+ struct net_device *ndev = lp->ndev;
+ struct aximcdma_bd *cur_p;
+
+ lp->axienet_config->setoptions(ndev, lp->options &
+ ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
+ mutex_lock(&lp->mii_bus->mdio_lock);
+ /* Disable the MDIO interface till Axi Ethernet Reset is
+ * Completed. When we do an Axi Ethernet reset, it resets the
+ * Complete core including the MDIO. So if MDIO is not disabled
+ * When the reset process is started,
+ * MDIO will be broken afterwards.
+ */
+ axienet_mdio_disable(lp);
+ axienet_mdio_wait_until_ready(lp);
+ }
+
+ __axienet_device_reset(q);
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
+ axienet_mdio_enable(lp);
+ axienet_mdio_wait_until_ready(lp);
+ mutex_unlock(&lp->mii_bus->mdio_lock);
+ }
+
+ for (i = 0; i < lp->tx_bd_num; i++) {
+ cur_p = &q->txq_bd_v[i];
+ if (cur_p->phys)
+ dma_unmap_single(ndev->dev.parent, cur_p->phys,
+ (cur_p->cntrl &
+ XAXIDMA_BD_CTRL_LENGTH_MASK),
+ DMA_TO_DEVICE);
+ if (cur_p->tx_skb)
+ dev_kfree_skb_irq((struct sk_buff *)cur_p->tx_skb);
+ cur_p->phys = 0;
+ cur_p->cntrl = 0;
+ cur_p->status = 0;
+ cur_p->app0 = 0;
+ cur_p->app1 = 0;
+ cur_p->app2 = 0;
+ cur_p->app3 = 0;
+ cur_p->app4 = 0;
+ cur_p->sw_id_offset = 0;
+ cur_p->tx_skb = 0;
+ }
+
+ for (i = 0; i < lp->rx_bd_num; i++) {
+ cur_p = &q->rxq_bd_v[i];
+ cur_p->status = 0;
+ cur_p->app0 = 0;
+ cur_p->app1 = 0;
+ cur_p->app2 = 0;
+ cur_p->app3 = 0;
+ cur_p->app4 = 0;
+ }
+
+ q->tx_bd_ci = 0;
+ q->tx_bd_tail = 0;
+ q->rx_bd_ci = 0;
+
+ /* Start updating the Rx channel control register */
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset);
+ /* Update the interrupt coalesce count */
+ cr = ((cr & ~XMCDMA_COALESCE_MASK) |
+ ((lp->coalesce_count_rx) << XMCDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = ((cr & ~XMCDMA_DELAY_MASK) |
+ (XAXIDMA_DFT_RX_WAITBOUND << XMCDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XMCDMA_IRQ_ALL_MASK;
+ /* Write to the Rx channel control register */
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset, cr);
+
+ /* Start updating the Tx channel control register */
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id));
+ /* Update the interrupt coalesce count */
+ cr = (((cr & ~XMCDMA_COALESCE_MASK)) |
+ ((lp->coalesce_count_tx) << XMCDMA_COALESCE_SHIFT));
+ /* Update the delay timer count */
+ cr = (((cr & ~XMCDMA_DELAY_MASK)) |
+ (XAXIDMA_DFT_TX_WAITBOUND << XMCDMA_DELAY_SHIFT));
+ /* Enable coalesce, delay timer and error interrupts */
+ cr |= XMCDMA_IRQ_ALL_MASK;
+ /* Write to the Tx channel control register */
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id), cr);
+
+ /* Populate the tail pointer and bring the Rx Axi DMA engine out of
+ * halted state. This will make the Rx side ready for reception.
+ */
+ axienet_dma_bdout(q, XMCDMA_CHAN_CURDESC_OFFSET(q->chan_id) +
+ q->rx_offset, q->rx_bd_p);
+ cr = axienet_dma_in32(q, XMCDMA_CR_OFFSET + q->rx_offset);
+ axienet_dma_out32(q, XMCDMA_CR_OFFSET + q->rx_offset,
+ cr | XMCDMA_CR_RUNSTOP_MASK);
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
+ q->rx_offset);
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) + q->rx_offset,
+ cr | XMCDMA_CR_RUNSTOP_MASK);
+ axienet_dma_bdout(q, XMCDMA_CHAN_TAILDESC_OFFSET(q->chan_id) +
+ q->rx_offset, q->rx_bd_p + (sizeof(*q->rxq_bd_v) *
+ (lp->rx_bd_num - 1)));
+ chan_en = axienet_dma_in32(q, XMCDMA_CHEN_OFFSET + q->rx_offset);
+ chan_en |= (1 << (q->chan_id - 1));
+ axienet_dma_out32(q, XMCDMA_CHEN_OFFSET + q->rx_offset, chan_en);
+
+ /* Write to the RS (Run-stop) bit in the Tx channel control register.
+ * Tx channel is now ready to run. But only after we write to the
+ * tail pointer register that the Tx channel will start transmitting.
+ */
+ axienet_dma_bdout(q, XMCDMA_CHAN_CURDESC_OFFSET(q->chan_id),
+ q->tx_bd_p);
+ cr = axienet_dma_in32(q, XMCDMA_CR_OFFSET);
+ axienet_dma_out32(q, XMCDMA_CR_OFFSET,
+ cr | XMCDMA_CR_RUNSTOP_MASK);
+ cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id));
+ axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id),
+ cr | XMCDMA_CR_RUNSTOP_MASK);
+ chan_en = axienet_dma_in32(q, XMCDMA_CHEN_OFFSET);
+ chan_en |= (1 << (q->chan_id - 1));
+ axienet_dma_out32(q, XMCDMA_CHEN_OFFSET, chan_en);
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
+ axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
+ axienet_status &= ~XAE_RCW1_RX_MASK;
+ axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
+ }
+
+ if (lp->axienet_config->mactype == XAXIENET_1G && !lp->eth_hasnobuf) {
+ axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
+ if (axienet_status & XAE_INT_RXRJECT_MASK)
+ axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
+ }
+
+ if (lp->axienet_config->mactype != XAXIENET_10G_25G)
+ axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
+
+ lp->axienet_config->setoptions(ndev, lp->options &
+ ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
+ axienet_set_mac_address(ndev, NULL);
+ axienet_set_multicast_list(ndev);
+ lp->axienet_config->setoptions(ndev, lp->options);
+}
+
+int __maybe_unused axienet_mcdma_tx_probe(struct platform_device *pdev,
+ struct device_node *np,
+ struct axienet_local *lp)
+{
+ int i;
+ char dma_name[24];
+
+ for_each_tx_dma_queue(lp, i) {
+ struct axienet_dma_q *q;
+
+ q = lp->dq[i];
+
+ q->dma_regs = lp->mcdma_regs;
+ snprintf(dma_name, sizeof(dma_name), "mm2s_ch%d_introut",
+ q->chan_id);
+ q->tx_irq = platform_get_irq_byname(pdev, dma_name);
+ q->eth_hasdre = of_property_read_bool(np,
+ "xlnx,include-dre");
+ spin_lock_init(&q->tx_lock);
+ }
+ of_node_put(np);
+
+ return 0;
+}
+
+int __maybe_unused axienet_mcdma_rx_probe(struct platform_device *pdev,
+ struct axienet_local *lp,
+ struct net_device *ndev)
+{
+ int i;
+ char dma_name[24];
+
+ for_each_rx_dma_queue(lp, i) {
+ struct axienet_dma_q *q;
+
+ q = lp->dq[i];
+
+ q->dma_regs = lp->mcdma_regs;
+ snprintf(dma_name, sizeof(dma_name), "s2mm_ch%d_introut",
+ q->chan_id);
+ q->rx_irq = platform_get_irq_byname(pdev, dma_name);
+
+ spin_lock_init(&q->rx_lock);
+
+ netif_napi_add(ndev, &lp->napi[i], xaxienet_rx_poll,
+ XAXIENET_NAPI_WEIGHT);
+ }
+
+ return 0;
+}
+
+/* Sysfs read: dump ingress (RX) channel observer 1 register, queue 0. */
+static ssize_t rxch_obs1_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct axienet_local *lp = netdev_priv(dev_get_drvdata(dev));
+	struct axienet_dma_q *q = lp->dq[0];
+
+	return sprintf(buf, "Ingress Channel Observer 1 Contents is 0x%x\n",
+		       axienet_dma_in32(q, XMCDMA_CHOBS1_OFFSET + q->rx_offset));
+}
+
+/* Sysfs read: dump ingress (RX) channel observer 2 register, queue 0. */
+static ssize_t rxch_obs2_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct axienet_local *lp = netdev_priv(dev_get_drvdata(dev));
+	struct axienet_dma_q *q = lp->dq[0];
+
+	return sprintf(buf, "Ingress Channel Observer 2 Contents is 0x%x\n",
+		       axienet_dma_in32(q, XMCDMA_CHOBS2_OFFSET + q->rx_offset));
+}
+
+/* Sysfs read: dump ingress (RX) channel observer 3 register, queue 0. */
+static ssize_t rxch_obs3_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct axienet_local *lp = netdev_priv(dev_get_drvdata(dev));
+	struct axienet_dma_q *q = lp->dq[0];
+
+	return sprintf(buf, "Ingress Channel Observer 3 Contents is 0x%x\n",
+		       axienet_dma_in32(q, XMCDMA_CHOBS3_OFFSET + q->rx_offset));
+}
+
+/* Sysfs read: dump ingress (RX) channel observer 4 register, queue 0. */
+static ssize_t rxch_obs4_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct axienet_local *lp = netdev_priv(dev_get_drvdata(dev));
+	struct axienet_dma_q *q = lp->dq[0];
+
+	return sprintf(buf, "Ingress Channel Observer 4 Contents is 0x%x\n",
+		       axienet_dma_in32(q, XMCDMA_CHOBS4_OFFSET + q->rx_offset));
+}
+
+/* Sysfs read: dump ingress (RX) channel observer 5 register, queue 0. */
+static ssize_t rxch_obs5_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct axienet_local *lp = netdev_priv(dev_get_drvdata(dev));
+	struct axienet_dma_q *q = lp->dq[0];
+
+	return sprintf(buf, "Ingress Channel Observer 5 Contents is 0x%x\n",
+		       axienet_dma_in32(q, XMCDMA_CHOBS5_OFFSET + q->rx_offset));
+}
+
+/* Sysfs read: dump ingress (RX) channel observer 6 register, queue 0.
+ * Terminate with plain "\n" like the other observer attributes; the
+ * original "\n\r" put a stray carriage return into the sysfs output.
+ */
+static ssize_t rxch_obs6_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+	struct axienet_local *lp = netdev_priv(ndev);
+	struct axienet_dma_q *q = lp->dq[0];
+	u32 reg;
+
+	reg = axienet_dma_in32(q, XMCDMA_CHOBS6_OFFSET + q->rx_offset);
+
+	return sprintf(buf, "Ingress Channel Observer 6 Contents is 0x%x\n",
+		       reg);
+}
+
+/* Sysfs read: dump egress (TX) channel observer 1 register, queue 0. */
+static ssize_t txch_obs1_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct axienet_local *lp = netdev_priv(dev_get_drvdata(dev));
+	struct axienet_dma_q *q = lp->dq[0];
+
+	return sprintf(buf, "Egress Channel Observer 1 Contents is 0x%x\n",
+		       axienet_dma_in32(q, XMCDMA_CHOBS1_OFFSET));
+}
+
+/* Sysfs read: dump egress (TX) channel observer 2 register, queue 0.
+ * Use "\n" (not "\n\r") for consistency with the other attributes.
+ */
+static ssize_t txch_obs2_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+	struct axienet_local *lp = netdev_priv(ndev);
+	struct axienet_dma_q *q = lp->dq[0];
+	u32 reg;
+
+	reg = axienet_dma_in32(q, XMCDMA_CHOBS2_OFFSET);
+
+	return sprintf(buf, "Egress Channel Observer 2 Contents is 0x%x\n",
+		       reg);
+}
+
+/* Sysfs read: dump egress (TX) channel observer 3 register, queue 0.
+ * Use "\n" (not "\n\r") for consistency with the other attributes.
+ */
+static ssize_t txch_obs3_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+	struct axienet_local *lp = netdev_priv(ndev);
+	struct axienet_dma_q *q = lp->dq[0];
+	u32 reg;
+
+	reg = axienet_dma_in32(q, XMCDMA_CHOBS3_OFFSET);
+
+	return sprintf(buf, "Egress Channel Observer 3 Contents is 0x%x\n",
+		       reg);
+}
+
+/* Sysfs read: dump egress (TX) channel observer 4 register, queue 0.
+ * Use "\n" (not "\n\r") for consistency with the other attributes.
+ */
+static ssize_t txch_obs4_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+	struct axienet_local *lp = netdev_priv(ndev);
+	struct axienet_dma_q *q = lp->dq[0];
+	u32 reg;
+
+	reg = axienet_dma_in32(q, XMCDMA_CHOBS4_OFFSET);
+
+	return sprintf(buf, "Egress Channel Observer 4 Contents is 0x%x\n",
+		       reg);
+}
+
+/* Sysfs read: dump egress (TX) channel observer 5 register, queue 0.
+ * Use "\n" (not "\n\r") for consistency with the other attributes.
+ */
+static ssize_t txch_obs5_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+	struct axienet_local *lp = netdev_priv(ndev);
+	struct axienet_dma_q *q = lp->dq[0];
+	u32 reg;
+
+	reg = axienet_dma_in32(q, XMCDMA_CHOBS5_OFFSET);
+
+	return sprintf(buf, "Egress Channel Observer 5 Contents is 0x%x\n",
+		       reg);
+}
+
+/* Sysfs read: dump egress (TX) channel observer 6 register, queue 0.
+ * Use "\n" (not "\n\r") for consistency with the other attributes.
+ */
+static ssize_t txch_obs6_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+	struct axienet_local *lp = netdev_priv(ndev);
+	struct axienet_dma_q *q = lp->dq[0];
+	u32 reg;
+
+	reg = axienet_dma_in32(q, XMCDMA_CHOBS6_OFFSET);
+
+	return sprintf(buf, "Egress Channel Observer 6 Contents is 0x%x\n",
+		       reg);
+}
+
+/* Sysfs read: report the channel id / weight pair last written via
+ * the chan_weight attribute (cached in the driver private data).
+ */
+static ssize_t chan_weight_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct axienet_local *lp = netdev_priv(dev_get_drvdata(dev));
+
+	return sprintf(buf, "chan_id is %d and weight is %d\n",
+		       lp->chan_id, lp->weight);
+}
+
+/* Sysfs write: program a TX weight for one MCDMA channel.
+ * Input is a hex byte: high nibble = channel id (0-15), low nibble =
+ * weight.  Channels 0-7 live in TXWEIGHT0, channels 8-15 in TXWEIGHT1,
+ * so the register and the in-register lane are selected separately.
+ */
+static ssize_t chan_weight_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+	struct axienet_local *lp = netdev_priv(ndev);
+	struct axienet_dma_q *q = lp->dq[0];
+	int ret;
+	u16 flags, chan_id;
+	u32 val;
+
+	ret = kstrtou16(buf, 16, &flags);
+	if (ret)
+		return ret;
+
+	/* Cache the decoded pair so chan_weight_show() can report it. */
+	lp->chan_id = (flags & 0xF0) >> 4;
+	lp->weight = flags & 0x0F;
+
+	/* Read-modify-write the weight register holding this channel. */
+	if (lp->chan_id < 8)
+		val = axienet_dma_in32(q, XMCDMA_TXWEIGHT0_OFFSET);
+	else
+		val = axienet_dma_in32(q, XMCDMA_TXWEIGHT1_OFFSET);
+
+	/* Channels 8-15 occupy lanes 0-7 of the second register. */
+	if (lp->chan_id > 7)
+		chan_id = lp->chan_id - 8;
+	else
+		chan_id = lp->chan_id;
+
+	val &= ~XMCDMA_TXWEIGHT_CH_MASK(chan_id);
+	val |= lp->weight << XMCDMA_TXWEIGHT_CH_SHIFT(chan_id);
+
+	if (lp->chan_id < 8)
+		axienet_dma_out32(q, XMCDMA_TXWEIGHT0_OFFSET, val);
+	else
+		axienet_dma_out32(q, XMCDMA_TXWEIGHT1_OFFSET, val);
+
+	return count;
+}
+
+/* MCDMA debug attributes: one RW weight knob plus RO observer dumps. */
+static DEVICE_ATTR_RW(chan_weight);
+static DEVICE_ATTR_RO(rxch_obs1);
+static DEVICE_ATTR_RO(rxch_obs2);
+static DEVICE_ATTR_RO(rxch_obs3);
+static DEVICE_ATTR_RO(rxch_obs4);
+static DEVICE_ATTR_RO(rxch_obs5);
+static DEVICE_ATTR_RO(rxch_obs6);
+static DEVICE_ATTR_RO(txch_obs1);
+static DEVICE_ATTR_RO(txch_obs2);
+static DEVICE_ATTR_RO(txch_obs3);
+static DEVICE_ATTR_RO(txch_obs4);
+static DEVICE_ATTR_RO(txch_obs5);
+static DEVICE_ATTR_RO(txch_obs6);
+static const struct attribute *mcdma_attrs[] = {
+	&dev_attr_chan_weight.attr,
+	&dev_attr_rxch_obs1.attr,
+	&dev_attr_rxch_obs2.attr,
+	&dev_attr_rxch_obs3.attr,
+	&dev_attr_rxch_obs4.attr,
+	&dev_attr_rxch_obs5.attr,
+	&dev_attr_rxch_obs6.attr,
+	&dev_attr_txch_obs1.attr,
+	&dev_attr_txch_obs2.attr,
+	&dev_attr_txch_obs3.attr,
+	&dev_attr_txch_obs4.attr,
+	&dev_attr_txch_obs5.attr,
+	&dev_attr_txch_obs6.attr,
+	NULL,
+};
+
+/* NOTE(review): the cast drops const from mcdma_attrs because
+ * attribute_group.attrs is non-const - confirm this is intentional.
+ */
+static const struct attribute_group mcdma_attributes = {
+	.attrs = (struct attribute **)mcdma_attrs,
+};
+
+/* Register the MCDMA sysfs group under @kobj.
+ * NOTE(review): "axeinet" looks like a typo for "axienet", but the name
+ * is part of the exported interface so it is left unchanged here.
+ */
+int axeinet_mcdma_create_sysfs(struct kobject *kobj)
+{
+	return sysfs_create_group(kobj, &mcdma_attributes);
+}
+
+/* Remove the MCDMA sysfs group registered by axeinet_mcdma_create_sysfs(). */
+void axeinet_mcdma_remove_sysfs(struct kobject *kobj)
+{
+	sysfs_remove_group(kobj, &mcdma_attributes);
+}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 435ed308d990..a35d4600e161 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -21,7 +21,7 @@
#define DEFAULT_HOST_CLOCK 150000000 /* 150 MHz */
/* Wait till MDIO interface is ready to accept a new transaction.*/
-static int axienet_mdio_wait_until_ready(struct axienet_local *lp)
+int axienet_mdio_wait_until_ready(struct axienet_local *lp)
{
u32 val;
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 0c26f5bcc523..96e9d21f2937 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Xilinx EmacLite Linux driver for the Xilinx Ethernet MAC Lite device.
+/* Xilinx EmacLite Linux driver for the Xilinx Ethernet MAC Lite device.
*
* This is a new flat driver which is based on the original emac_lite
* driver from John Williams <john.williams@xilinx.com>.
@@ -91,13 +90,11 @@
#define XEL_ARP_PACKET_SIZE 28 /* Max ARP packet size */
#define XEL_HEADER_IP_LENGTH_OFFSET 16 /* IP Length Offset */
-
-
#define TX_TIMEOUT (60 * HZ) /* Tx timeout is 60 seconds. */
#define ALIGNMENT 4
/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
-#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32)adr)) % ALIGNMENT)
+#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((ulong)adr)) % ALIGNMENT)
#ifdef __BIG_ENDIAN
#define xemaclite_readl ioread32be
@@ -115,7 +112,7 @@
* @next_tx_buf_to_use: next Tx buffer to write to
* @next_rx_buf_to_use: next Rx buffer to read from
* @base_addr: base address of the Emaclite device
- * @reset_lock: lock used for synchronization
+ * @reset_lock: lock to serialize xmit and tx_timeout execution
* @deferred_skb: holds an skb (for transmission at a later time) when the
* Tx buffer is not free
* @phy_dev: pointer to the PHY device
@@ -124,7 +121,6 @@
* @last_link: last link status
*/
struct net_local {
-
struct net_device *ndev;
bool tx_ping_pong;
@@ -133,7 +129,7 @@ struct net_local {
u32 next_rx_buf_to_use;
void __iomem *base_addr;
- spinlock_t reset_lock;
+ spinlock_t reset_lock; /* serialize xmit and tx_timeout execution */
struct sk_buff *deferred_skb;
struct phy_device *phy_dev;
@@ -144,7 +140,6 @@ struct net_local {
int last_link;
};
-
/*************************/
/* EmacLite driver calls */
/*************************/
@@ -207,7 +202,7 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata)
* address in the EmacLite device.
*/
static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr,
- unsigned length)
+ unsigned int length)
{
u32 align_buffer;
u32 *to_u32_ptr;
@@ -264,7 +259,7 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr,
* to a 16-bit aligned buffer.
*/
static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr,
- unsigned length)
+ unsigned int length)
{
u16 *to_u16_ptr, *from_u16_ptr;
u32 *from_u32_ptr;
@@ -329,7 +324,6 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
XEL_TSR_XMIT_ACTIVE_MASK)) == 0) {
-
/* Switch to next buffer if configured */
if (drvdata->tx_ping_pong != 0)
drvdata->next_tx_buf_to_use ^= XEL_BUFFER_OFFSET;
@@ -338,15 +332,16 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
* if it is configured in HW
*/
- addr = (void __iomem __force *)((u32 __force)addr ^
+ addr = (void __iomem __force *)((ulong __force)addr ^
XEL_BUFFER_OFFSET);
reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
XEL_TSR_XMIT_ACTIVE_MASK)) != 0)
return -1; /* Buffers were full, return failure */
- } else
+ } else {
return -1; /* Buffer was full, return failure */
+ }
/* Write the frame to the buffer */
xemaclite_aligned_write(data, (u32 __force *)addr, byte_count);
@@ -399,7 +394,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
* will correct on subsequent calls
*/
if (drvdata->rx_ping_pong != 0)
- addr = (void __iomem __force *)((u32 __force)addr ^
+ addr = (void __iomem __force *)((ulong __force)addr ^
XEL_BUFFER_OFFSET);
else
return 0; /* No data was available */
@@ -421,7 +416,6 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
* or an IP packet or an ARP packet
*/
if (proto_type > ETH_DATA_LEN) {
-
if (proto_type == ETH_P_IP) {
length = ((ntohl(xemaclite_readl(addr +
XEL_HEADER_IP_LENGTH_OFFSET +
@@ -431,23 +425,25 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
length = min_t(u16, length, ETH_DATA_LEN);
length += ETH_HLEN + ETH_FCS_LEN;
- } else if (proto_type == ETH_P_ARP)
+ } else if (proto_type == ETH_P_ARP) {
length = XEL_ARP_PACKET_SIZE + ETH_HLEN + ETH_FCS_LEN;
- else
+ } else {
/* Field contains type other than IP or ARP, use max
* frame size and let user parse it
*/
length = ETH_FRAME_LEN + ETH_FCS_LEN;
- } else
+ }
+ } else {
/* Use the length in the frame, plus the header and trailer */
length = proto_type + ETH_HLEN + ETH_FCS_LEN;
+ }
if (WARN_ON(length > maxlen))
length = maxlen;
/* Read from the EmacLite device */
xemaclite_aligned_read((u32 __force *)(addr + XEL_RXBUFF_OFFSET),
- data, length);
+ data, length);
/* Acknowledge the frame */
reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
@@ -668,8 +664,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
/* Check if the Transmission for the first buffer is completed */
tx_status = xemaclite_readl(base_addr + XEL_TSR_OFFSET);
if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
- (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
-
+ (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
xemaclite_writel(tx_status, base_addr + XEL_TSR_OFFSET);
@@ -679,8 +674,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
/* Check if the Transmission for the second buffer is completed */
tx_status = xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
- (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
-
+ (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
xemaclite_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
XEL_TSR_OFFSET);
@@ -837,6 +831,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
of_address_to_resource(npp, 0, &res);
if (lp->ndev->mem_start != res.start) {
struct phy_device *phydev;
+
phydev = of_phy_find_device(lp->phy_node);
if (!phydev)
dev_info(dev,
@@ -1191,9 +1186,9 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
}
dev_info(dev,
- "Xilinx EmacLite at 0x%08X mapped to 0x%08X, irq=%d\n",
+ "Xilinx EmacLite at 0x%08X mapped to 0x%08lX, irq=%d\n",
(unsigned int __force)ndev->mem_start,
- (unsigned int __force)lp->base_addr, ndev->irq);
+ (unsigned long __force)lp->base_addr, ndev->irq);
return 0;
error:
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_cb.c b/drivers/net/ethernet/xilinx/xilinx_tsn_cb.c
new file mode 100644
index 000000000000..4902a536c8e0
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_cb.c
@@ -0,0 +1,177 @@
+/*
+ * Xilinx FPGA Xilinx TSN QCI Controller module.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Author: Saurabh Sengar <saurabhs@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "xilinx_tsn_switch.h"
+
+#define IN_PORTID_MASK 0x3
+#define IN_PORTID_SHIFT 24
+#define MAX_SEQID_MASK 0x0000FFFF
+
+#define SEQ_REC_HIST_LEN_MASK 0x000000FF
+#define SEQ_REC_HIST_LEN_SHIFT 16
+#define SPLIT_STREAM_INPORTID_SHIFT 12
+#define SPLIT_STREAM_INPORTID_MASK 0x3
+#define SPLIT_STREAM_VLANID_MASK 0x00000FFF
+
+#define GATE_ID_SHIFT 24
+#define MEMBER_ID_SHIFT 8
+#define SEQ_RESET_SHIFT 7
+#define REC_TIMEOUT_SHIFT 6
+#define GATE_STATE_SHIFT 5
+#define FRER_VALID_SHIFT 4
+#define WR_OP_TYPE_SHIFT 2
+#define OP_TYPE_SHIFT 1
+#define WR_OP_TYPE_MASK 0x3
+#define FRER_EN_CONTROL_MASK 0x1
+
+/**
+ * frer_control - Configure the control register for FRER
+ * @data: Value to be programmed
+ */
+void frer_control(struct frer_ctrl data)
+{
+	u32 mask = 0;
+
+	/* Pack all control fields into a single register word. */
+	mask = data.gate_id << GATE_ID_SHIFT;
+	mask |= data.memb_id << MEMBER_ID_SHIFT;
+	mask |= data.seq_reset << SEQ_RESET_SHIFT;
+	mask |= data.gate_state << GATE_STATE_SHIFT;
+	mask |= data.rcvry_tmout << REC_TIMEOUT_SHIFT;
+	mask |= data.frer_valid << FRER_VALID_SHIFT;
+	mask |= (data.wr_op_type & WR_OP_TYPE_MASK) << WR_OP_TYPE_SHIFT;
+	mask |= data.op_type << OP_TYPE_SHIFT;
+	mask |= FRER_EN_CONTROL_MASK;
+
+	axienet_iow(&lp, FRER_CONTROL_OFFSET, mask);
+
+	/* wait for write to complete */
+	/* NOTE(review): unbounded busy-wait on hardware self-clearing the
+	 * enable bit - if the core hangs this spins forever; consider a
+	 * bounded poll with timeout.
+	 */
+	while ((axienet_ior(&lp, FRER_CONTROL_OFFSET) & FRER_EN_CONTROL_MASK))
+		;
+}
+
+/**
+ * get_ingress_filter_config - Get Ingress Filter Configuration
+ * @data: Value returned
+ */
+void get_ingress_filter_config(struct in_fltr *data)
+{
+	u32 reg = axienet_ior(&lp, INGRESS_FILTER_OFFSET);
+
+	data->in_port_id = (reg >> IN_PORTID_SHIFT) & IN_PORTID_MASK;
+	data->max_seq_id = reg & MAX_SEQID_MASK;
+}
+
+/**
+ * config_ingress_filter - Configure Ingress Filter Configuration
+ * @data: Value to be programmed
+ */
+void config_ingress_filter(struct in_fltr data)
+{
+	u32 mask = 0;
+
+	mask = ((data.in_port_id & IN_PORTID_MASK) << IN_PORTID_SHIFT) |
+			(data.max_seq_id & MAX_SEQID_MASK);
+	axienet_iow(&lp, INGRESS_FILTER_OFFSET, mask);
+}
+
+/**
+ * get_member_reg - Read frer member Configuration registers value
+ * @data: Value returned
+ */
+void get_member_reg(struct frer_memb_config *data)
+{
+	u32 reg1 = axienet_ior(&lp, FRER_CONFIG_REG1);
+
+	/* REG2 holds the remaining ticks verbatim. */
+	data->rem_ticks = axienet_ior(&lp, FRER_CONFIG_REG2);
+
+	/* Unpack the three fields multiplexed into REG1. */
+	data->seq_rec_hist_len = (reg1 >> SEQ_REC_HIST_LEN_SHIFT)
+					& SEQ_REC_HIST_LEN_MASK;
+	data->split_strm_egport_id = (reg1 >> SPLIT_STREAM_INPORTID_SHIFT)
+					& SPLIT_STREAM_INPORTID_MASK;
+	data->split_strm_vlan_id = reg1 & SPLIT_STREAM_VLANID_MASK;
+}
+
+/**
+ * program_member_reg - configure frer member Configuration registers
+ * @data: Value to be programmed
+ */
+void program_member_reg(struct frer_memb_config data)
+{
+	u32 reg1;
+
+	/* Pack history length, split-stream port and VLAN into REG1. */
+	reg1 = (data.seq_rec_hist_len & SEQ_REC_HIST_LEN_MASK)
+		<< SEQ_REC_HIST_LEN_SHIFT;
+	reg1 |= (data.split_strm_egport_id & SPLIT_STREAM_INPORTID_MASK)
+		<< SPLIT_STREAM_INPORTID_SHIFT;
+	reg1 |= data.split_strm_vlan_id & SPLIT_STREAM_VLANID_MASK;
+
+	axienet_iow(&lp, FRER_CONFIG_REG1, reg1);
+	axienet_iow(&lp, FRER_CONFIG_REG2, data.rem_ticks);
+}
+
+/**
+ * get_frer_static_counter - get frer static counters value
+ * @data: return value, containing counter value
+ *
+ * Each counter is a 64-bit value split across two consecutive 32-bit
+ * registers (lsb at +0, msb at +0x4); counters for member @num start
+ * at an 8-byte stride from each counter block's base.
+ */
+void get_frer_static_counter(struct frer_static_counter *data)
+{
+	int offset = (data->num) * 8;
+
+	data->frer_fr_count.lsb = axienet_ior(&lp, TOTAL_FRER_FRAMES_OFFSET +
+					      offset);
+	data->frer_fr_count.msb = axienet_ior(&lp, TOTAL_FRER_FRAMES_OFFSET +
+					      offset + 0x4);
+
+	data->disc_frames_in_portid.lsb = axienet_ior(&lp,
+					FRER_DISCARD_INGS_FLTR_OFFSET + offset);
+	data->disc_frames_in_portid.msb = axienet_ior(&lp,
+				FRER_DISCARD_INGS_FLTR_OFFSET + offset + 0x4);
+
+	data->pass_frames_ind_recv.lsb = axienet_ior(&lp,
+					FRER_PASS_FRAMES_INDV_OFFSET + offset);
+	data->pass_frames_ind_recv.msb = axienet_ior(&lp,
+				FRER_PASS_FRAMES_INDV_OFFSET + offset + 0x4);
+
+	data->disc_frames_ind_recv.lsb = axienet_ior(&lp,
+				FRER_DISCARD_FRAMES_INDV_OFFSET + offset);
+	data->disc_frames_ind_recv.msb = axienet_ior(&lp,
+				FRER_DISCARD_FRAMES_INDV_OFFSET + offset + 0x4);
+
+	data->pass_frames_seq_recv.lsb = axienet_ior(&lp,
+					FRER_PASS_FRAMES_SEQ_OFFSET + offset);
+	data->pass_frames_seq_recv.msb = axienet_ior(&lp,
+				FRER_PASS_FRAMES_SEQ_OFFSET + offset + 0x4);
+
+	data->disc_frames_seq_recv.lsb = axienet_ior(&lp,
+				FRER_DISCARD_FRAMES_SEQ_OFFSET + offset);
+	data->disc_frames_seq_recv.msb = axienet_ior(&lp,
+				FRER_DISCARD_FRAMES_SEQ_OFFSET + offset + 0x4);
+
+	data->rogue_frames_seq_recv.lsb = axienet_ior(&lp,
+					FRER_ROGUE_FRAMES_SEQ_OFFSET + offset);
+	data->rogue_frames_seq_recv.msb = axienet_ior(&lp,
+				FRER_ROGUE_FRAMES_SEQ_OFFSET + offset + 0x4);
+
+	data->seq_recv_rst.lsb = axienet_ior(&lp,
+					     SEQ_RECV_RESETS_OFFSET + offset);
+	data->seq_recv_rst.msb = axienet_ior(&lp,
+					     SEQ_RECV_RESETS_OFFSET + offset + 0x4);
+}
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_ep.c b/drivers/net/ethernet/xilinx/xilinx_tsn_ep.c
new file mode 100644
index 000000000000..bcd6c737bc27
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_ep.c
@@ -0,0 +1,161 @@
+/*
+ * Xilinx FPGA Xilinx TSN End point driver.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Author: Saurabh Sengar <saurabhs@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/skbuff.h>
+
+#include "xilinx_axienet.h"
+
+/**
+ * tsn_ep_ioctl - TSN endpoint ioctl interface.
+ * @dev: Pointer to the net_device structure
+ * @rq: Socket ioctl interface request structure
+ * @cmd: Ioctl case
+ *
+ * Return: 0 on success, Non-zero error value on failure.
+ *
+ * This is the ioctl interface for TSN end point. Currently this
+ * supports only gate programming.
+ */
+static int tsn_ep_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+#ifdef CONFIG_XILINX_TSN_QBV
+	if (cmd == SIOCCHIOCTL)
+		return axienet_set_schedule(dev, rq->ifr_data);
+	if (cmd == SIOC_GET_SCHED)
+		return axienet_get_schedule(dev, rq->ifr_data);
+#endif
+	/* Everything else is unsupported on the endpoint device. */
+	return -EOPNOTSUPP;
+}
+
+/**
+ * tsn_ep_xmit - TSN endpoint xmit routine.
+ * @skb: Packet data
+ * @dev: Pointer to the net_device structure
+ *
+ * Return: Always returns NETDEV_TX_OK.
+ *
+ * This is dummy xmit function for endpoint as all the data path is assumed to
+ * be connected by TEMAC1 as per linux view; the skb is simply dropped.
+ */
+static int tsn_ep_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops ep_netdev_ops = {
+ .ndo_do_ioctl = tsn_ep_ioctl,
+ .ndo_start_xmit = tsn_ep_xmit,
+};
+
+static const struct of_device_id tsn_ep_of_match[] = {
+ { .compatible = "xlnx,tsn-ep"},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, tsn_ep_of_match);
+
+/**
+ * tsn_ep_probe - TSN ep pointer probe function.
+ * @pdev: Pointer to platform device structure.
+ *
+ * Return: 0, on success
+ *	    Non-zero error value on failure.
+ *
+ * This is the probe routine for TSN endpoint driver.
+ */
+static int tsn_ep_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct axienet_local *lp;
+	struct net_device *ndev;
+	struct resource *ethres;
+	u16 num_tc = 0;
+
+	/* Reserve room for the private data; alloc_netdev(0, ...) would
+	 * allocate no private bytes and netdev_priv() below would then
+	 * write past the allocation.
+	 */
+	ndev = alloc_netdev(sizeof(*lp), "ep", NET_NAME_UNKNOWN, ether_setup);
+	if (!ndev)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, ndev);
+
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+	ndev->netdev_ops = &ep_netdev_ops;
+
+	lp = netdev_priv(ndev);
+	lp->ndev = ndev;
+	lp->dev = &pdev->dev;
+	lp->options = XAE_OPTION_DEFAULTS;
+
+	/* Fall back to the maximum traffic-class count on a missing or
+	 * out-of-range "xlnx,num-tc" property.
+	 */
+	ret = of_property_read_u16(
+			pdev->dev.of_node, "xlnx,num-tc", &num_tc);
+	if (ret || (num_tc != 2 && num_tc != 3))
+		lp->num_tc = XAE_MAX_TSN_TC;
+	else
+		lp->num_tc = num_tc;
+	/* Map device registers */
+	ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
+	if (IS_ERR(lp->regs)) {
+		ret = PTR_ERR(lp->regs);
+		goto free_netdev;
+	}
+
+	ret = register_netdev(lp->ndev);
+	if (ret) {
+		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
+		/* Do not leak the netdev on registration failure. */
+		goto free_netdev;
+	}
+
+	return 0;
+
+free_netdev:
+	free_netdev(ndev);
+
+	return ret;
+}
+
+/* Unregister and release the endpoint netdev on driver removal. */
+static int tsn_ep_remove(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+
+	unregister_netdev(ndev);
+	free_netdev(ndev);
+
+	return 0;
+}
+
+static struct platform_driver tsn_ep_driver = {
+ .probe = tsn_ep_probe,
+ .remove = tsn_ep_remove,
+ .driver = {
+ .name = "tsn_ep_axienet",
+ .of_match_table = tsn_ep_of_match,
+ },
+};
+
+module_platform_driver(tsn_ep_driver);
+
+MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
+MODULE_AUTHOR("Xilinx");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_preemption.c b/drivers/net/ethernet/xilinx/xilinx_tsn_preemption.c
new file mode 100644
index 000000000000..f48c2e0cb69e
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_preemption.c
@@ -0,0 +1,223 @@
+/*
+ * Xilinx FPGA Xilinx TSN QBU/QBR - Frame Preemption module.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Author: Priyadarshini Babu <priyadar@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "xilinx_axienet.h"
+#include "xilinx_tsn_preemption.h"
+
+/**
+ * axienet_preemption - Configure Frame Preemption
+ * @ndev: Pointer to the net_device structure
+ * @useraddr: Value to be programmed
+ * Return: 0 on success, Non-zero error value on failure
+ */
+int axienet_preemption(struct net_device *ndev, void __user *useraddr)
+{
+	struct axienet_local *lp = netdev_priv(ndev);
+	u8 enable;
+
+	if (copy_from_user(&enable, useraddr, sizeof(enable)))
+		return -EFAULT;
+
+	/* Only bit 0 of the user value is meaningful to the hardware. */
+	axienet_iow(lp, PREEMPTION_ENABLE_REG, enable & PREEMPTION_ENABLE);
+	return 0;
+}
+
+/**
+ * axienet_preemption_ctrl - Configure Frame Preemption Control register
+ * @ndev: Pointer to the net_device structure
+ * @useraddr: Value to be programmed
+ * Return: 0 on success, Non-zero error value on failure
+ */
+int axienet_preemption_ctrl(struct net_device *ndev, void __user *useraddr)
+{
+	struct axienet_local *lp = netdev_priv(ndev);
+	struct preempt_ctrl_sts data;
+	u32 value;
+
+	if (copy_from_user(&data, useraddr, sizeof(struct preempt_ctrl_sts)))
+		return -EFAULT;
+	value = axienet_ior(lp, PREEMPTION_CTRL_STS_REG);
+
+	/* Read-modify-write only the writable fields; status bits in the
+	 * same register are preserved.
+	 */
+	value &= ~(VERIFY_TIMER_VALUE_MASK << VERIFY_TIMER_VALUE_SHIFT);
+	value |= (data.verify_timer_value << VERIFY_TIMER_VALUE_SHIFT);
+	value &= ~(ADDITIONAL_FRAG_SIZE_MASK << ADDITIONAL_FRAG_SIZE_SHIFT);
+	value |= (data.additional_frag_size << ADDITIONAL_FRAG_SIZE_SHIFT);
+	value &= ~(DISABLE_PREEMPTION_VERIFY);
+	value |= (data.disable_preemp_verify);
+
+	axienet_iow(lp, PREEMPTION_CTRL_STS_REG, value);
+	return 0;
+}
+
+/**
+ * axienet_preemption_sts - Get Frame Preemption Status
+ * @ndev: Pointer to the net_device structure
+ * @useraddr: return value, containing Frame Preemption status
+ * Return: 0 on success, Non-zero error value on failure
+ */
+int axienet_preemption_sts(struct net_device *ndev, void __user *useraddr)
+{
+	struct axienet_local *lp = netdev_priv(ndev);
+	struct preempt_ctrl_sts status;
+	u32 value;
+
+	/* Zero the whole struct (including bitfield padding) before the
+	 * copy_to_user() below, so no kernel stack bytes leak to userspace.
+	 */
+	memset(&status, 0, sizeof(status));
+
+	value = axienet_ior(lp, PREEMPTION_CTRL_STS_REG);
+
+	status.tx_preemp_sts = (value & TX_PREEMPTION_STS) ? 1 : 0;
+	status.mac_tx_verify_sts = (value >> MAC_MERGE_TX_VERIFY_STS_SHIFT) &
+					MAC_MERGE_TX_VERIFY_STS_MASK;
+	status.verify_timer_value = (value >> VERIFY_TIMER_VALUE_SHIFT) &
+					VERIFY_TIMER_VALUE_MASK;
+	status.additional_frag_size = (value >> ADDITIONAL_FRAG_SIZE_SHIFT) &
+					ADDITIONAL_FRAG_SIZE_MASK;
+	status.disable_preemp_verify = value & DISABLE_PREEMPTION_VERIFY;
+
+	if (copy_to_user(useraddr, &status, sizeof(struct preempt_ctrl_sts)))
+		return -EFAULT;
+	return 0;
+}
+
+/**
+ * statistic_cnts - Read statistics counter registers
+ * @ndev: Pointer to the net_device structure
+ * @ptr: Buffer addr to fill the counter values
+ * @count: read #count number of registers
+ * @addr_off: Register address to be read
+ */
+static void statistic_cnts(struct net_device *ndev, void *ptr,
+			   unsigned int count, unsigned int addr_off)
+{
+	struct axienet_local *lp = netdev_priv(ndev);
+	/* Registers are 32-bit; store them as u32 and iterate with an
+	 * unsigned index to match the unsigned @count (avoids a
+	 * signed/unsigned comparison).
+	 */
+	u32 *buf = ptr;
+	unsigned int i;
+
+	for (i = 0; i < count; i++) {
+		buf[i] = axienet_ior(lp, addr_off);
+		addr_off += 4;
+	}
+}
+
+/**
+ * axienet_preemption_cnt - Get Frame Preemption Statistics counter
+ * @ndev: Pointer to the net_device structure
+ * @useraddr: return value, containing counters value
+ * Return: 0 on success, Non-zero error value on failure
+ */
+int axienet_preemption_cnt(struct net_device *ndev, void __user *useraddr)
+{
+	struct axienet_local *lp = netdev_priv(ndev);
+	struct emac_pmac_stats stats;
+
+	/* Zero the struct first: when preemption is disabled the pMAC
+	 * counters below are never filled in, and the whole struct is
+	 * copied to userspace - without this, uninitialized kernel stack
+	 * memory would leak.
+	 */
+	memset(&stats, 0, sizeof(stats));
+
+	statistic_cnts(ndev, &stats.emac,
+		       sizeof(struct statistics_counters) / 4,
+		       RX_BYTES_EMAC_REG);
+
+	stats.preemp_en = axienet_ior(lp, PREEMPTION_ENABLE_REG);
+	if (stats.preemp_en) {
+		statistic_cnts(ndev, &stats.pmac.sts,
+			       sizeof(struct statistics_counters) / 4,
+			       RX_BYTES_PMAC_REG);
+		statistic_cnts(ndev, &stats.pmac.merge,
+			       sizeof(struct mac_merge_counters) / 4,
+			       TX_HOLD_REG);
+	}
+
+	if (copy_to_user(useraddr, &stats, sizeof(struct emac_pmac_stats)))
+		return -EFAULT;
+	return 0;
+}
+
+/**
+ * axienet_qbu_user_override - Configure QBU user override register
+ * @ndev: Pointer to the net_device structure
+ * @useraddr: Value to be programmed
+ * Return: 0 on success, Non-zero error value on failure
+ *
+ * Each QBU_* bit in @data.set selects one override group; only the
+ * selected groups are modified, the rest of the register is preserved.
+ */
+int axienet_qbu_user_override(struct net_device *ndev, void __user *useraddr)
+{
+	struct axienet_local *lp = netdev_priv(ndev);
+	struct qbu_user data;
+	u32 value;
+
+	if (copy_from_user(&data, useraddr, sizeof(struct qbu_user)))
+		return -EFAULT;
+
+	value = axienet_ior(lp, QBU_USER_OVERRIDE_REG);
+
+	/* Hold/release window override: enable and override go together. */
+	if (data.set & QBU_WINDOW) {
+		if (data.user.hold_rel_window) {
+			value |= USER_HOLD_REL_ENABLE_VALUE;
+			value |= HOLD_REL_WINDOW_OVERRIDE;
+		} else {
+			value &= ~(USER_HOLD_REL_ENABLE_VALUE);
+			value &= ~(HOLD_REL_WINDOW_OVERRIDE);
+		}
+	}
+	if (data.set & QBU_GUARD_BAND) {
+		if (data.user.guard_band)
+			value |= GUARD_BAND_OVERRUN_CNT_INC_OVERRIDE;
+		else
+			value &= ~(GUARD_BAND_OVERRUN_CNT_INC_OVERRIDE);
+	}
+	/* Hold time: clear the field in both branches, program it only
+	 * when the override is requested.
+	 */
+	if (data.set & QBU_HOLD_TIME) {
+		if (data.user.hold_time_override) {
+			value |= HOLD_TIME_OVERRIDE;
+			value &= ~(USER_HOLD_TIME_MASK << USER_HOLD_TIME_SHIFT);
+			value |= data.user.user_hold_time <<
+				 USER_HOLD_TIME_SHIFT;
+		} else {
+			value &= ~(HOLD_TIME_OVERRIDE);
+			value &= ~(USER_HOLD_TIME_MASK << USER_HOLD_TIME_SHIFT);
+		}
+	}
+	if (data.set & QBU_REL_TIME) {
+		if (data.user.rel_time_override) {
+			value |= REL_TIME_OVERRIDE;
+			value &= ~(USER_REL_TIME_MASK << USER_REL_TIME_SHIFT);
+			value |= data.user.user_rel_time << USER_REL_TIME_SHIFT;
+		} else {
+			value &= ~(REL_TIME_OVERRIDE);
+			value &= ~(USER_REL_TIME_MASK << USER_REL_TIME_SHIFT);
+		}
+	}
+
+	axienet_iow(lp, QBU_USER_OVERRIDE_REG, value);
+	return 0;
+}
+
+/**
+ * axienet_qbu_sts - Get QBU Core status
+ * @ndev: Pointer to the net_device structure
+ * @useraddr: return value, containing QBU core status value
+ * Return: 0 on success, Non-zero error value on failure
+ */
+int axienet_qbu_sts(struct net_device *ndev, void __user *useraddr)
+{
+	struct axienet_local *lp = netdev_priv(ndev);
+	struct qbu_core_status status;
+	u32 value = 0;
+
+	/* Zero the struct (including any padding) before copy_to_user()
+	 * so no uninitialized kernel stack bytes reach userspace.
+	 */
+	memset(&status, 0, sizeof(status));
+
+	value = axienet_ior(lp, QBU_CORE_STS_REG);
+	status.hold_time = (value >> HOLD_TIME_STS_SHIFT) & HOLD_TIME_STS_MASK;
+	status.rel_time = (value >> REL_TIME_STS_SHIFT) & REL_TIME_STS_MASK;
+	status.hold_rel_en = (value & HOLD_REL_ENABLE_STS) ? 1 : 0;
+	status.pmac_hold_req = value & PMAC_HOLD_REQ_STS;
+
+	if (copy_to_user(useraddr, &status, sizeof(struct qbu_core_status)))
+		return -EFAULT;
+	return 0;
+}
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_preemption.h b/drivers/net/ethernet/xilinx/xilinx_tsn_preemption.h
new file mode 100644
index 000000000000..d8655513664d
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_preemption.h
@@ -0,0 +1,159 @@
+/**
+ * Xilinx TSN QBU/QBR - Frame Preemption header
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * Author: Priyadarshini Babu <priyadar@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef XILINX_TSN_PREEMPTION_H
+#define XILINX_TSN_PREEMPTION_H
+
+#define PREEMPTION_ENABLE_REG 0x00000440
+#define PREEMPTION_CTRL_STS_REG 0x00000444
+#define QBU_USER_OVERRIDE_REG 0x00000448
+#define QBU_CORE_STS_REG 0x0000044c
+#define TX_HOLD_REG 0x00000910
+#define RX_BYTES_EMAC_REG 0x00000200
+#define RX_BYTES_PMAC_REG 0x00000800
+
+#define PREEMPTION_ENABLE BIT(0)
+
+#define TX_PREEMPTION_STS BIT(31)
+#define MAC_MERGE_TX_VERIFY_STS_MASK 0x7
+#define MAC_MERGE_TX_VERIFY_STS_SHIFT 24
+#define VERIFY_TIMER_VALUE_MASK 0x7F
+#define VERIFY_TIMER_VALUE_SHIFT 8
+#define ADDITIONAL_FRAG_SIZE_MASK 0x3
+#define ADDITIONAL_FRAG_SIZE_SHIFT 4
+#define DISABLE_PREEMPTION_VERIFY BIT(0)
+
+#define USER_HOLD_REL_ENABLE_VALUE BIT(31)
+#define USER_HOLD_TIME_MASK 0x1FF
+#define USER_HOLD_TIME_SHIFT 16
+#define USER_REL_TIME_MASK 0x3F
+#define USER_REL_TIME_SHIFT 8
+#define GUARD_BAND_OVERRUN_CNT_INC_OVERRIDE BIT(3)
+#define HOLD_REL_WINDOW_OVERRIDE BIT(2)
+#define HOLD_TIME_OVERRIDE BIT(1)
+#define REL_TIME_OVERRIDE BIT(0)
+
+#define HOLD_REL_ENABLE_STS BIT(31)
+#define HOLD_TIME_STS_MASK 0x1FF
+#define HOLD_TIME_STS_SHIFT 16
+#define REL_TIME_STS_MASK 0x3F
+#define REL_TIME_STS_SHIFT 8
+#define PMAC_HOLD_REQ_STS BIT(0)
+
+/* Decoded view of PREEMPTION_CTRL_STS_REG (see the *_MASK/*_SHIFT macros
+ * above for the corresponding register fields).
+ */
+struct preempt_ctrl_sts {
+ u8 tx_preemp_sts:1;
+ u8 mac_tx_verify_sts:3;
+ u8 verify_timer_value:7;
+ u8 additional_frag_size:2;
+ u8 disable_preemp_verify:1;
+} __packed;
+
+/* User-requested override values for QBU_USER_OVERRIDE_REG */
+struct qbu_user_override {
+ u8 enable_value:1;
+ u16 user_hold_time:9;
+ u8 user_rel_time:6;
+ u8 guard_band:1;
+ u8 hold_rel_window:1;
+ u8 hold_time_override:1;
+ u8 rel_time_override:1;
+} __packed;
+
+/* Override request plus a QBU_* bitmask stating which fields are valid */
+struct qbu_user {
+ struct qbu_user_override user;
+ u8 set;
+};
+
+#define QBU_WINDOW BIT(0)
+#define QBU_GUARD_BAND BIT(1)
+#define QBU_HOLD_TIME BIT(2)
+#define QBU_REL_TIME BIT(3)
+
+/* Decoded view of QBU_CORE_STS_REG */
+struct qbu_core_status {
+ u16 hold_time;
+ u8 rel_time;
+ u8 hold_rel_en:1;
+ u8 pmac_hold_req:1;
+} __packed;
+
+/* Two 32-bit halves of a 64-bit hardware statistics counter */
+struct cnt_64 {
+ unsigned int msb;
+ unsigned int lsb;
+};
+
+/* 64-bit counter accessible as one value or as msb/lsb register words */
+union static_cntr {
+ u64 cnt;
+ struct cnt_64 word;
+};
+
+/* MAC-merge (frame preemption) sublayer counters */
+struct mac_merge_counters {
+ union static_cntr tx_hold_cnt;
+ union static_cntr tx_frag_cnt;
+ union static_cntr rx_assembly_ok_cnt;
+ union static_cntr rx_assembly_err_cnt;
+ union static_cntr rx_smd_err_cnt;
+ union static_cntr rx_frag_cnt;
+};
+
+/* Per-MAC statistics counters (same layout for eMAC and pMAC) */
+struct statistics_counters {
+ union static_cntr rx_bytes_cnt;
+ union static_cntr tx_bytes_cnt;
+ union static_cntr undersize_frames_cnt;
+ union static_cntr frag_frames_cnt;
+ union static_cntr rx_64_bytes_frames_cnt;
+ union static_cntr rx_65_127_bytes_frames_cnt;
+ union static_cntr rx_128_255_bytes_frames_cnt;
+ union static_cntr rx_256_511_bytes_frames_cnt;
+ union static_cntr rx_512_1023_bytes_frames_cnt;
+ union static_cntr rx_1024_max_frames_cnt;
+ union static_cntr rx_oversize_frames_cnt;
+ union static_cntr tx_64_bytes_frames_cnt;
+ union static_cntr tx_65_127_bytes_frames_cnt;
+ union static_cntr tx_128_255_bytes_frames_cnt;
+ union static_cntr tx_256_511_bytes_frames_cnt;
+ union static_cntr tx_512_1023_bytes_frames_cnt;
+ union static_cntr tx_1024_max_frames_cnt;
+ union static_cntr tx_oversize_frames_cnt;
+ union static_cntr rx_good_frames_cnt;
+ union static_cntr rx_fcs_err_cnt;
+ union static_cntr rx_good_broadcast_frames_cnt;
+ union static_cntr rx_good_multicast_frames_cnt;
+ union static_cntr rx_good_control_frames_cnt;
+ union static_cntr rx_out_of_range_err_cnt;
+ union static_cntr rx_good_vlan_frames_cnt;
+ union static_cntr rx_good_pause_frames_cnt;
+ union static_cntr rx_bad_opcode_frames_cnt;
+ union static_cntr tx_good_frames_cnt;
+ union static_cntr tx_good_broadcast_frames_cnt;
+ union static_cntr tx_good_multicast_frames_cnt;
+ union static_cntr tx_underrun_err_cnt;
+ union static_cntr tx_good_control_frames_cnt;
+ union static_cntr tx_good_vlan_frames_cnt;
+ union static_cntr tx_good_pause_frames_cnt;
+};
+
+/* pMAC statistics plus its MAC-merge counters */
+struct pmac_counters {
+ struct statistics_counters sts;
+ struct mac_merge_counters merge;
+};
+
+/* Combined eMAC/pMAC statistics snapshot with the preemption-enable flag */
+struct emac_pmac_stats {
+ u8 preemp_en;
+ struct statistics_counters emac;
+ struct pmac_counters pmac;
+};
+
+#endif /* XILINX_TSN_PREEMPTION_H */
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_ptp.h b/drivers/net/ethernet/xilinx/xilinx_tsn_ptp.h
new file mode 100644
index 000000000000..d81b0acf12f0
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_ptp.h
@@ -0,0 +1,88 @@
+/*
+ * Xilinx TSN PTP header
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * Author: Syed S <syeds@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _TSN_PTP_H_
+#define _TSN_PTP_H_
+
+#define PTP_HW_TSTAMP_SIZE 8 /* 64 bit timestamp */
+#define PTP_RX_HWBUF_SIZE 256
+#define PTP_RX_FRAME_SIZE 252
+#define PTP_HW_TSTAMP_OFFSET (PTP_RX_HWBUF_SIZE - PTP_HW_TSTAMP_SIZE)
+
+#define PTP_MSG_TYPE_MASK BIT(3)
+#define PTP_TYPE_SYNC 0x0
+#define PTP_TYPE_FOLLOW_UP 0x8
+#define PTP_TYPE_PDELAYREQ 0x2
+#define PTP_TYPE_PDELAYRESP 0x3
+#define PTP_TYPE_PDELAYRESP_FOLLOW_UP 0xA
+#define PTP_TYPE_ANNOUNCE 0xB
+#define PTP_TYPE_SIGNALING 0xC
+
+#define PTP_TX_CONTROL_OFFSET 0x00012000 /**< Tx PTP Control Reg */
+#define PTP_RX_CONTROL_OFFSET 0x00012004 /**< Rx PTP Control Reg */
+#define RX_FILTER_CONTROL 0x00012008 /**< Rx Filter Ctrl Reg */
+
+#define PTP_RX_BASE_OFFSET 0x00010000
+/* NOTE(review): PTP_RX_CONTROL_OFFSET below duplicates the definition a few
+ * lines above with the identical value — harmless (identical redefinition),
+ * but one copy should be dropped.
+ */
+#define PTP_RX_CONTROL_OFFSET 0x00012004 /**< Rx PTP Control Reg */
+#define PTP_RX_PACKET_FIELD_MASK 0x00000F00
+#define PTP_RX_PACKET_CLEAR 0x00000001
+
+#define PTP_TX_BUFFER_OFFSET(index) (0x00011000 + (index) * 0x100)
+
+#define PTP_TX_CMD_FIELD_LEN 8
+#define PTP_TX_CMD_1STEP_SHIFT BIT(16)
+#define PTP_TX_BUFFER_CMD2_FIELD 0x4
+
+#define PTP_TX_SYNC_OFFSET 0x00011000
+#define PTP_TX_FOLLOW_UP_OFFSET 0x00011100
+#define PTP_TX_PDELAYREQ_OFFSET 0x00011200
+#define PTP_TX_PDELAYRESP_OFFSET 0x00011300
+#define PTP_TX_PDELAYRESP_FOLLOW_UP_OFFSET 0x00011400
+#define PTP_TX_ANNOUNCE_OFFSET 0x00011500
+#define PTP_TX_SIGNALING_OFFSET 0x00011600
+#define PTP_TX_GENERIC_OFFSET 0x00011700
+#define PTP_TX_SEND_SYNC_FRAME_MASK 0x00000001
+#define PTP_TX_SEND_FOLLOWUP_FRAME_MASK 0x00000002
+#define PTP_TX_SEND_PDELAYREQ_FRAME_MASK 0x00000004
+#define PTP_TX_SEND_PDELAYRESP_FRAME_MASK 0x00000008
+#define PTP_TX_SEND_PDELAYRESPFOLLOWUP_FRAME_MASK 0x00000010
+#define PTP_TX_SEND_ANNOUNCE_FRAME_MASK 0x00000020
+#define PTP_TX_SEND_FRAME6_BIT_MASK 0x00000040
+#define PTP_TX_SEND_FRAME7_BIT_MASK 0x00000080
+#define PTP_TX_FRAME_WAITING_MASK 0x0000ff00
+#define PTP_TX_FRAME_WAITING_SHIFT 8
+#define PTP_TX_WAIT_SYNC_FRAME_MASK 0x00000100
+#define PTP_TX_WAIT_FOLLOWUP_FRAME_MASK 0x00000200
+#define PTP_TX_WAIT_PDELAYREQ_FRAME_MASK 0x00000400
+#define PTP_TX_WAIT_PDELAYRESP_FRAME_MASK 0x00000800
+#define PTP_TX_WAIT_PDELAYRESPFOLLOWUP_FRAME_MASK 0x00001000
+#define PTP_TX_WAIT_ANNOUNCE_FRAME_MASK 0x00002000
+#define PTP_TX_WAIT_FRAME6_BIT_MASK 0x00004000
+#define PTP_TX_WAIT_FRAME7_BIT_MASK 0x00008000
+#define PTP_TX_WAIT_ALL_FRAMES_MASK 0x0000FF00
+#define PTP_TX_PACKET_FIELD_MASK 0x00070000
+#define PTP_TX_PACKET_FIELD_SHIFT 16
+/* 1-step Correction Field offset 802.1 ASrev */
+#define PTP_CRCT_FIELD_OFFSET 22
+/* 1-step Time Of Day offset 1588-2008 */
+#define PTP_TOD_FIELD_OFFSET 48
+
+int axienet_ptp_xmit(struct sk_buff *skb, struct net_device *ndev);
+irqreturn_t axienet_ptp_rx_irq(int irq, void *_ndev);
+irqreturn_t axienet_ptp_tx_irq(int irq, void *_ndev);
+
+#endif
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_ptp_clock.c b/drivers/net/ethernet/xilinx/xilinx_tsn_ptp_clock.c
new file mode 100644
index 000000000000..05c906019694
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_ptp_clock.c
@@ -0,0 +1,325 @@
+/*
+ * Xilinx FPGA Xilinx TSN PTP protocol clock Controller module.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Author: Syed S <syeds@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of_irq.h>
+#include "xilinx_tsn_timer.h"
+
+/* Per-instance PTP hardware clock state, allocated in probe */
+struct xlnx_ptp_timer {
+ struct device *dev;
+ void __iomem *baseaddr; /* timer register base */
+ struct ptp_clock *ptp_clock; /* handle from ptp_clock_register() */
+ struct ptp_clock_info ptp_clock_info; /* per-instance copy of the ops */
+ spinlock_t reg_lock; /* ptp timer lock */
+ int irq;
+ int pps_enable; /* deliver PTP_CLOCK_PPS events when set */
+ int countpulse; /* ISR ticks since the last PPS event */
+};
+
+/* Read the current time-of-day (seconds + nanoseconds) from the RTC.
+ * NOTE(review): nanoseconds are read before the low seconds word with no
+ * latch/retry visible here — assumes the hardware snapshots both registers
+ * on the first read; confirm, otherwise a second rollover between the two
+ * reads could produce an inconsistent timestamp.
+ */
+static void xlnx_tod_read(struct xlnx_ptp_timer *timer, struct timespec64 *ts)
+{
+ u32 sec, nsec;
+
+ nsec = in_be32(timer->baseaddr + XTIMER1588_CURRENT_RTC_NS);
+ sec = in_be32(timer->baseaddr + XTIMER1588_CURRENT_RTC_SEC_L);
+
+ ts->tv_sec = sec;
+ ts->tv_nsec = nsec;
+}
+
+/* Program the RTC offset registers from @ts.
+ * NOTE(review): only the low 32 bits of tv_sec are written and SEC_H is
+ * forced to 0 — confirm the supported epoch range makes this acceptable.
+ */
+static void xlnx_rtc_offset_write(struct xlnx_ptp_timer *timer,
+ const struct timespec64 *ts)
+{
+ /* tv_sec is time64_t (64-bit even on 32-bit kernels), so %ld was the
+  * wrong specifier; use %lld with an explicit cast. tv_nsec is long.
+  */
+ pr_debug("%s: sec: %lld nsec: %ld\n", __func__,
+ (s64)ts->tv_sec, ts->tv_nsec);
+
+ out_be32((timer->baseaddr + XTIMER1588_RTC_OFFSET_SEC_H), 0);
+ out_be32((timer->baseaddr + XTIMER1588_RTC_OFFSET_SEC_L),
+ (ts->tv_sec));
+ out_be32((timer->baseaddr + XTIMER1588_RTC_OFFSET_NS), ts->tv_nsec);
+}
+
+/* Read back the currently programmed RTC offset (low seconds word and the
+ * nanoseconds word; the high seconds word is not read).
+ */
+static void xlnx_rtc_offset_read(struct xlnx_ptp_timer *timer,
+ struct timespec64 *ts)
+{
+ ts->tv_sec = in_be32(timer->baseaddr + XTIMER1588_RTC_OFFSET_SEC_L);
+ ts->tv_nsec = in_be32(timer->baseaddr + XTIMER1588_RTC_OFFSET_NS);
+}
+
+/* PTP clock operations
+ */
+/**
+ * xlnx_ptp_adjfreq - adjust the RTC increment by @ppb parts per billion
+ * @ptp: ptp clock info
+ * @ppb: requested frequency offset in parts per billion
+ *
+ * Scales the nominal increment (hard-coded for a 125 MHz reference clock)
+ * by ppb/1e9 and writes the adjusted increment to the hardware.
+ *
+ * Return: 0 in all cases
+ */
+static int xlnx_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct xlnx_ptp_timer *timer = container_of(ptp, struct xlnx_ptp_timer,
+ ptp_clock_info);
+
+ int neg_adj = 0;
+ u64 freq;
+ u32 diff, incval;
+
+ /* This number should be replaced by a call to get the frequency
+  * from the device-tree. Currently assumes 125MHz
+  */
+ incval = 0x800000;
+ /* for 156.25 MHZ Ref clk the value is incval = 0x800000; */
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+
+ freq = incval;
+ freq *= ppb;
+ diff = div_u64(freq, 1000000000ULL);
+
+ /* diff is u32: %u matches the argument type (was %d) */
+ pr_debug("%s: adj: %u ppb: %d\n", __func__, diff, ppb);
+
+ incval = neg_adj ? (incval - diff) : (incval + diff);
+ out_be32((timer->baseaddr + XTIMER1588_RTC_INCREMENT), incval);
+ return 0;
+}
+
+/* Shift the clock by @delta ns by folding it into the RTC offset registers */
+static int xlnx_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct xlnx_ptp_timer *timer = container_of(ptp, struct xlnx_ptp_timer,
+ ptp_clock_info);
+ struct timespec64 offset;
+ unsigned long flags;
+
+ spin_lock_irqsave(&timer->reg_lock, flags);
+
+ /* read-modify-write of the offset under the register lock */
+ xlnx_rtc_offset_read(timer, &offset);
+ offset = timespec64_add(offset, ns_to_timespec64(delta));
+ xlnx_rtc_offset_write(timer, &offset);
+
+ spin_unlock_irqrestore(&timer->reg_lock, flags);
+
+ return 0;
+}
+
+/* Return the current hardware time-of-day under the register lock.
+ * Always returns 0.
+ */
+static int xlnx_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ unsigned long flags;
+ struct xlnx_ptp_timer *timer = container_of(ptp, struct xlnx_ptp_timer,
+ ptp_clock_info);
+ spin_lock_irqsave(&timer->reg_lock, flags);
+
+ xlnx_tod_read(timer, ts);
+
+ spin_unlock_irqrestore(&timer->reg_lock, flags);
+ return 0;
+}
+
+/**
+ * xlnx_ptp_settime - Set the current time on the hardware clock
+ * @ptp: ptp clock structure
+ * @ts: timespec64 containing the new time for the cycle counter
+ *
+ * Return: 0 in all cases.
+ *
+ * The seconds register is written first, then the nanoseconds.
+ * The hardware loads the entire new value when a nanosecond register
+ * is written.
+ **/
+static int xlnx_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct xlnx_ptp_timer *timer = container_of(ptp, struct xlnx_ptp_timer,
+ ptp_clock_info);
+ struct timespec64 delta, tod;
+ struct timespec64 offset;
+ unsigned long flags;
+
+ spin_lock_irqsave(&timer->reg_lock, flags);
+
+ /* First zero the offset */
+ offset.tv_sec = 0;
+ offset.tv_nsec = 0;
+ xlnx_rtc_offset_write(timer, &offset);
+
+ /* Get the current timer value */
+ xlnx_tod_read(timer, &tod);
+
+ /* Subtract the current reported time from our desired time */
+ delta = timespec64_sub(*ts, tod);
+
+ /* Don't write a negative offset */
+ if (delta.tv_sec <= 0) {
+ delta.tv_sec = 0;
+ /* NOTE(review): for a negative tv_sec the (normalized,
+  * non-negative) tv_nsec is kept, so a small partial offset is
+  * still written rather than zero — confirm this is intended.
+  */
+ if (delta.tv_nsec < 0)
+ delta.tv_nsec = 0;
+ }
+
+ xlnx_rtc_offset_write(timer, &delta);
+ spin_unlock_irqrestore(&timer->reg_lock, flags);
+ return 0;
+}
+
+/**
+ * xlnx_ptp_enable - enable/disable ancillary clock features
+ * @ptp: ptp clock info
+ * @rq: feature request (only PTP_CLK_REQ_PPS is supported)
+ * @on: non-zero to enable the feature, zero to disable it
+ *
+ * Return: 0 on success, -EOPNOTSUPP for unsupported request types
+ */
+static int xlnx_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct xlnx_ptp_timer *timer = container_of(ptp, struct xlnx_ptp_timer,
+ ptp_clock_info);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PPS:
+ /* honour @on: the original set pps_enable unconditionally,
+  * making it impossible to turn PPS events off again
+  */
+ timer->pps_enable = on ? 1 : 0;
+ return 0;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+/* Template ops/capabilities descriptor; copied into each instance's
+ * ptp_clock_info in axienet_ptp_timer_probe() before registration.
+ */
+static struct ptp_clock_info xlnx_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "Xilinx Timer",
+ .max_adj = 999999999,
+ .n_ext_ts = 0,
+ .pps = 1,
+ .adjfreq = xlnx_ptp_adjfreq,
+ .adjtime = xlnx_ptp_adjtime,
+ .gettime64 = xlnx_ptp_gettime,
+ .settime64 = xlnx_ptp_settime,
+ .enable = xlnx_ptp_enable,
+};
+
+/* module operations */
+
+/**
+ * xlnx_ptp_timer_isr - Interrupt Service Routine
+ * @irq: IRQ number
+ * @priv: pointer to the timer structure
+ *
+ * Returns: IRQ_HANDLED for all cases
+ *
+ * Handles the timer interrupt. The timer interrupt fires 128 times per
+ * second. When our count reaches PULSESIN1PPS, emit a PTP_CLOCK_PPS event
+ * (if the clock is registered and PPS delivery is enabled), then
+ * acknowledge the interrupt in hardware.
+ */
+static irqreturn_t xlnx_ptp_timer_isr(int irq, void *priv)
+{
+ struct xlnx_ptp_timer *timer = (struct xlnx_ptp_timer *)priv;
+ struct ptp_clock_event event;
+
+ event.type = PTP_CLOCK_PPS;
+ ++timer->countpulse;
+ if (timer->countpulse >= PULSESIN1PPS) {
+ timer->countpulse = 0;
+ if ((timer->ptp_clock) && (timer->pps_enable))
+ ptp_clock_event(timer->ptp_clock, &event);
+ }
+ /* acknowledge the timer interrupt in hardware */
+ out_be32((timer->baseaddr + XTIMER1588_INTERRUPT),
+ (1 << XTIMER1588_INT_SHIFT));
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * axienet_ptp_timer_remove - tear down the PTP timer
+ * @priv: pointer returned by axienet_ptp_timer_probe()
+ *
+ * Frees the IRQ, invalidates the global PHC index, unregisters the PTP
+ * clock (if registered) and releases the private state.
+ *
+ * Return: 0 in all cases
+ */
+int axienet_ptp_timer_remove(void *priv)
+{
+ struct xlnx_ptp_timer *timer = (struct xlnx_ptp_timer *)priv;
+
+ free_irq(timer->irq, (void *)timer);
+
+ axienet_phc_index = -1;
+ if (timer->ptp_clock) {
+ ptp_clock_unregister(timer->ptp_clock);
+ timer->ptp_clock = NULL;
+ }
+ kfree(timer);
+ return 0;
+}
+
+/* Return the PHC index of the registered clock, or -1 when none exists */
+int axienet_get_phc_index(void *priv)
+{
+ struct xlnx_ptp_timer *timer = (struct xlnx_ptp_timer *)priv;
+
+ return timer->ptp_clock ? ptp_clock_index(timer->ptp_clock) : -1;
+}
+
+/**
+ * axienet_ptp_timer_probe - set up and register the PTP hardware clock
+ * @base: ioremapped base address of the timer registers
+ * @pdev: platform device used for IRQ lookup and clock registration
+ *
+ * Return: pointer to the private timer state on success, NULL on failure
+ */
+void *axienet_ptp_timer_probe(void __iomem *base, struct platform_device *pdev)
+{
+ struct xlnx_ptp_timer *timer;
+ struct timespec64 ts;
+ int err = 0;
+
+ timer = kzalloc(sizeof(*timer), GFP_KERNEL);
+ if (!timer)
+ return NULL;
+
+ timer->baseaddr = base;
+
+ timer->irq = platform_get_irq_byname(pdev, "interrupt_ptp_timer");
+
+ if (timer->irq < 0) {
+ timer->irq = platform_get_irq_byname(pdev, "rtc_irq");
+ if (timer->irq > 0) {
+ /* fix: the split literal "is" "deprecated" used to
+  * concatenate to "isdeprecated"
+  */
+ pr_err("ptp timer interrupt name 'rtc_irq' is deprecated\n");
+ } else {
+ pr_err("ptp timer interrupt not found\n");
+ kfree(timer);
+ return NULL;
+ }
+ }
+ spin_lock_init(&timer->reg_lock);
+
+ timer->ptp_clock_info = xlnx_ptp_clock_info;
+
+ timer->ptp_clock = ptp_clock_register(&timer->ptp_clock_info,
+ &pdev->dev);
+
+ if (IS_ERR(timer->ptp_clock)) {
+ err = PTR_ERR(timer->ptp_clock);
+ pr_debug("Failed to register ptp clock\n");
+ goto out;
+ }
+
+ axienet_phc_index = ptp_clock_index(timer->ptp_clock);
+
+ ts = ktime_to_timespec64(ktime_get_real());
+
+ xlnx_ptp_settime(&timer->ptp_clock_info, &ts);
+
+ /* Enable interrupts */
+ err = request_irq(timer->irq,
+ xlnx_ptp_timer_isr,
+ 0,
+ "ptp_rtc",
+ (void *)timer);
+ if (err)
+ goto err_irq;
+
+ return timer;
+
+err_irq:
+ ptp_clock_unregister(timer->ptp_clock);
+out:
+ /* fix: @timer was leaked on both error paths in the original */
+ kfree(timer);
+ return NULL;
+}
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_ptp_xmit.c b/drivers/net/ethernet/xilinx/xilinx_tsn_ptp_xmit.c
new file mode 100644
index 000000000000..831b4b7b5085
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_ptp_xmit.c
@@ -0,0 +1,369 @@
+/*
+ * Xilinx FPGA Xilinx TSN PTP transfer protocol module.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Author: Syed S <syeds@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "xilinx_axienet.h"
+#include "xilinx_tsn_ptp.h"
+#include "xilinx_tsn_timer.h"
+#include <linux/ptp_classify.h>
+
+#define PTP_ONE_SECOND 1000000000 /**< Value in ns */
+
+#define msg_type_string(type) \
+ ((type) == PTP_TYPE_SYNC) ? "SYNC" : \
+ ((type) == PTP_TYPE_FOLLOW_UP) ? "FOLLOW_UP" : \
+ ((type) == PTP_TYPE_PDELAYREQ) ? "PDELAY_REQ" : \
+ ((type) == PTP_TYPE_PDELAYRESP) ? "PDELAY_RESP" : \
+ ((type) == PTP_TYPE_PDELAYRESP_FOLLOW_UP) ? "PDELAY_RESP_FOLLOW_UP" : \
+ ((type) == PTP_TYPE_ANNOUNCE) ? "ANNOUNCE" : \
+ "UNKNOWN"
+
+/**
+ * memcpy_fromio_32 - copy a PTP buffer out of the hardware
+ * @lp: Pointer to axienet local structure
+ * @offset: Offset in the PTP buffer
+ * @data: Destination buffer
+ * @len: Number of bytes to copy
+ *
+ * Reads the hardware buffer one 32-bit word at a time; a trailing partial
+ * word is read once and only the needed bytes are stored.
+ */
+static void memcpy_fromio_32(struct axienet_local *lp,
+ unsigned long offset, u8 *data, size_t len)
+{
+ for (; len >= 4; len -= 4, offset += 4, data += 4)
+ *(u32 *)data = axienet_ior(lp, offset);
+
+ if (len) {
+ u32 last_word = axienet_ior(lp, offset);
+ u8 *src = (u8 *)&last_word;
+
+ do {
+ *data++ = *src++;
+ } while (--len);
+ }
+}
+
+/**
+ * memcpy_toio_32 - copy a buffer into the hardware PTP buffer
+ * @lp: Pointer to axienet local structure
+ * @offset: Offset in the PTP buffer
+ * @data: Source data
+ * @len: Number of bytes to copy
+ *
+ * Writes the source 32 bits at a time; a trailing partial word is built
+ * in a zero-initialized temporary and written with a single register write.
+ */
+static void memcpy_toio_32(struct axienet_local *lp,
+ unsigned long offset, u8 *data, size_t len)
+{
+ for (; len >= 4; len -= 4, offset += 4, data += 4)
+ axienet_iow(lp, offset, *(u32 *)data);
+
+ if (len) {
+ u32 last_word = 0;
+ u8 *dst = (u8 *)&last_word;
+
+ do {
+ *dst++ = *data++;
+ } while (--len);
+ axienet_iow(lp, offset, last_word);
+ }
+}
+
+/* Return non-zero when the frame's PTP messageType nibble is SYNC */
+static int is_sync(struct sk_buff *skb)
+{
+ u8 type_nibble = skb->data[ETH_HLEN] & 0xf;
+
+ return type_nibble == PTP_TYPE_SYNC;
+}
+
+/**
+ * axienet_ptp_xmit - xmit skb using PTP HW
+ * @skb: sk_buff pointer that contains data to be Txed.
+ * @ndev: Pointer to net_device structure.
+ *
+ * Return: NETDEV_TX_OK, on success
+ *         NETDEV_TX_BUSY, if any of the descriptors are not free
+ *
+ * This function is called to transmit a PTP skb. The function uses
+ * the free PTP TX buffer entry and sends the frame
+ */
+int axienet_ptp_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ u8 msg_type;
+ struct axienet_local *lp = netdev_priv(ndev);
+ unsigned long flags;
+ u8 tx_frame_waiting;
+ u8 free_index;
+ u32 cmd1_field = 0;
+ u32 cmd2_field = 0;
+
+ /* PTP messageType byte follows the Ethernet header */
+ msg_type = *(u8 *)(skb->data + ETH_HLEN);
+
+ pr_debug(" -->XMIT: protocol: %x message: %s frame_len: %d\n",
+ skb->protocol,
+ msg_type_string(msg_type & 0xf), skb->len);
+
+ tx_frame_waiting = (axienet_ior(lp, PTP_TX_CONTROL_OFFSET) &
+ PTP_TX_FRAME_WAITING_MASK) >>
+ PTP_TX_FRAME_WAITING_SHIFT;
+
+ /* we reached last frame */
+ if (tx_frame_waiting & (1 << 7)) {
+ if (!netif_queue_stopped(ndev))
+ netif_stop_queue(ndev);
+ pr_debug("tx_frame_waiting: %d\n", tx_frame_waiting);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* go to next available slot: fls() of the waiting bitmap yields the
+  * first buffer index that is not yet occupied
+  */
+ free_index = fls(tx_frame_waiting);
+
+ /* write the len */
+ if (lp->ptp_ts_type == HWTSTAMP_TX_ONESTEP_SYNC &&
+ is_sync(skb)) {
+ /* enable 1STEP SYNC */
+ cmd1_field |= PTP_TX_CMD_1STEP_SHIFT;
+ cmd2_field |= PTP_TOD_FIELD_OFFSET;
+ }
+
+ cmd1_field |= skb->len;
+
+ axienet_iow(lp, PTP_TX_BUFFER_OFFSET(free_index), cmd1_field);
+ axienet_iow(lp, PTP_TX_BUFFER_OFFSET(free_index) +
+ PTP_TX_BUFFER_CMD2_FIELD, cmd2_field);
+ memcpy_toio_32(lp,
+ (PTP_TX_BUFFER_OFFSET(free_index) +
+ PTP_TX_CMD_FIELD_LEN),
+ skb->data, skb->len);
+
+ /* send the frame */
+ axienet_iow(lp, PTP_TX_CONTROL_OFFSET, (1 << free_index));
+
+ /* Two-step (or non-sync) frames are parked on ptp_txq until the TX
+  * timestamp work retrieves their hardware timestamps; one-step sync
+  * frames need no follow-up and are not queued.
+  */
+ if (lp->ptp_ts_type != HWTSTAMP_TX_ONESTEP_SYNC ||
+ (!is_sync(skb))) {
+ spin_lock_irqsave(&lp->ptp_tx_lock, flags);
+ skb->cb[0] = free_index;
+ skb_queue_tail(&lp->ptp_txq, skb);
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ skb_tx_timestamp(skb);
+ spin_unlock_irqrestore(&lp->ptp_tx_lock, flags);
+ }
+ return NETDEV_TX_OK;
+}
+
+/**
+ * axienet_set_timestamp - timestamp skb with HW timestamp
+ * @lp: Pointer to axienet local structure
+ * @hwtstamps: Pointer to skb timestamp structure
+ * @offset: offset of the timestamp in the PTP buffer
+ *
+ * Return: None.
+ *
+ * Reads the 64-bit hardware timestamp (seconds at @offset, nanoseconds at
+ * @offset + 4) and stores it in @hwtstamps.
+ */
+static void axienet_set_timestamp(struct axienet_local *lp,
+ struct skb_shared_hwtstamps *hwtstamps,
+ unsigned int offset)
+{
+ u32 captured_ns;
+ u32 captured_sec;
+
+ captured_ns = axienet_ior(lp, offset + 4);
+ captured_sec = axienet_ior(lp, offset);
+
+ /* Upper 32 bits contain s, lower 32 bits contain ns. */
+ hwtstamps->hwtstamp = ktime_set(captured_sec,
+ captured_ns);
+}
+
+/**
+ * axienet_ptp_recv - receive ptp buffer in skb from HW
+ * @ndev: Pointer to net_device structure.
+ *
+ * This function is called from the ptp rx isr. It drains the hardware
+ * ring between the software and hardware pointers, copies each frame
+ * into a freshly allocated skb, timestamps event messages and hands
+ * them to netif_rx for further processing.
+ */
+static void axienet_ptp_recv(struct net_device *ndev)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ unsigned long ptp_frame_base_addr = 0;
+ struct sk_buff *skb;
+ u16 msg_len;
+ u8 msg_type;
+ u32 bytes = 0;
+ u32 packets = 0;
+
+ pr_debug("%s:\n ", __func__);
+
+ while (((lp->ptp_rx_hw_pointer & 0xf) !=
+ (lp->ptp_rx_sw_pointer & 0xf))) {
+ skb = netdev_alloc_skb(ndev, PTP_RX_FRAME_SIZE);
+
+ /* fix: the original dereferenced skb without a NULL check.
+  * Still consume the HW slot so the ring keeps draining.
+  */
+ if (!skb) {
+ lp->ptp_rx_sw_pointer += 1;
+ ndev->stats.rx_dropped++;
+ continue;
+ }
+
+ lp->ptp_rx_sw_pointer += 1;
+
+ ptp_frame_base_addr = PTP_RX_BASE_OFFSET +
+ ((lp->ptp_rx_sw_pointer & 0xf) *
+ PTP_RX_HWBUF_SIZE);
+
+ memset(skb->data, 0x0, PTP_RX_FRAME_SIZE);
+
+ memcpy_fromio_32(lp, ptp_frame_base_addr, skb->data,
+ PTP_RX_FRAME_SIZE);
+
+ msg_type = *(u8 *)(skb->data + ETH_HLEN) & 0xf;
+ msg_len = *(u16 *)(skb->data + ETH_HLEN + 2);
+
+ skb_put(skb, ntohs(msg_len) + ETH_HLEN);
+
+ bytes += skb->len;
+ packets++;
+
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ pr_debug(" -->RECV: protocol: %x message: %s frame_len: %d\n",
+ skb->protocol, msg_type_string(msg_type & 0xf),
+ skb->len);
+ /* timestamp only event messages */
+ if (!(msg_type & PTP_MSG_TYPE_MASK)) {
+ axienet_set_timestamp(lp, skb_hwtstamps(skb),
+ (ptp_frame_base_addr +
+ PTP_HW_TSTAMP_OFFSET));
+ }
+
+ netif_rx(skb);
+ }
+ ndev->stats.rx_packets += packets;
+ ndev->stats.rx_bytes += bytes;
+}
+
+/**
+ * axienet_ptp_rx_irq - PTP RX ISR handler
+ * @irq: irq number
+ * @_ndev: net_device pointer
+ *
+ * Latches the hardware write pointer from the RX control register and
+ * drains the ring via axienet_ptp_recv().
+ *
+ * Return: IRQ_HANDLED for all cases.
+ */
+irqreturn_t axienet_ptp_rx_irq(int irq, void *_ndev)
+{
+ struct net_device *ndev = _ndev;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ pr_debug("%s: received\n ", __func__);
+ /* shift by 8 matches PTP_RX_PACKET_FIELD_MASK (bits 11:8) */
+ lp->ptp_rx_hw_pointer = (axienet_ior(lp, PTP_RX_CONTROL_OFFSET)
+ & PTP_RX_PACKET_FIELD_MASK) >> 8;
+
+ axienet_ptp_recv(ndev);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * axienet_tx_tstamp - timestamp skb on transmit path
+ * @work: Pointer to work_struct structure
+ *
+ * Scheduled from the TX-done interrupt: walks the parked skb queue,
+ * attaches the hardware TX timestamp to each skb whose buffer slot has
+ * already been transmitted, and frees it. Skbs for slots not yet sent
+ * are re-queued and processed on a later invocation.
+ */
+void axienet_tx_tstamp(struct work_struct *work)
+{
+ struct axienet_local *lp = container_of(work, struct axienet_local,
+ tx_tstamp_work);
+ struct net_device *ndev = lp->ndev;
+ struct skb_shared_hwtstamps hwtstamps;
+ struct sk_buff *skb;
+ unsigned long ts_reg_offset;
+ unsigned long flags;
+ u8 tx_packet;
+ u8 index;
+ u32 bytes = 0;
+ u32 packets = 0;
+
+ memset(&hwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+
+ spin_lock_irqsave(&lp->ptp_tx_lock, flags);
+
+ /* index of the last buffer slot the hardware has transmitted */
+ tx_packet = (axienet_ior(lp, PTP_TX_CONTROL_OFFSET) &
+ PTP_TX_PACKET_FIELD_MASK) >>
+ PTP_TX_PACKET_FIELD_SHIFT;
+
+ while ((skb = __skb_dequeue(&lp->ptp_txq)) != NULL) {
+ /* slot index saved in skb->cb[0] by axienet_ptp_xmit() */
+ index = skb->cb[0];
+
+ /* dequeued packet yet to be xmited? */
+ if (index > tx_packet) {
+ /* enqueue it back and break */
+ skb_queue_tail(&lp->ptp_txq, skb);
+ break;
+ }
+ /* time stamp reg offset */
+ ts_reg_offset = PTP_TX_BUFFER_OFFSET(index) +
+ PTP_HW_TSTAMP_OFFSET;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
+ axienet_set_timestamp(lp, &hwtstamps, ts_reg_offset);
+ skb_tstamp_tx(skb, &hwtstamps);
+ }
+
+ bytes += skb->len;
+ packets++;
+ dev_kfree_skb_any(skb);
+ }
+ ndev->stats.tx_packets += packets;
+ ndev->stats.tx_bytes += bytes;
+
+ spin_unlock_irqrestore(&lp->ptp_tx_lock, flags);
+}
+
+/**
+ * axienet_ptp_tx_irq - PTP TX irq handler
+ * @irq: irq number
+ * @_ndev: net_device pointer
+ *
+ * Acknowledges the TX interrupt, defers timestamp retrieval to the
+ * tx_tstamp_work worker and restarts the transmit queue.
+ *
+ * Return: IRQ_HANDLED for all cases.
+ *
+ */
+irqreturn_t axienet_ptp_tx_irq(int irq, void *_ndev)
+{
+ struct net_device *ndev = _ndev;
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ pr_debug("%s: got tx interrupt\n", __func__);
+
+ /* read ctrl register to clear the interrupt */
+ axienet_ior(lp, PTP_TX_CONTROL_OFFSET);
+
+ schedule_work(&lp->tx_tstamp_work);
+
+ netif_wake_queue(ndev);
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_qci.c b/drivers/net/ethernet/xilinx/xilinx_tsn_qci.c
new file mode 100644
index 000000000000..20efa6c4d365
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_qci.c
@@ -0,0 +1,151 @@
+/*
+ * Xilinx FPGA Xilinx TSN QCI Controller module.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Author: Saurabh Sengar <saurabhs@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "xilinx_tsn_switch.h"
+
+#define SMC_MODE_SHIFT 28
+#define SMC_CBR_MASK 0x00FFFFFF
+#define SMC_EBR_MASK 0x00FFFFFF
+#define IN_PORTID_MASK 0x3
+#define IN_PORT_SHIFT 14
+#define MAX_FR_SIZE_MASK 0x00000FFF
+
+#define GATE_ID_SHIFT 24
+#define METER_ID_SHIFT 8
+#define EN_METER_SHIFT 6
+#define ALLOW_STREM_SHIFT 5
+#define EN_PSFP_SHIFT 4
+#define WR_OP_TYPE_MASK 0x3
+#define WR_OP_TYPE_SHIFT 2
+#define OP_TYPE_SHIFT 1
+#define PSFP_EN_CONTROL_MASK 0x1
+
+/**
+ * psfp_control - Configure the control register for PSFP
+ * @data: Value to be programmed
+ *
+ * Packs the gate/meter/stream fields into the control word, writes it,
+ * then polls (bounded busy-wait) until the hardware clears the enable bit
+ * or the iteration budget is exhausted.
+ */
+void psfp_control(struct psfp_config data)
+{
+ u32 mask;
+ u32 timeout = 20000;
+
+ mask = data.gate_id << GATE_ID_SHIFT;
+ mask |= data.meter_id << METER_ID_SHIFT;
+ mask |= data.en_meter << EN_METER_SHIFT;
+ mask |= data.allow_stream << ALLOW_STREM_SHIFT;
+ mask |= data.en_psfp << EN_PSFP_SHIFT;
+ mask |= (data.wr_op_type & WR_OP_TYPE_MASK) << WR_OP_TYPE_SHIFT;
+ mask |= data.op_type << OP_TYPE_SHIFT;
+ mask |= PSFP_EN_CONTROL_MASK;
+
+ axienet_iow(&lp, PSFP_CONTROL_OFFSET, mask);
+
+ /* wait for write to complete */
+ while ((axienet_ior(&lp, PSFP_CONTROL_OFFSET) &
+ PSFP_EN_CONTROL_MASK) && timeout)
+ timeout--;
+
+ if (!timeout)
+ pr_warn("PSFP control write took longer time!!\n");
+}
+
+/**
+ * get_stream_filter_config - Get Stream Filter Configuration
+ * @data: Value returned
+ *
+ * Reads the stream filter register (via the file-global &lp device) and
+ * decodes the max frame size and ingress port id fields.
+ */
+void get_stream_filter_config(struct stream_filter *data)
+{
+ u32 reg_val;
+
+ reg_val = axienet_ior(&lp, STREAM_FILTER_CONFIG_OFFSET);
+
+ data->max_fr_size = reg_val & MAX_FR_SIZE_MASK;
+ data->in_pid = (reg_val >> IN_PORT_SHIFT) & IN_PORTID_MASK;
+}
+
+/**
+ * config_stream_filter - Configure Stream Filter Configuration
+ * @data: Value to be programmed
+ *
+ * Packs the ingress port id and max frame size into the stream filter
+ * register and writes it to hardware.
+ */
+void config_stream_filter(struct stream_filter data)
+{
+ u32 reg_val;
+
+ reg_val = data.max_fr_size & MAX_FR_SIZE_MASK;
+ reg_val |= (data.in_pid & IN_PORTID_MASK) << IN_PORT_SHIFT;
+
+ axienet_iow(&lp, STREAM_FILTER_CONFIG_OFFSET, reg_val);
+}
+
+/**
+ * get_meter_reg - Read Stream Meter Configuration registers value
+ * @data: Value returned
+ *
+ * Reads CIR/EIR/CBR and the combined EBR/mode register; the mode field
+ * occupies the top nibble of the EBR register.
+ */
+void get_meter_reg(struct meter_config *data)
+{
+ u32 conf_r4;
+
+ data->cir = axienet_ior(&lp, STREAM_METER_CIR_OFFSET);
+ data->eir = axienet_ior(&lp, STREAM_METER_EIR_OFFSET);
+ data->cbr = axienet_ior(&lp, STREAM_METER_CBR_OFFSET) & SMC_CBR_MASK;
+ conf_r4 = axienet_ior(&lp, STREAM_METER_EBR_OFFSET);
+
+ data->ebr = conf_r4 & SMC_EBR_MASK;
+ data->mode = (conf_r4 & 0xF0000000) >> SMC_MODE_SHIFT;
+}
+
+/**
+ * program_meter_reg - configure Stream Meter Configuration registers
+ * @data: Value to be programmed
+ *
+ * Writes CIR/EIR/CBR, then the combined EBR/mode word (mode in the top
+ * nibble of the EBR register).
+ */
+void program_meter_reg(struct meter_config data)
+{
+ u32 ebr_word = (data.ebr & SMC_EBR_MASK) |
+ (data.mode << SMC_MODE_SHIFT);
+
+ axienet_iow(&lp, STREAM_METER_CIR_OFFSET, data.cir);
+ axienet_iow(&lp, STREAM_METER_EIR_OFFSET, data.eir);
+ axienet_iow(&lp, STREAM_METER_CBR_OFFSET, data.cbr & SMC_CBR_MASK);
+ axienet_iow(&lp, STREAM_METER_EBR_OFFSET, ebr_word);
+}
+
+/**
+ * get_psfp_static_counter - get memory static counters value
+ * @data : return value, containing counter value
+ *
+ * Each counter is 64 bits wide, laid out as two consecutive 32-bit
+ * registers (lsb at +0, msb at +4); @data->num selects the 8-byte slot.
+ */
+void get_psfp_static_counter(struct psfp_static_counter *data)
+{
+ int offset = (data->num) * 8;
+
+ data->psfp_fr_count.lsb = axienet_ior(&lp, TOTAL_PSFP_FRAMES_OFFSET +
+ offset);
+ data->psfp_fr_count.msb = axienet_ior(&lp, TOTAL_PSFP_FRAMES_OFFSET +
+ offset + 0x4);
+
+ data->err_filter_ins_port.lsb = axienet_ior(&lp,
+ FLTR_INGS_PORT_ERR_OFFSET + offset);
+ data->err_filter_ins_port.msb = axienet_ior(&lp,
+ FLTR_INGS_PORT_ERR_OFFSET + offset + 0x4);
+
+ data->err_filtr_sdu.lsb = axienet_ior(&lp, FLTR_STDU_ERR_OFFSET +
+ offset);
+ data->err_filtr_sdu.msb = axienet_ior(&lp, FLTR_STDU_ERR_OFFSET +
+ offset + 0x4);
+
+ data->err_meter.lsb = axienet_ior(&lp, METER_ERR_OFFSET + offset);
+ data->err_meter.msb = axienet_ior(&lp, METER_ERR_OFFSET + offset + 0x4);
+}
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_shaper.c b/drivers/net/ethernet/xilinx/xilinx_tsn_shaper.c
new file mode 100644
index 000000000000..e7a054b78a6e
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_shaper.c
@@ -0,0 +1,232 @@
+/*
+ * Xilinx FPGA Xilinx TSN QBV scheduler module.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Author: Syed S <syeds@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "xilinx_axienet.h"
+#include "xilinx_tsn_shaper.h"
+
+static inline int axienet_map_gs_to_hw(struct axienet_local *lp, u32 gs)
+{
+ u8 be_queue = 0;
+ u8 re_queue = 1;
+ u8 st_queue = 2;
+ unsigned int acl_bit_map = 0;
+
+ if (lp->num_tc == 2)
+ st_queue = 1;
+
+ if (gs & GS_BE_OPEN)
+ acl_bit_map |= (1 << be_queue);
+ if (gs & GS_ST_OPEN)
+ acl_bit_map |= (1 << st_queue);
+ if (lp->num_tc == 3 && (gs & GS_RE_OPEN))
+ acl_bit_map |= (1 << re_queue);
+
+ return acl_bit_map;
+}
+
+static int __axienet_set_schedule(struct net_device *ndev, struct qbv_info *qbv)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ u16 i;
+ unsigned int acl_bit_map = 0;
+ u32 u_config_change = 0;
+ u8 port = qbv->port;
+ qbv->list_length = min_t(u32, qbv->list_length, QBV_MAX_ENTRIES); /* bound user value to acl array size */
+ if (qbv->cycle_time == 0) {
+ /* clear the gate enable bit */
+ u_config_change &= ~CC_ADMIN_GATE_ENABLE_BIT;
+ /* open all the gates */
+ u_config_change |= CC_ADMIN_GATE_STATE_MASK; /* all-gates-open value (7), not the SHIFT macro */
+
+ axienet_iow(lp, CONFIG_CHANGE(port), u_config_change);
+
+ return 0;
+ }
+
+ if (axienet_ior(lp, PORT_STATUS(port)) & 1) {
+ if (qbv->force) {
+ u_config_change &= ~CC_ADMIN_GATE_ENABLE_BIT;
+ axienet_iow(lp, CONFIG_CHANGE(port), u_config_change);
+ } else {
+ return -EALREADY;
+ }
+ }
+ /* write admin time */
+ axienet_iow(lp, ADMIN_CYCLE_TIME_DENOMINATOR(port),
+ qbv->cycle_time & CYCLE_TIME_DENOMINATOR_MASK);
+
+ axienet_iow(lp, ADMIN_BASE_TIME_NS(port), qbv->ptp_time_ns);
+
+ axienet_iow(lp, ADMIN_BASE_TIME_SEC(port),
+ qbv->ptp_time_sec & 0xFFFFFFFF);
+ axienet_iow(lp, ADMIN_BASE_TIME_SECS(port),
+ (qbv->ptp_time_sec >> 32) & BASE_TIME_SECS_MASK);
+
+ u_config_change = axienet_ior(lp, CONFIG_CHANGE(port));
+
+ u_config_change &= ~(CC_ADMIN_CTRL_LIST_LENGTH_MASK <<
+ CC_ADMIN_CTRL_LIST_LENGTH_SHIFT);
+ u_config_change |= (qbv->list_length & CC_ADMIN_CTRL_LIST_LENGTH_MASK)
+ << CC_ADMIN_CTRL_LIST_LENGTH_SHIFT;
+
+ /* program each list */
+ for (i = 0; i < qbv->list_length; i++) {
+ acl_bit_map = axienet_map_gs_to_hw(lp, qbv->acl_gate_state[i]);
+ axienet_iow(lp, ADMIN_CTRL_LIST(port, i),
+ (acl_bit_map & (ACL_GATE_STATE_MASK)) <<
+ ACL_GATE_STATE_SHIFT);
+
+ /* set the time for each entry */
+ axienet_iow(lp, ADMIN_CTRL_LIST_TIME(port, i),
+ qbv->acl_gate_time[i] & CTRL_LIST_TIME_INTERVAL_MASK);
+ }
+
+ /* clear interrupt status */
+ axienet_iow(lp, INT_STATUS(port), 0);
+
+ /* kick in new config change */
+ u_config_change |= CC_ADMIN_CONFIG_CHANGE_BIT;
+
+ /* enable gate */
+ u_config_change |= CC_ADMIN_GATE_ENABLE_BIT;
+
+ /* start */
+ axienet_iow(lp, CONFIG_CHANGE(port), u_config_change);
+
+ return 0;
+}
+
+int axienet_set_schedule(struct net_device *ndev, void __user *useraddr)
+{
+ struct qbv_info *config;
+ int ret;
+
+ config = kmalloc(sizeof(*config), GFP_KERNEL);
+ if (!config)
+ return -ENOMEM;
+
+ if (copy_from_user(config, useraddr, sizeof(struct qbv_info))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ pr_debug("setting new schedule\n");
+
+ ret = __axienet_set_schedule(ndev, config);
+out:
+ kfree(config);
+ return ret;
+}
+
+static int __axienet_get_schedule(struct net_device *ndev, struct qbv_info *qbv)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ u16 i = 0;
+ u32 u_value = 0;
+ u8 port = qbv->port;
+
+ if (!(axienet_ior(lp, CONFIG_CHANGE(port)) &
+ CC_ADMIN_GATE_ENABLE_BIT)) {
+ qbv->cycle_time = 0;
+ return 0;
+ }
+
+ u_value = axienet_ior(lp, GATE_STATE(port));
+ qbv->list_length = (u_value >> CC_ADMIN_CTRL_LIST_LENGTH_SHIFT) &
+ CC_ADMIN_CTRL_LIST_LENGTH_MASK;
+ qbv->list_length = min_t(u32, qbv->list_length, QBV_MAX_ENTRIES); /* mask allows 511; arrays hold 256 — prevent OOB write */
+ u_value = axienet_ior(lp, OPER_CYCLE_TIME_DENOMINATOR(port));
+ qbv->cycle_time = u_value & CYCLE_TIME_DENOMINATOR_MASK;
+
+ u_value = axienet_ior(lp, OPER_BASE_TIME_NS(port));
+ qbv->ptp_time_ns = u_value & OPER_BASE_TIME_NS_MASK;
+
+ qbv->ptp_time_sec = axienet_ior(lp, OPER_BASE_TIME_SEC(port));
+ u_value = axienet_ior(lp, OPER_BASE_TIME_SECS(port));
+ qbv->ptp_time_sec |= (u64)(u_value & BASE_TIME_SECS_MASK) << 32;
+
+ for (i = 0; i < qbv->list_length; i++) {
+ u_value = axienet_ior(lp, OPER_CTRL_LIST(port, i));
+ qbv->acl_gate_state[i] = (u_value >> ACL_GATE_STATE_SHIFT) &
+ ACL_GATE_STATE_MASK;
+ /**
+ * In 2Q system, the actual ST Gate state value is 2,
+ * for user the ST Gate state value is always 4.
+ */
+ if (lp->num_tc == 2 && qbv->acl_gate_state[i] == 2)
+ qbv->acl_gate_state[i] = 4;
+
+ u_value = axienet_ior(lp, OPER_CTRL_LIST_TIME(port, i));
+ qbv->acl_gate_time[i] = u_value & CTRL_LIST_TIME_INTERVAL_MASK;
+ }
+ return 0;
+}
+
+int axienet_get_schedule(struct net_device *ndev, void __user *useraddr)
+{
+ struct qbv_info *qbv;
+ int ret = 0;
+
+ qbv = kmalloc(sizeof(*qbv), GFP_KERNEL);
+ if (!qbv)
+ return -ENOMEM;
+
+ if (copy_from_user(qbv, useraddr, sizeof(struct qbv_info))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ __axienet_get_schedule(ndev, qbv);
+
+ if (copy_to_user(useraddr, qbv, sizeof(struct qbv_info)))
+ ret = -EFAULT;
+out:
+ kfree(qbv);
+ return ret;
+}
+
+static irqreturn_t axienet_qbv_irq(int irq, void *_ndev)
+{
+ struct net_device *ndev = _ndev;
+ struct axienet_local *lp = netdev_priv(ndev);
+ u8 port = 0; /* TODO */
+
+ /* clear status */
+ axienet_iow(lp, INT_CLEAR(port), 0);
+
+ return IRQ_HANDLED;
+}
+
+int axienet_qbv_init(struct net_device *ndev)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+ int rc;
+
+ rc = request_irq(lp->qbv_irq, axienet_qbv_irq, 0, ndev->name, ndev);
+ if (rc)
+ goto err_qbv_irq;
+
+err_qbv_irq:
+ return rc;
+}
+
+void axienet_qbv_remove(struct net_device *ndev)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ free_irq(lp->qbv_irq, ndev);
+}
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_shaper.h b/drivers/net/ethernet/xilinx/xilinx_tsn_shaper.h
new file mode 100644
index 000000000000..ac2e54d0e134
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_shaper.h
@@ -0,0 +1,151 @@
+/*
+ * Xilinx TSN QBV scheduler header
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * Author: Syed S <syeds@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef XILINX_TSN_SHAPER_H
+#define XILINX_TSN_SHAPER_H
+
+/* 0x0 CONFIG_CHANGE
+ * 0x8 GATE_STATE
+ * 0x10 ADMIN_CTRL_LIST_LENGTH
+ * 0x18 ADMIN_CYCLE_TIME_DENOMINATOR
+ * 0x20 ADMIN_BASE_TIME_NS
+ * 0x24 ADMIN_BASE_TIME_SEC
+ * 0x28 ADMIN_BASE_TIME_SECS
+ * 0x30 INT_STAT
+ * 0x34 INT_EN
+ * 0x38 INT_CLR
+ * 0x3c STATUS
+ * 0x40 CONFIG_CHANGE_TIME_NS
+ * 0x44 CONFIG_CHANGE_TIME_SEC
+ * 0x48 CONFIG_CHANGE_TIME_SECS
+ * 0x50 OPER_CTRL_LIST_LENGTH
+ * 0x58 OPER_CYCLE_TIME_DENOMINATOR
+ * 0x60 OPER_BASE_TIME_NS
+ * 0x64 OPER_BASE_TIME_SEC
+ * 0x68 OPER_BASE_TIME_SECS
+ * 0x6c BE_XMIT_OVRRUN_CNT
+ * 0x74 RES_XMIT_OVRRUN_CNT
+ * 0x7c ST_XMIT_OVRRUN_CNT
+ */
+
+enum hw_port {
+ PORT_EP = 0,
+ PORT_TEMAC_1,
+ PORT_TEMAC_2,
+};
+
+ /* EP */ /* TEMAC1 */ /* TEMAC2*/
+static u32 qbv_reg_map[3] = { 0x0, 0x14000, 0x14000 };
+
+/* 0x14000 0x14FFC Time Schedule Registers (Control & Status)
+ * 0x15000 0x15FFF Time Schedule Control List Entries
+ */
+
+#define TIME_SCHED_BASE(port) qbv_reg_map[(port)]
+
+#define CTRL_LIST_BASE(port) (TIME_SCHED_BASE(port) + 0x1000)
+
+/* control list entries
+ * admin control list 0 : 31
+ * "Time interval between two gate entries" must be greater than
+ * "time required to transmit biggest supported frame" on that queue when
+ * the gate for the queue is going from open to close state.
+ */
+#define ADMIN_CTRL_LIST(port, n) (CTRL_LIST_BASE(port) + ((n) * 8))
+#define ACL_GATE_STATE_SHIFT 8
+#define ACL_GATE_STATE_MASK 0x7
+#define ADMIN_CTRL_LIST_TIME(port, n) (ADMIN_CTRL_LIST((port), n) + 4)
+
+#define OPER_CTRL_LIST(port, n) (CTRL_LIST_BASE(port) + 0x800 + ((n) * 8))
+#define OPER_CTRL_LIST_TIME(port, n) (OPER_CTRL_LIST(port, n) + 4)
+#define CTRL_LIST_TIME_INTERVAL_MASK 0xFFFFF
+
+#define CONFIG_CHANGE(port) (TIME_SCHED_BASE(port) + 0x0)
+#define CC_ADMIN_GATE_STATE_SHIFT 0x7
+#define CC_ADMIN_GATE_STATE_MASK (7)
+#define CC_ADMIN_CTRL_LIST_LENGTH_SHIFT (8)
+#define CC_ADMIN_CTRL_LIST_LENGTH_MASK (0x1FF)
+/* This request bit is set when all the related Admin* fields are populated.
+ * This bit is set by S/W and cleared by the core when it starts with the new
+ * schedule. Once set it can only be cleared by the core or a hard/soft reset.
+ */
+#define CC_ADMIN_CONFIG_CHANGE_BIT BIT(30)
+#define CC_ADMIN_GATE_ENABLE_BIT BIT(31)
+
+#define GATE_STATE(port) (TIME_SCHED_BASE(port) + 0x8)
+#define GS_OPER_GATE_STATE_SHIFT (0)
+#define GS_OPER_GATE_STATE_MASK (0x7)
+#define GS_OPER_CTRL_LIST_LENGTH_SHIFT (8)
+#define GS_OPER_CTRL_LIST_LENGTH_MASK (0x3F)
+#define GS_SUP_MAX_LIST_LENGTH_SHIFT (16)
+#define GS_SUP_MAX_LIST_LENGTH_MASK (0x3F)
+#define GS_TICK_GRANULARITY_SHIFT (24)
+#define GS_TICK_GRANULARITY_MASK (0x3F)
+
+#define ADMIN_CYCLE_TIME_DENOMINATOR(port) (TIME_SCHED_BASE(port) + 0x18)
+#define ADMIN_BASE_TIME_NS(port) (TIME_SCHED_BASE(port) + 0x20)
+#define ADMIN_BASE_TIME_SEC(port) (TIME_SCHED_BASE(port) + 0x24)
+#define ADMIN_BASE_TIME_SECS(port) (TIME_SCHED_BASE(port) + 0x28)
+
+#define INT_STATUS(port) (TIME_SCHED_BASE(port) + 0x30)
+#define INT_ENABLE(port) (TIME_SCHED_BASE(port) + 0x34)
+#define INT_CLEAR(port) (TIME_SCHED_BASE(port) + 0x38)
+#define PORT_STATUS(port) (TIME_SCHED_BASE(port) + 0x3c)
+
+/* Config Change time is valid after Config Pending bit is set. */
+#define CONFIG_CHANGE_TIME_NS(port) (TIME_SCHED_BASE((port)) + 0x40)
+#define CONFIG_CHANGE_TIME_SEC(port) (TIME_SCHED_BASE((port)) + 0x44)
+#define CONFIG_CHANGE_TIME_SECS(port) (TIME_SCHED_BASE((port)) + 0x48)
+
+#define OPER_CONTROL_LIST_LENGTH(port) (TIME_SCHED_BASE(port) + 0x50)
+#define OPER_CYCLE_TIME_DENOMINATOR(port) (TIME_SCHED_BASE(port) + 0x58)
+#define CYCLE_TIME_DENOMINATOR_MASK (0x3FFFFFFF)
+
+#define OPER_BASE_TIME_NS(port) (TIME_SCHED_BASE(port) + 0x60)
+#define OPER_BASE_TIME_NS_MASK (0x3FFFFFFF)
+#define OPER_BASE_TIME_SEC(port) (TIME_SCHED_BASE(port) + 0x64)
+#define OPER_BASE_TIME_SECS(port) (TIME_SCHED_BASE(port) + 0x68)
+#define BASE_TIME_SECS_MASK (0xFFFF)
+
+#define BE_XMIT_OVERRUN_COUNT(port) (TIME_SCHED_BASE(port) + 0x6c)
+#define RES_XMIT_OVERRUN_COUNT(port) (TIME_SCHED_BASE(port) + 0x74)
+#define ST_XMIT_OVERRUN_COUNT(port) (TIME_SCHED_BASE(port) + 0x7c)
+
+/* Internally the hw deals with queues only:
+ * in a 3q system the ST acl bitmap would be 1 << 2,
+ * in a 2q system the ST acl bitmap would be 1 << 1.
+ * But this is confusing to users, so use the following
+ * fixed gate state values and internally map them to
+ * the hw queues.
+ */
+#define GS_BE_OPEN BIT(0)
+#define GS_RE_OPEN BIT(1)
+#define GS_ST_OPEN BIT(2)
+#define QBV_MAX_ENTRIES 256
+
+struct qbv_info {
+ u8 port;
+ u8 force;
+ u32 cycle_time;
+ u64 ptp_time_sec;
+ u32 ptp_time_ns;
+ u32 list_length;
+ u32 acl_gate_state[QBV_MAX_ENTRIES];
+ u32 acl_gate_time[QBV_MAX_ENTRIES];
+};
+
+#endif /* XILINX_TSN_SHAPER_H */
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_switch.c b/drivers/net/ethernet/xilinx/xilinx_tsn_switch.c
new file mode 100644
index 000000000000..cccaaa76cf7a
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_switch.c
@@ -0,0 +1,807 @@
+/*
+ * Xilinx FPGA Xilinx TSN Switch Controller driver.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Author: Saurabh Sengar <saurabhs@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "xilinx_tsn_switch.h"
+#include <linux/of_platform.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+
+static struct miscdevice switch_dev;
+struct axienet_local lp;
+
+#define ADD 1
+#define DELETE 0
+
+#define PMAP_EGRESS_QUEUE_MASK 0x7
+#define PMAP_EGRESS_QUEUE0_SELECT 0x0
+#define PMAP_EGRESS_QUEUE1_SELECT 0x1
+#define PMAP_EGRESS_QUEUE2_SELECT 0x2
+#define PMAP_PRIORITY0_SHIFT 0
+#define PMAP_PRIORITY1_SHIFT 4
+#define PMAP_PRIORITY2_SHIFT 8
+#define PMAP_PRIORITY3_SHIFT 12
+#define PMAP_PRIORITY4_SHIFT 16
+#define PMAP_PRIORITY5_SHIFT 20
+#define PMAP_PRIORITY6_SHIFT 24
+#define PMAP_PRIORITY7_SHIFT 28
+#define SDL_EN_CAM_IPV_SHIFT 28
+#define SDL_CAM_IPV_SHIFT 29
+
+#define SDL_CAM_WR_ENABLE BIT(0)
+#define SDL_CAM_ADD_ENTRY 0x1
+#define SDL_CAM_DELETE_ENTRY 0x3
+#define SDL_CAM_VLAN_SHIFT 16
+#define SDL_CAM_VLAN_MASK 0xFFF
+#define SDL_CAM_IPV_MASK 0x7
+#define SDL_CAM_PORT_LIST_SHIFT 8
+#define SDL_GATEID_SHIFT 16
+#define SDL_CAM_FWD_TO_EP BIT(0)
+#define SDL_CAM_FWD_TO_PORT_1 BIT(1)
+#define SDL_CAM_FWD_TO_PORT_2 BIT(2)
+#define SDL_CAM_EP_ACTION_LIST_SHIFT 0
+#define SDL_CAM_MAC_ACTION_LIST_SHIFT 4
+#define SDL_CAM_DEST_MAC_XLATION BIT(0)
+#define SDL_CAM_VLAN_ID_XLATION BIT(1)
+#define SDL_CAM_UNTAG_FRAME BIT(2)
+
+/* Match table for of_platform binding */
+static const struct of_device_id tsnswitch_of_match[] = {
+ { .compatible = "xlnx,tsn-switch", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, tsnswitch_of_match);
+
+static int switch_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static int switch_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+/* set_frame_filter_option Frame Filtering Type Field Options */
+static void set_frame_filter_opt(u16 type1, u16 type2)
+{
+ int type = axienet_ior(&lp, XAS_FRM_FLTR_TYPE_FIELD_OPT_OFFSET);
+
+ if (type1)
+ type = (type & 0x0000FFFF) | (type1 << 16);
+ if (type2)
+ type = (type & 0xFFFF0000) | type2;
+ axienet_iow(&lp, XAS_FRM_FLTR_TYPE_FIELD_OPT_OFFSET, type);
+}
+
+/* MAC Port-1 Management Queueing Options */
+static void set_mac1_mngmntq(u32 config)
+{
+ axienet_iow(&lp, XAS_MAC1_MNG_Q_OPTION_OFFSET, config);
+}
+
+/* MAC Port-2 Management Queueing Options */
+static void set_mac2_mngmntq(u32 config)
+{
+ axienet_iow(&lp, XAS_MAC2_MNG_Q_OPTION_OFFSET, config);
+}
+
+/**
+ * set_switch_regs - program the various switch registers
+ * @data: Pointer to the configuration values to be written to the switch
+ */
+static void set_switch_regs(struct switch_data *data)
+{
+ int tmp;
+ u8 mac_addr[6];
+
+ axienet_iow(&lp, XAS_CONTROL_OFFSET, data->switch_ctrl);
+ axienet_iow(&lp, XAS_PMAP_OFFSET, data->switch_prt);
+ mac_addr[0] = data->sw_mac_addr[0];
+ mac_addr[1] = data->sw_mac_addr[1];
+ mac_addr[2] = data->sw_mac_addr[2];
+ mac_addr[3] = data->sw_mac_addr[3];
+ mac_addr[4] = data->sw_mac_addr[4];
+ mac_addr[5] = data->sw_mac_addr[5];
+ axienet_iow(&lp, XAS_MAC_LSB_OFFSET,
+ (mac_addr[0] << 24) | (mac_addr[1] << 16) |
+ (mac_addr[2] << 8) | (mac_addr[3]));
+ axienet_iow(&lp, XAS_MAC_MSB_OFFSET, (mac_addr[4] << 8) | mac_addr[5]);
+
+ /* Threshold */
+ tmp = (data->thld_ep_mac[0].t1 << 16) | data->thld_ep_mac[0].t2;
+ axienet_iow(&lp, XAS_EP2MAC_ST_FIFOT_OFFSET, tmp);
+
+ tmp = (data->thld_ep_mac[1].t1 << 16) | data->thld_ep_mac[1].t2;
+ axienet_iow(&lp, XAS_EP2MAC_RE_FIFOT_OFFSET, tmp);
+
+ tmp = (data->thld_ep_mac[2].t1 << 16) | data->thld_ep_mac[2].t2;
+ axienet_iow(&lp, XAS_EP2MAC_BE_FIFOT_OFFSET, tmp);
+
+ tmp = (data->thld_mac_mac[0].t1 << 16) | data->thld_mac_mac[0].t2;
+ axienet_iow(&lp, XAS_MAC2MAC_ST_FIFOT_OFFSET, tmp);
+
+ tmp = (data->thld_mac_mac[1].t1 << 16) | data->thld_mac_mac[1].t2;
+ axienet_iow(&lp, XAS_MAC2MAC_RE_FIFOT_OFFSET, tmp);
+
+ tmp = (data->thld_mac_mac[2].t1 << 16) | data->thld_mac_mac[2].t2;
+ axienet_iow(&lp, XAS_MAC2MAC_BE_FIFOT_OFFSET, tmp);
+
+ /* Port VLAN ID */
+ axienet_iow(&lp, XAS_EP_PORT_VLAN_OFFSET, data->ep_vlan);
+ axienet_iow(&lp, XAS_MAC_PORT_VLAN_OFFSET, data->mac_vlan);
+
+ /* max frame size */
+ axienet_iow(&lp, XAS_ST_MAX_FRAME_SIZE_OFFSET, data->max_frame_sc_que);
+ axienet_iow(&lp, XAS_RE_MAX_FRAME_SIZE_OFFSET, data->max_frame_res_que);
+ axienet_iow(&lp, XAS_BE_MAX_FRAME_SIZE_OFFSET, data->max_frame_be_que);
+}
+
+/**
+ * get_switch_regs - read the various status of switch
+ * @data: Pointer which will return the switch status
+ */
+static void get_switch_regs(struct switch_data *data)
+{
+ int tmp;
+
+ data->switch_status = axienet_ior(&lp, XAS_STATUS_OFFSET);
+ data->switch_ctrl = axienet_ior(&lp, XAS_CONTROL_OFFSET);
+ data->switch_prt = axienet_ior(&lp, XAS_PMAP_OFFSET);
+ tmp = axienet_ior(&lp, XAS_MAC_LSB_OFFSET);
+ data->sw_mac_addr[0] = (tmp & 0xFF000000) >> 24;
+ data->sw_mac_addr[1] = (tmp & 0xFF0000) >> 16;
+ data->sw_mac_addr[2] = (tmp & 0xFF00) >> 8;
+ data->sw_mac_addr[3] = (tmp & 0xFF);
+ tmp = axienet_ior(&lp, XAS_MAC_MSB_OFFSET);
+ data->sw_mac_addr[4] = (tmp & 0xFF00) >> 8;
+ data->sw_mac_addr[5] = (tmp & 0xFF);
+
+ /* Threshold */
+ tmp = axienet_ior(&lp, XAS_EP2MAC_ST_FIFOT_OFFSET);
+ data->thld_ep_mac[0].t1 = ((tmp >> 16) & 0xFFFF);
+ data->thld_ep_mac[0].t2 = tmp & (0xFFFF);
+
+ tmp = axienet_ior(&lp, XAS_EP2MAC_RE_FIFOT_OFFSET);
+ data->thld_ep_mac[1].t1 = ((tmp >> 16) & 0xFFFF);
+ data->thld_ep_mac[1].t2 = tmp & (0xFFFF);
+
+ tmp = axienet_ior(&lp, XAS_EP2MAC_BE_FIFOT_OFFSET);
+ data->thld_ep_mac[2].t1 = ((tmp >> 16) & 0xFFFF);
+ data->thld_ep_mac[2].t2 = tmp & (0xFFFF);
+
+ tmp = axienet_ior(&lp, XAS_MAC2MAC_ST_FIFOT_OFFSET);
+ data->thld_mac_mac[0].t1 = ((tmp >> 16) & 0xFFFF);
+ data->thld_mac_mac[0].t2 = tmp & (0xFFFF);
+
+ tmp = axienet_ior(&lp, XAS_MAC2MAC_RE_FIFOT_OFFSET);
+ data->thld_mac_mac[1].t1 = ((tmp >> 16) & 0xFFFF);
+ data->thld_mac_mac[1].t2 = tmp & (0xFFFF);
+
+ tmp = axienet_ior(&lp, XAS_MAC2MAC_BE_FIFOT_OFFSET);
+ data->thld_mac_mac[2].t1 = ((tmp >> 16) & 0xFFFF);
+ data->thld_mac_mac[2].t2 = tmp & (0xFFFF);
+
+ /* Port VLAN ID */
+ data->ep_vlan = axienet_ior(&lp, XAS_EP_PORT_VLAN_OFFSET);
+ data->mac_vlan = axienet_ior(&lp, XAS_MAC_PORT_VLAN_OFFSET);
+
+ /* max frame size */
+ data->max_frame_sc_que = (axienet_ior(&lp,
+ XAS_ST_MAX_FRAME_SIZE_OFFSET) & 0xFFFF);
+ data->max_frame_res_que = (axienet_ior(&lp,
+ XAS_RE_MAX_FRAME_SIZE_OFFSET) & 0xFFFF);
+ data->max_frame_be_que = (axienet_ior(&lp,
+ XAS_BE_MAX_FRAME_SIZE_OFFSET) & 0xFFFF);
+
+ /* frame filter type options*/
+ tmp = axienet_ior(&lp, XAS_FRM_FLTR_TYPE_FIELD_OPT_OFFSET);
+ data->typefield.type1 = (tmp & 0xFFFF0000) >> 16; /* was assigned to type2, losing the upper half */
+ data->typefield.type2 = tmp & 0x0000FFFF;
+
+ /* MAC Port 1 Management Q option*/
+ data->mac1_config = axienet_ior(&lp, XAS_MAC1_MNG_Q_OPTION_OFFSET);
+ /* MAC Port 2 Management Q option*/
+ data->mac2_config = axienet_ior(&lp, XAS_MAC2_MNG_Q_OPTION_OFFSET);
+
+ /* Port VLAN Membership control*/
+ data->port_vlan_mem_ctrl = axienet_ior(&lp, XAS_VLAN_MEMB_CTRL_REG);
+ /* Port VLAN Membership read data*/
+ data->port_vlan_mem_data = axienet_ior(&lp, XAS_VLAN_MEMB_DATA_REG);
+}
+
+/**
+ * get_memory_static_counter - get memory static counters value
+ * @data: Pointer which will return the memory static counter values
+ */
+static void get_memory_static_counter(struct switch_data *data)
+{
+ data->mem_arr_cnt.cam_lookup.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_CAM_LOOKUP);
+ data->mem_arr_cnt.cam_lookup.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_CAM_LOOKUP + 0x4);
+
+ data->mem_arr_cnt.multicast_fr.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_MULTCAST);
+ data->mem_arr_cnt.multicast_fr.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_MULTCAST + 0x4);
+
+ data->mem_arr_cnt.err_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_MAC1);
+ data->mem_arr_cnt.err_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_MAC1 + 0x4);
+
+ data->mem_arr_cnt.err_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_MAC2);
+ data->mem_arr_cnt.err_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_MAC2 + 0x4);
+
+ data->mem_arr_cnt.sc_mac1_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_MAC1_EP);
+ data->mem_arr_cnt.sc_mac1_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_MAC1_EP + 0x4);
+ data->mem_arr_cnt.res_mac1_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_MAC1_EP);
+ data->mem_arr_cnt.res_mac1_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_MAC1_EP + 0x4);
+ data->mem_arr_cnt.be_mac1_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_MAC1_EP);
+ data->mem_arr_cnt.be_mac1_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_MAC1_EP + 0x4);
+ data->mem_arr_cnt.err_sc_mac1_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_MAC1_EP);
+ data->mem_arr_cnt.err_sc_mac1_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_MAC1_EP + 0x4);
+ data->mem_arr_cnt.err_res_mac1_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_MAC1_EP);
+ data->mem_arr_cnt.err_res_mac1_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_MAC1_EP + 0x4);
+ data->mem_arr_cnt.err_be_mac1_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_MAC1_EP);
+ data->mem_arr_cnt.err_be_mac1_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_MAC1_EP + 0x4);
+
+ data->mem_arr_cnt.sc_mac2_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_MAC2_EP);
+ data->mem_arr_cnt.sc_mac2_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_MAC2_EP + 0x4);
+ data->mem_arr_cnt.res_mac2_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_MAC2_EP);
+ data->mem_arr_cnt.res_mac2_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_MAC2_EP + 0x4);
+ data->mem_arr_cnt.be_mac2_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_MAC2_EP);
+ data->mem_arr_cnt.be_mac2_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_MAC2_EP + 0x4);
+ data->mem_arr_cnt.err_sc_mac2_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_MAC2_EP);
+ data->mem_arr_cnt.err_sc_mac2_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_MAC2_EP + 0x4);
+ data->mem_arr_cnt.err_res_mac2_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_MAC2_EP);
+ data->mem_arr_cnt.err_res_mac2_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_MAC2_EP + 0x4);
+ data->mem_arr_cnt.err_be_mac2_ep.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_MAC2_EP);
+ data->mem_arr_cnt.err_be_mac2_ep.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_MAC2_EP + 0x4);
+
+ data->mem_arr_cnt.sc_ep_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_EP_MAC1);
+ data->mem_arr_cnt.sc_ep_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_EP_MAC1 + 0x4);
+ data->mem_arr_cnt.res_ep_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_EP_MAC1);
+ data->mem_arr_cnt.res_ep_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_EP_MAC1 + 0x4);
+ data->mem_arr_cnt.be_ep_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_EP_MAC1);
+ data->mem_arr_cnt.be_ep_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_EP_MAC1 + 0x4);
+ data->mem_arr_cnt.err_sc_ep_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_EP_MAC1);
+ data->mem_arr_cnt.err_sc_ep_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_EP_MAC1 + 0x4);
+ data->mem_arr_cnt.err_res_ep_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_EP_MAC1);
+ data->mem_arr_cnt.err_res_ep_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_EP_MAC1 + 0x4);
+ data->mem_arr_cnt.err_be_ep_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_EP_MAC1);
+ data->mem_arr_cnt.err_be_ep_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_EP_MAC1 + 0x4);
+
+ data->mem_arr_cnt.sc_mac2_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_MAC2_MAC1);
+ data->mem_arr_cnt.sc_mac2_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_MAC2_MAC1 + 0x4);
+ data->mem_arr_cnt.res_mac2_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_MAC2_MAC1);
+ data->mem_arr_cnt.res_mac2_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_MAC2_MAC1 + 0x4);
+ data->mem_arr_cnt.be_mac2_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_MAC2_MAC1);
+ data->mem_arr_cnt.be_mac2_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_MAC2_MAC1 + 0x4);
+ data->mem_arr_cnt.err_sc_mac2_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_MAC2_MAC1);
+ data->mem_arr_cnt.err_sc_mac2_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_MAC2_MAC1 + 0x4);
+ data->mem_arr_cnt.err_res_mac2_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_MAC2_MAC1);
+ data->mem_arr_cnt.err_res_mac2_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_MAC2_MAC1 + 0x4);
+ data->mem_arr_cnt.err_be_mac2_mac1.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_MAC2_MAC1);
+ data->mem_arr_cnt.err_be_mac2_mac1.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_MAC2_MAC1 + 0x4);
+
+ data->mem_arr_cnt.sc_ep_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_EP_MAC2);
+ data->mem_arr_cnt.sc_ep_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_EP_MAC2 + 0x4);
+ data->mem_arr_cnt.res_ep_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_EP_MAC2);
+ data->mem_arr_cnt.res_ep_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_EP_MAC2 + 0x4);
+ data->mem_arr_cnt.be_ep_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_EP_MAC2);
+ data->mem_arr_cnt.be_ep_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_EP_MAC2 + 0x4);
+ data->mem_arr_cnt.err_sc_ep_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_EP_MAC2);
+ data->mem_arr_cnt.err_sc_ep_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_EP_MAC2 + 0x4);
+ data->mem_arr_cnt.err_res_ep_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_EP_MAC2);
+ data->mem_arr_cnt.err_res_ep_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_EP_MAC2 + 0x4);
+ data->mem_arr_cnt.err_be_ep_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_EP_MAC2);
+ data->mem_arr_cnt.err_be_ep_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_EP_MAC2 + 0x4);
+
+ data->mem_arr_cnt.sc_mac1_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_MAC1_MAC2);
+ data->mem_arr_cnt.sc_mac1_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_SC_MAC1_MAC2 + 0x4);
+ data->mem_arr_cnt.res_mac1_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_MAC1_MAC2);
+ data->mem_arr_cnt.res_mac1_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_RES_MAC1_MAC2 + 0x4);
+ data->mem_arr_cnt.be_mac1_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_MAC1_MAC2);
+ data->mem_arr_cnt.be_mac1_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_BE_MAC1_MAC2 + 0x4);
+ data->mem_arr_cnt.err_sc_mac1_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_MAC1_MAC2);
+ data->mem_arr_cnt.err_sc_mac1_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_SC_MAC1_MAC2 + 0x4);
+ data->mem_arr_cnt.err_res_mac1_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_MAC1_MAC2);
+ data->mem_arr_cnt.err_res_mac1_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_RES_MAC1_MAC2 + 0x4);
+ data->mem_arr_cnt.err_be_mac1_mac2.lsb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_MAC1_MAC2);
+ data->mem_arr_cnt.err_be_mac1_mac2.msb = axienet_ior(&lp,
+ XAS_MEM_STCNTR_ERR_BE_MAC1_MAC2 + 0x4);
+}
+
+static void add_delete_cam_entry(struct cam_struct data, u8 add)
+{
+ u32 port_action = 0;
+ u32 tv2 = 0;
+ u32 timeout = 20000;
+
+ /* wait for cam init done */
+ while (!(axienet_ior(&lp, XAS_SDL_CAM_STATUS_OFFSET) &
+ SDL_CAM_WR_ENABLE) && timeout)
+ timeout--;
+
+ if (!timeout)
+ pr_warn("CAM init took longer time!!");
+ /* mac and vlan */
+ axienet_iow(&lp, XAS_SDL_CAM_KEY1_OFFSET,
+ (data.dest_addr[0] << 24) | (data.dest_addr[1] << 16) |
+ (data.dest_addr[2] << 8) | (data.dest_addr[3]));
+ axienet_iow(&lp, XAS_SDL_CAM_KEY2_OFFSET,
+ ((data.dest_addr[4] << 8) | data.dest_addr[5]) |
+ ((data.vlanid & SDL_CAM_VLAN_MASK) << SDL_CAM_VLAN_SHIFT));
+
+ /* TV 1 and TV 2 */
+ axienet_iow(&lp, XAS_SDL_CAM_TV1_OFFSET,
+ (data.src_addr[0] << 24) | (data.src_addr[1] << 16) |
+ (data.src_addr[2] << 8) | (data.src_addr[3]));
+
+ tv2 = ((data.src_addr[4] << 8) | data.src_addr[5]) |
+ ((data.tv_vlanid & SDL_CAM_VLAN_MASK) << SDL_CAM_VLAN_SHIFT);
+
+#if IS_ENABLED(CONFIG_XILINX_TSN_QCI)
+ tv2 = tv2 | ((data.ipv & SDL_CAM_IPV_MASK) << SDL_CAM_IPV_SHIFT)
+ | (data.en_ipv << SDL_EN_CAM_IPV_SHIFT);
+#endif
+ axienet_iow(&lp, XAS_SDL_CAM_TV2_OFFSET, tv2);
+
+ if (data.tv_en)
+ port_action = ((SDL_CAM_DEST_MAC_XLATION |
+ SDL_CAM_VLAN_ID_XLATION) << SDL_CAM_MAC_ACTION_LIST_SHIFT);
+
+ port_action = port_action | (data.fwd_port << SDL_CAM_PORT_LIST_SHIFT);
+
+#if IS_ENABLED(CONFIG_XILINX_TSN_QCI) || IS_ENABLED(CONFIG_XILINX_TSN_CB)
+ port_action = port_action | (data.gate_id << SDL_GATEID_SHIFT);
+#endif
+
+ /* port action */
+ axienet_iow(&lp, XAS_SDL_CAM_PORT_ACT_OFFSET, port_action);
+
+ if (add)
+ axienet_iow(&lp, XAS_SDL_CAM_CTRL_OFFSET, SDL_CAM_ADD_ENTRY);
+ else
+ axienet_iow(&lp, XAS_SDL_CAM_CTRL_OFFSET, SDL_CAM_DELETE_ENTRY);
+
+ timeout = 20000;
+ /* wait for write to complete */
+ while ((axienet_ior(&lp, XAS_SDL_CAM_CTRL_OFFSET) &
+ SDL_CAM_WR_ENABLE) && timeout)
+ timeout--;
+
+ if (!timeout)
+ pr_warn("CAM write took longer time!!");
+}
+
+static void port_vlan_mem_ctrl(u32 port_vlan_mem)
+{
+ axienet_iow(&lp, XAS_VLAN_MEMB_CTRL_REG, port_vlan_mem);
+}
+
+static long switch_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ long retval = 0;
+ struct switch_data data;
+#if IS_ENABLED(CONFIG_XILINX_TSN_QCI)
+ struct qci qci_data;
+#endif
+#if IS_ENABLED(CONFIG_XILINX_TSN_CB)
+ struct cb cb_data;
+#endif
+ switch (cmd) {
+ case GET_STATUS_SWITCH:
+ /* Switch configurations */
+ get_switch_regs(&data);
+
+ /* Memory static counter*/
+ get_memory_static_counter(&data);
+ if (copy_to_user((char __user *)arg, &data, sizeof(data))) {
+ pr_err("Copy to user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ break;
+
+ case SET_STATUS_SWITCH:
+ if (copy_from_user(&data, (char __user *)arg, sizeof(data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ set_switch_regs(&data);
+ break;
+
+ case ADD_CAM_ENTRY:
+ if (copy_from_user(&data, (char __user *)arg, sizeof(data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ add_delete_cam_entry(data.cam_data, ADD);
+ break;
+
+ case DELETE_CAM_ENTRY:
+ if (copy_from_user(&data, (char __user *)arg, sizeof(data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ add_delete_cam_entry(data.cam_data, DELETE);
+ break;
+
+ case PORT_VLAN_MEM_CTRL:
+ if (copy_from_user(&data, (char __user *)arg, sizeof(data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ port_vlan_mem_ctrl(data.port_vlan_mem_ctrl);
+ break;
+
+ case SET_FRAME_TYPE_FIELD:
+ if (copy_from_user(&data, (char __user *)arg, sizeof(data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ set_frame_filter_opt(data.typefield.type1,
+ data.typefield.type2);
+ break;
+
+ case SET_MAC1_MNGMNT_Q_CONFIG:
+ if (copy_from_user(&data, (char __user *)arg, sizeof(data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ set_mac1_mngmntq(data.mac1_config);
+ break;
+
+ case SET_MAC2_MNGMNT_Q_CONFIG:
+ if (copy_from_user(&data, (char __user *)arg, sizeof(data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ set_mac2_mngmntq(data.mac2_config);
+ break;
+#if IS_ENABLED(CONFIG_XILINX_TSN_QCI)
+ case CONFIG_METER_MEM:
+ if (copy_from_user(&qci_data, (char __user *)arg,
+ sizeof(qci_data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ program_meter_reg(qci_data.meter_config_data);
+ break;
+
+ case CONFIG_GATE_MEM:
+ if (copy_from_user(&qci_data, (char __user *)arg,
+ sizeof(qci_data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ config_stream_filter(qci_data.stream_config_data);
+ break;
+
+ case PSFP_CONTROL:
+ if (copy_from_user(&qci_data, (char __user *)arg,
+ sizeof(qci_data))) {
+ retval = -EINVAL;
+ pr_err("Copy from user failed\n");
+ goto end;
+ }
+ psfp_control(qci_data.psfp_config_data);
+ break;
+
+ case GET_STATIC_PSFP_COUNTER:
+ if (copy_from_user(&qci_data, (char __user *)arg,
+ sizeof(qci_data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ get_psfp_static_counter(&qci_data.psfp_counter_data);
+ if (copy_to_user((char __user *)arg, &qci_data,
+ sizeof(qci_data))) {
+ pr_err("Copy to user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ break;
+ case GET_METER_REG:
+ get_meter_reg(&qci_data.meter_config_data);
+ if (copy_to_user((char __user *)arg, &qci_data,
+ sizeof(qci_data))) {
+ pr_err("Copy to user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ break;
+ case GET_STREAM_FLTR_CONFIG:
+ get_stream_filter_config(&qci_data.stream_config_data);
+ if (copy_to_user((char __user *)arg, &qci_data,
+ sizeof(qci_data))) {
+ pr_err("Copy to user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ break;
+#endif
+#if IS_ENABLED(CONFIG_XILINX_TSN_CB)
+ case CONFIG_MEMBER_MEM:
+ if (copy_from_user(&cb_data, (char __user *)arg,
+ sizeof(cb_data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ program_member_reg(cb_data.frer_memb_config_data);
+ break;
+
+ case CONFIG_INGRESS_FLTR:
+ if (copy_from_user(&cb_data, (char __user *)arg,
+ sizeof(cb_data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ config_ingress_filter(cb_data.in_fltr_data);
+ break;
+
+ case FRER_CONTROL:
+ if (copy_from_user(&cb_data, (char __user *)arg,
+ sizeof(cb_data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ frer_control(cb_data.frer_ctrl_data);
+ break;
+
+ case GET_STATIC_FRER_COUNTER:
+ if (copy_from_user(&cb_data, (char __user *)arg,
+ sizeof(cb_data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ get_frer_static_counter(&cb_data.frer_counter_data);
+ if (copy_to_user((char __user *)arg, &cb_data,
+ sizeof(cb_data))) {
+ pr_err("Copy to user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ break;
+
+ case GET_MEMBER_REG:
+ get_member_reg(&cb_data.frer_memb_config_data);
+ if (copy_to_user((char __user *)arg, &cb_data,
+ sizeof(cb_data))) {
+ pr_err("Copy to user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ break;
+
+ case GET_INGRESS_FLTR:
+ get_ingress_filter_config(&cb_data.in_fltr_data);
+ if (copy_to_user((char __user *)arg, &cb_data,
+ sizeof(cb_data))) {
+ pr_err("Copy to user failed\n");
+ retval = -EINVAL;
+ goto end;
+ }
+ break;
+#endif
+ }
+end:
+ return retval;
+}
+
+static const struct file_operations switch_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = switch_ioctl,
+ .open = switch_open,
+ .release = switch_release,
+};
+
+static int tsn_switch_init(void)
+{
+ int ret;
+
+ switch_dev.minor = MISC_DYNAMIC_MINOR;
+ switch_dev.name = "switch";
+ switch_dev.fops = &switch_fops;
+ ret = misc_register(&switch_dev);
+ if (ret < 0) {
+ pr_err("Switch driver registration failed!\n");
+ return ret;
+ }
+
+ pr_debug("Xilinx TSN Switch driver initialized!\n");
+ return 0;
+}
+
+static int tsn_switch_cam_init(u16 num_q)
+{
+	u32 pmap = 0; /* default: no PCP-to-queue remapping (num_q not 2 or 3) */
+	u32 timeout = 20000;
+
+	/* wait for switch init done */
+	while (!(axienet_ior(&lp, XAS_STATUS_OFFSET) &
+		SDL_CAM_WR_ENABLE) && timeout)
+		timeout--;
+
+	if (!timeout)
+		pr_warn("Switch init took longer time!!\n");
+
+	if (num_q == 3) {
+		/* map pcp = 2,3 to queue1
+		 * pcp = 4 to queue2
+		 */
+		pmap = ((PMAP_EGRESS_QUEUE1_SELECT << PMAP_PRIORITY2_SHIFT) |
+			(PMAP_EGRESS_QUEUE1_SELECT << PMAP_PRIORITY3_SHIFT) |
+			(PMAP_EGRESS_QUEUE2_SELECT << PMAP_PRIORITY4_SHIFT));
+	} else if (num_q == 2) {
+		/* pcp = 4 to queue1 */
+		pmap = (PMAP_EGRESS_QUEUE1_SELECT << PMAP_PRIORITY4_SHIFT);
+	}
+
+	axienet_iow(&lp, XAS_PMAP_OFFSET, pmap);
+
+	timeout = 20000;
+	/* wait for cam init done */
+	while (!(axienet_ior(&lp, XAS_SDL_CAM_STATUS_OFFSET) &
+		SDL_CAM_WR_ENABLE) && timeout)
+		timeout--;
+
+	if (!timeout)
+		pr_warn("CAM init took longer time!!\n");
+
+	return 0;
+}
+
+static int tsnswitch_probe(struct platform_device *pdev)
+{
+ struct resource *swt;
+ int ret;
+ u16 num_tc;
+
+ pr_info("TSN Switch probe\n");
+ /* Map device registers */
+ swt = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lp.regs = devm_ioremap_resource(&pdev->dev, swt);
+ if (IS_ERR(lp.regs))
+ return PTR_ERR(lp.regs);
+
+ ret = of_property_read_u16(pdev->dev.of_node, "xlnx,num-tc",
+ &num_tc);
+ if (ret || (num_tc != 2 && num_tc != 3))
+ num_tc = XAE_MAX_TSN_TC;
+
+ pr_info("TSN Switch Initializing ....\n");
+ ret = tsn_switch_init();
+ if (ret)
+ return ret;
+ pr_info("TSN CAM Initializing ....\n");
+ ret = tsn_switch_cam_init(num_tc);
+
+ return ret;
+}
+
+static int tsnswitch_remove(struct platform_device *pdev)
+{
+ misc_deregister(&switch_dev);
+ return 0;
+}
+
+static struct platform_driver tsnswitch_driver = {
+ .probe = tsnswitch_probe,
+ .remove = tsnswitch_remove,
+ .driver = {
+ .name = "xilinx_tsnswitch",
+ .of_match_table = tsnswitch_of_match,
+ },
+};
+
+module_platform_driver(tsnswitch_driver);
+
+MODULE_DESCRIPTION("Xilinx TSN Switch driver");
+MODULE_AUTHOR("Xilinx");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_switch.h b/drivers/net/ethernet/xilinx/xilinx_tsn_switch.h
new file mode 100644
index 000000000000..9e5e21aea127
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_switch.h
@@ -0,0 +1,364 @@
+/*
+ * Xilinx TSN core switch header
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * Author: Saurabh Sengar <saurabhs@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef XILINX_TSN_SWITCH_H
+#define XILINX_TSN_SWITCH_H
+
+#include "xilinx_axienet.h"
+
+/* ioctls */
+#define GET_STATUS_SWITCH 0x16
+#define SET_STATUS_SWITCH 0x17
+#define ADD_CAM_ENTRY 0x18
+#define DELETE_CAM_ENTRY 0x19
+#define PORT_VLAN_MEM_CTRL 0x20
+#define SET_FRAME_TYPE_FIELD 0x21
+#define SET_MAC1_MNGMNT_Q_CONFIG 0x22
+#define SET_MAC2_MNGMNT_Q_CONFIG 0x23
+#define CONFIG_METER_MEM 0x24
+#define CONFIG_GATE_MEM 0x25
+#define PSFP_CONTROL 0x26
+#define GET_STATIC_PSFP_COUNTER 0x27
+#define GET_METER_REG 0x28
+#define GET_STREAM_FLTR_CONFIG 0x29
+#define CONFIG_MEMBER_MEM 0x2A
+#define CONFIG_INGRESS_FLTR 0x2B
+#define FRER_CONTROL 0x2C
+#define GET_STATIC_FRER_COUNTER 0x2D
+#define GET_MEMBER_REG 0x2E
+#define GET_INGRESS_FLTR 0x2F
+
+/* Xilinx Axi Switch Offsets*/
+#define XAS_STATUS_OFFSET 0x00000
+#define XAS_CONTROL_OFFSET 0x00004
+#define XAS_PMAP_OFFSET 0x00008
+#define XAS_MAC_LSB_OFFSET 0x0000C
+#define XAS_MAC_MSB_OFFSET 0x00010
+#define XAS_EP2MAC_ST_FIFOT_OFFSET 0x00020
+#define XAS_EP2MAC_RE_FIFOT_OFFSET 0x00024
+#define XAS_EP2MAC_BE_FIFOT_OFFSET 0x00028
+#define XAS_MAC2MAC_ST_FIFOT_OFFSET 0x00030
+#define XAS_MAC2MAC_RE_FIFOT_OFFSET 0x00034
+#define XAS_MAC2MAC_BE_FIFOT_OFFSET 0x00038
+#define XAS_EP_PORT_VLAN_OFFSET 0x00040
+#define XAS_MAC_PORT_VLAN_OFFSET 0x00044
+#define XAS_FRM_FLTR_TYPE_FIELD_OPT_OFFSET 0x00050
+#define XAS_MAC2_MNG_Q_OPTION_OFFSET 0x00054
+#define XAS_MAC1_MNG_Q_OPTION_OFFSET 0x00058
+#define XAS_ST_MAX_FRAME_SIZE_OFFSET 0x00060
+#define XAS_RE_MAX_FRAME_SIZE_OFFSET 0x00064
+#define XAS_BE_MAX_FRAME_SIZE_OFFSET 0x00068
+
+/* Memory static counters */
+#define XAS_MEM_STCNTR_CAM_LOOKUP 0x00400
+#define XAS_MEM_STCNTR_MULTCAST 0x00408
+#define XAS_MEM_STCNTR_ERR_MAC1 0x00410
+#define XAS_MEM_STCNTR_ERR_MAC2 0x00418
+#define XAS_MEM_STCNTR_SC_MAC1_EP 0x00420
+#define XAS_MEM_STCNTR_RES_MAC1_EP 0x00428
+#define XAS_MEM_STCNTR_BE_MAC1_EP 0x00430
+#define XAS_MEM_STCNTR_ERR_SC_MAC1_EP 0x00438
+#define XAS_MEM_STCNTR_ERR_RES_MAC1_EP 0x00440
+#define XAS_MEM_STCNTR_ERR_BE_MAC1_EP 0x00448
+#define XAS_MEM_STCNTR_SC_MAC2_EP 0x00458
+#define XAS_MEM_STCNTR_RES_MAC2_EP 0x00460
+#define XAS_MEM_STCNTR_BE_MAC2_EP 0x00468
+#define XAS_MEM_STCNTR_ERR_SC_MAC2_EP 0x00470
+#define XAS_MEM_STCNTR_ERR_RES_MAC2_EP 0x00478
+#define XAS_MEM_STCNTR_ERR_BE_MAC2_EP 0x00480
+#define XAS_MEM_STCNTR_SC_EP_MAC1 0x00490
+#define XAS_MEM_STCNTR_RES_EP_MAC1 0x00498
+#define XAS_MEM_STCNTR_BE_EP_MAC1 0x004A0
+#define XAS_MEM_STCNTR_ERR_SC_EP_MAC1 0x004A8
+#define XAS_MEM_STCNTR_ERR_RES_EP_MAC1 0x004B0
+#define XAS_MEM_STCNTR_ERR_BE_EP_MAC1 0x004B8
+#define XAS_MEM_STCNTR_SC_MAC2_MAC1 0x004C0
+#define XAS_MEM_STCNTR_RES_MAC2_MAC1 0x004C8
+#define XAS_MEM_STCNTR_BE_MAC2_MAC1 0x004D0
+#define XAS_MEM_STCNTR_ERR_SC_MAC2_MAC1 0x004D8
+#define XAS_MEM_STCNTR_ERR_RES_MAC2_MAC1 0x004E0
+#define XAS_MEM_STCNTR_ERR_BE_MAC2_MAC1 0x004E8
+#define XAS_MEM_STCNTR_SC_EP_MAC2 0x004F0
+#define XAS_MEM_STCNTR_RES_EP_MAC2 0x004F8
+#define XAS_MEM_STCNTR_BE_EP_MAC2 0x00500
+#define XAS_MEM_STCNTR_ERR_SC_EP_MAC2 0x00508
+#define XAS_MEM_STCNTR_ERR_RES_EP_MAC2 0x00510
+#define XAS_MEM_STCNTR_ERR_BE_EP_MAC2 0x00518
+#define XAS_MEM_STCNTR_SC_MAC1_MAC2 0x00520
+#define XAS_MEM_STCNTR_RES_MAC1_MAC2 0x00528
+#define XAS_MEM_STCNTR_BE_MAC1_MAC2 0x00530
+#define XAS_MEM_STCNTR_ERR_SC_MAC1_MAC2 0x00538
+#define XAS_MEM_STCNTR_ERR_RES_MAC1_MAC2 0x00540
+#define XAS_MEM_STCNTR_ERR_BE_MAC1_MAC2 0x00548
+
+/* Stream Destination Lookup CAM */
+#define XAS_SDL_CAM_CTRL_OFFSET 0x1000
+#define XAS_SDL_CAM_STATUS_OFFSET 0x1004
+#define XAS_SDL_CAM_KEY1_OFFSET 0x1008
+#define XAS_SDL_CAM_KEY2_OFFSET 0x100C
+#define XAS_SDL_CAM_TV1_OFFSET 0x1010
+#define XAS_SDL_CAM_TV2_OFFSET 0x1014
+#define XAS_SDL_CAM_PORT_ACT_OFFSET 0x1018
+
+/* Port VLAN Membership Memory */
+#define XAS_VLAN_MEMB_CTRL_REG 0x1100
+#define XAS_VLAN_MEMB_DATA_REG 0x1104
+
+/* QCI */
+#define PSFP_CONTROL_OFFSET 0x1200
+#define STREAM_FILTER_CONFIG_OFFSET 0x1204
+#define STREAM_METER_CIR_OFFSET 0x1208
+#define STREAM_METER_EIR_OFFSET 0x120C
+#define STREAM_METER_CBR_OFFSET 0x1210
+#define STREAM_METER_EBR_OFFSET 0x1214
+
+/* PSFP Statistics Counters */
+#define TOTAL_PSFP_FRAMES_OFFSET 0x2000
+#define FLTR_INGS_PORT_ERR_OFFSET 0x2800
+#define FLTR_STDU_ERR_OFFSET 0x3000
+#define METER_ERR_OFFSET 0x3800
+
+/* CB */
+#define FRER_CONTROL_OFFSET 0x1300
+#define INGRESS_FILTER_OFFSET 0x1304
+#define FRER_CONFIG_REG1 0x1308
+#define FRER_CONFIG_REG2 0x130C
+
+/* FRER Statistics Counters */
+#define TOTAL_FRER_FRAMES_OFFSET 0x4000
+#define FRER_DISCARD_INGS_FLTR_OFFSET 0x4800
+#define FRER_PASS_FRAMES_INDV_OFFSET 0x5000
+#define FRER_DISCARD_FRAMES_INDV_OFFSET 0x5800
+#define FRER_PASS_FRAMES_SEQ_OFFSET 0x6000
+#define FRER_DISCARD_FRAMES_SEQ_OFFSET 0x6800
+#define FRER_ROGUE_FRAMES_SEQ_OFFSET 0x7000
+#define SEQ_RECV_RESETS_OFFSET 0x7800
+
+/* 64 bit counter*/
+struct static_cntr {
+ u32 msb;
+ u32 lsb;
+};
+
+/*********** QCI Structures **************/
+struct psfp_config {
+ u8 gate_id;
+ u8 meter_id;
+ bool en_meter;
+ bool allow_stream;
+ bool en_psfp;
+ u8 wr_op_type;
+ bool op_type;
+};
+
+struct meter_config {
+ u32 cir;
+ u32 eir;
+ u32 cbr;
+ u32 ebr;
+ u8 mode;
+};
+
+struct stream_filter {
+ u8 in_pid; /* ingress port id*/
+ u16 max_fr_size; /* max frame size*/
+};
+
+/* PSFP Static counter*/
+struct psfp_static_counter {
+ struct static_cntr psfp_fr_count;
+ struct static_cntr err_filter_ins_port;
+ struct static_cntr err_filtr_sdu;
+ struct static_cntr err_meter;
+ unsigned char num;
+};
+
+/* QCI Core structures */
+struct qci {
+ struct meter_config meter_config_data;
+ struct stream_filter stream_config_data;
+ struct psfp_config psfp_config_data;
+ struct psfp_static_counter psfp_counter_data;
+};
+
+/************* QCI Structures end *************/
+
+/*********** CB Structures **************/
+struct frer_ctrl {
+ u8 gate_id;
+ u8 memb_id;
+ bool seq_reset;
+ bool gate_state;
+ bool rcvry_tmout;
+ bool frer_valid;
+ u8 wr_op_type;
+ bool op_type;
+};
+
+struct in_fltr {
+ u8 in_port_id;
+ u16 max_seq_id;
+};
+
+struct frer_memb_config {
+ u8 seq_rec_hist_len;
+ u8 split_strm_egport_id;
+ u16 split_strm_vlan_id;
+ u32 rem_ticks;
+};
+
+/* FRER Static counter*/
+struct frer_static_counter {
+ struct static_cntr frer_fr_count;
+ struct static_cntr disc_frames_in_portid;
+ struct static_cntr pass_frames_seq_recv;
+ struct static_cntr disc_frames_seq_recv;
+ struct static_cntr rogue_frames_seq_recv;
+ struct static_cntr pass_frames_ind_recv;
+ struct static_cntr disc_frames_ind_recv;
+ struct static_cntr seq_recv_rst;
+ unsigned char num;
+};
+
+/* CB Core structures */
+struct cb {
+ struct frer_ctrl frer_ctrl_data;
+ struct in_fltr in_fltr_data;
+ struct frer_memb_config frer_memb_config_data;
+ struct frer_static_counter frer_counter_data;
+};
+
+/************* CB Structures end *************/
+
+/********* Switch Structures Starts ***********/
+struct thershold {
+ u16 t1;
+ u16 t2;
+};
+
+/* memory static counters */
+struct mem_static_arr_cntr {
+ struct static_cntr cam_lookup;
+ struct static_cntr multicast_fr;
+ struct static_cntr err_mac1;
+ struct static_cntr err_mac2;
+ struct static_cntr sc_mac1_ep;
+ struct static_cntr res_mac1_ep;
+ struct static_cntr be_mac1_ep;
+ struct static_cntr err_sc_mac1_ep;
+ struct static_cntr err_res_mac1_ep;
+ struct static_cntr err_be_mac1_ep;
+ struct static_cntr sc_mac2_ep;
+ struct static_cntr res_mac2_ep;
+ struct static_cntr be_mac2_ep;
+ struct static_cntr err_sc_mac2_ep;
+ struct static_cntr err_res_mac2_ep;
+ struct static_cntr err_be_mac2_ep;
+ struct static_cntr sc_ep_mac1;
+ struct static_cntr res_ep_mac1;
+ struct static_cntr be_ep_mac1;
+ struct static_cntr err_sc_ep_mac1;
+ struct static_cntr err_res_ep_mac1;
+ struct static_cntr err_be_ep_mac1;
+ struct static_cntr sc_mac2_mac1;
+ struct static_cntr res_mac2_mac1;
+ struct static_cntr be_mac2_mac1;
+ struct static_cntr err_sc_mac2_mac1;
+ struct static_cntr err_res_mac2_mac1;
+ struct static_cntr err_be_mac2_mac1;
+ struct static_cntr sc_ep_mac2;
+ struct static_cntr res_ep_mac2;
+ struct static_cntr be_ep_mac2;
+ struct static_cntr err_sc_ep_mac2;
+ struct static_cntr err_res_ep_mac2;
+ struct static_cntr err_be_ep_mac2;
+ struct static_cntr sc_mac1_mac2;
+ struct static_cntr res_mac1_mac2;
+ struct static_cntr be_mac1_mac2;
+ struct static_cntr err_sc_mac1_mac2;
+ struct static_cntr err_res_mac1_mac2;
+ struct static_cntr err_be_mac1_mac2;
+};
+
+/* CAM structure */
+struct cam_struct {
+ u8 src_addr[6];
+ u8 dest_addr[6];
+ u16 vlanid;
+ u16 tv_vlanid;
+ u8 fwd_port;
+ bool tv_en;
+ u8 gate_id;
+ u8 ipv;
+ bool en_ipv;
+};
+
+/*Frame Filtering Type Field Option */
+struct ff_type {
+ u16 type1;
+ u16 type2;
+};
+
+/* Core switch structure*/
+struct switch_data {
+ u32 switch_status;
+ u32 switch_ctrl;
+ u32 switch_prt;
+ u8 sw_mac_addr[6];
+ /*0 - schedule, 1 - reserved, 2 - best effort queue*/
+ struct thershold thld_ep_mac[3];
+ struct thershold thld_mac_mac[3];
+ u32 ep_vlan;
+ u32 mac_vlan;
+ u32 max_frame_sc_que;
+ u32 max_frame_res_que;
+ u32 max_frame_be_que;
+ /* Memory counters */
+ struct mem_static_arr_cntr mem_arr_cnt;
+ /* CAM */
+ struct cam_struct cam_data;
+/* Frame Filtering Type Field Option */
+ struct ff_type typefield;
+/* MAC Port-1 Management Queueing Options */
+ int mac1_config;
+/* MAC Port-2 Management Queueing Options */
+ int mac2_config;
+/* Port VLAN Membership Registers */
+ int port_vlan_mem_ctrl;
+ char port_vlan_mem_data;
+};
+
+/********* Switch Structures ends ***********/
+
+extern struct axienet_local lp;
+
+/********* qci function declarations ********/
+void psfp_control(struct psfp_config data);
+void config_stream_filter(struct stream_filter data);
+void program_meter_reg(struct meter_config data);
+void get_psfp_static_counter(struct psfp_static_counter *data);
+void get_meter_reg(struct meter_config *data);
+void get_stream_filter_config(struct stream_filter *data);
+
+/********* cb function declarations ********/
+void frer_control(struct frer_ctrl data);
+void get_ingress_filter_config(struct in_fltr *data);
+void config_ingress_filter(struct in_fltr data);
+void get_member_reg(struct frer_memb_config *data);
+void program_member_reg(struct frer_memb_config data);
+void get_frer_static_counter(struct frer_static_counter *data);
+#endif /* XILINX_TSN_SWITCH_H */
diff --git a/drivers/net/ethernet/xilinx/xilinx_tsn_timer.h b/drivers/net/ethernet/xilinx/xilinx_tsn_timer.h
new file mode 100644
index 000000000000..4bb74e78d89a
--- /dev/null
+++ b/drivers/net/ethernet/xilinx/xilinx_tsn_timer.h
@@ -0,0 +1,73 @@
+/*
+ * Xilinx FPGA Xilinx TSN timer module header.
+ *
+ * Copyright (c) 2017 Xilinx Pvt., Ltd
+ *
+ * Author: Syed S <syeds@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XILINX_TSN_H_
+#define _XILINX_TSN_H_
+
+#include <linux/platform_device.h>
+
+#define XAE_RTC_OFFSET 0x12800
+/* RTC Nanoseconds Field Offset Register */
+#define XTIMER1588_RTC_OFFSET_NS 0x00000
+/* RTC Seconds Field Offset Register - Low */
+#define XTIMER1588_RTC_OFFSET_SEC_L 0x00008
+/* RTC Seconds Field Offset Register - High */
+#define XTIMER1588_RTC_OFFSET_SEC_H 0x0000C
+/* RTC Increment */
+#define XTIMER1588_RTC_INCREMENT 0x00010
+/* Current TOD Nanoseconds - RO */
+#define XTIMER1588_CURRENT_RTC_NS 0x00014
+/* Current TOD Seconds -Low RO */
+#define XTIMER1588_CURRENT_RTC_SEC_L 0x00018
+/* Current TOD Seconds -High RO */
+#define XTIMER1588_CURRENT_RTC_SEC_H 0x0001C
+#define XTIMER1588_SYNTONIZED_NS 0x0002C
+#define XTIMER1588_SYNTONIZED_SEC_L 0x00030
+#define XTIMER1588_SYNTONIZED_SEC_H 0x00034
+/* Write to Bit 0 to clear the interrupt */
+#define XTIMER1588_INTERRUPT 0x00020
+/* 8kHz Pulse Offset Register */
+#define XTIMER1588_8KPULSE 0x00024
+/* Correction Field - Low */
+#define XTIMER1588_CF_L 0x0002C
+/* Correction Field - High */
+#define XTIMER1588_CF_H 0x00030
+
+#define XTIMER1588_RTC_MASK ((1 << 26) - 1)
+#define XTIMER1588_INT_SHIFT 0
+#define NANOSECOND_BITS 20
+#define NANOSECOND_MASK ((1 << NANOSECOND_BITS) - 1)
+#define SECOND_MASK ((1 << (32 - NANOSECOND_BITS)) - 1)
+#define XTIMER1588_RTC_INCREMENT_SHIFT 20
+#define PULSESIN1PPS 128
+
+/* Read/Write access to the registers */
+#ifndef out_be32
+#if defined(CONFIG_ARCH_ZYNQ) || defined(CONFIG_ARCH_ZYNQMP)
+#define in_be32(offset) __raw_readl(offset)
+#define out_be32(offset, val) __raw_writel(val, offset)
+#endif
+#endif
+
+/* The tsn ptp module will set this variable */
+extern int axienet_phc_index;
+
+void *axienet_ptp_timer_probe(void __iomem *base,
+ struct platform_device *pdev);
+int axienet_ptp_timer_remove(void *priv);
+int axienet_get_phc_index(void *priv);
+#endif
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index d140e3c93fe3..24a4dfdf936a 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -463,7 +463,8 @@ config MICROSEMI_PHY
depends on MACSEC || MACSEC=n
select CRYPTO_LIB_AES if MACSEC
---help---
- Currently supports VSC8514, VSC8530, VSC8531, VSC8540 and VSC8541 PHYs
+ Currently supports VSC8514, VSC8530, VSC8531, VSC8531_02, VSC8540 and
+ VSC8541 PHYs
config NATIONAL_PHY
tristate "National Semiconductor PHYs"
@@ -522,6 +523,11 @@ config VITESSE_PHY
---help---
Currently supports the vsc8244
+config XILINX_PHY
+ tristate "Drivers for xilinx PHYs"
+ ---help---
+ This module provides a driver for the Xilinx PCS/PMA Core.
+
config XILINX_GMII2RGMII
tristate "Xilinx GMII2RGMII converter driver"
---help---
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 2f5c7093a65b..e17e02110834 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -101,4 +101,5 @@ obj-$(CONFIG_SMSC_PHY) += smsc.o
obj-$(CONFIG_STE10XP) += ste10Xp.o
obj-$(CONFIG_TERANETICS_PHY) += teranetics.o
obj-$(CONFIG_VITESSE_PHY) += vitesse.o
+obj-$(CONFIG_XILINX_PHY) += xilinx_phy.o
obj-$(CONFIG_XILINX_GMII2RGMII) += xilinx_gmii2rgmii.o
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index ddac79960ea7..1d6df3a1bfe6 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -26,6 +26,7 @@
#define MII_DP83867_MICR 0x12
#define MII_DP83867_ISR 0x13
#define DP83867_CFG2 0x14
+#define MII_DP83867_BISCR 0x16
#define DP83867_CFG3 0x1e
#define DP83867_CTRL 0x1f
@@ -104,6 +105,11 @@
#define DP83867_PHYCR_RX_FIFO_DEPTH_MASK GENMASK(13, 12)
#define DP83867_PHYCR_RESERVED_MASK BIT(11)
#define DP83867_PHYCR_FORCE_LINK_GOOD BIT(10)
+#define DP83867_MDI_CROSSOVER 5
+#define DP83867_MDI_CROSSOVER_AUTO 0b10
+#define DP83867_PHYCTRL_SGMIIEN 0x0800
+#define DP83867_PHYCTRL_RXFIFO_SHIFT 12
+#define DP83867_PHYCTRL_TXFIFO_SHIFT 14
/* RGMIIDCTL bits */
#define DP83867_RGMII_TX_CLK_DELAY_MAX 0xf
@@ -144,6 +150,14 @@
#define DP83867_CFG3_INT_OE BIT(7)
#define DP83867_CFG3_ROBUST_AUTO_MDIX BIT(9)
+/* CFG2 bits */
+#define MII_DP83867_CFG2_SPEEDOPT_10EN 0x0040
+#define MII_DP83867_CFG2_SGMII_AUTONEGEN 0x0080
+#define MII_DP83867_CFG2_SPEEDOPT_ENH 0x0100
+#define MII_DP83867_CFG2_SPEEDOPT_CNT 0x0800
+#define MII_DP83867_CFG2_SPEEDOPT_INTLOW 0x2000
+#define MII_DP83867_CFG2_MASK 0x003F
+
/* CFG4 bits */
#define DP83867_CFG4_PORT_MIRROR_EN BIT(0)
@@ -610,7 +624,7 @@ static int dp83867_config_init(struct phy_device *phydev)
{
struct dp83867_private *dp83867 = phydev->priv;
int ret, val, bs;
- u16 delay;
+ u16 delay, cfg2;
/* Force speed optimization for the PHY even if it strapped */
ret = phy_modify(phydev, DP83867_CFG2, DP83867_DOWNSHIFT_EN,
@@ -759,6 +773,35 @@ static int dp83867_config_init(struct phy_device *phydev)
else
val &= ~DP83867_SGMII_TYPE;
phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_SGMIICTL, val);
+
+ phy_write(phydev, MII_BMCR,
+ (BMCR_ANENABLE | BMCR_FULLDPLX | BMCR_SPEED1000));
+
+ cfg2 = phy_read(phydev, DP83867_CFG2);
+ cfg2 &= MII_DP83867_CFG2_MASK;
+ cfg2 |= (MII_DP83867_CFG2_SPEEDOPT_10EN |
+ MII_DP83867_CFG2_SGMII_AUTONEGEN |
+ MII_DP83867_CFG2_SPEEDOPT_ENH |
+ MII_DP83867_CFG2_SPEEDOPT_CNT |
+ MII_DP83867_CFG2_SPEEDOPT_INTLOW);
+ phy_write(phydev, DP83867_CFG2, cfg2);
+
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL, 0x0);
+
+ phy_write(phydev, MII_DP83867_PHYCTRL,
+ DP83867_PHYCTRL_SGMIIEN |
+ (DP83867_MDI_CROSSOVER_AUTO << DP83867_MDI_CROSSOVER) |
+ (dp83867->rx_fifo_depth << DP83867_PHYCTRL_RXFIFO_SHIFT) |
+ (dp83867->tx_fifo_depth << DP83867_PHYCTRL_TXFIFO_SHIFT));
+ phy_write(phydev, MII_DP83867_BISCR, 0x0);
+
+ /* This is a SW workaround for link instability if RX_CTRL is
+ * not strapped to mode 3 or 4 in HW. This is required along
+ * with clearing bit 7 and increasing autoneg timer above.
+ */
+ if (dp83867->rxctrl_strap_quirk)
+ phy_set_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
+ BIT(8));
}
val = phy_read(phydev, DP83867_CFG3);
diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h
index 132f9bf49198..c9988a08483e 100644
--- a/drivers/net/phy/mscc/mscc.h
+++ b/drivers/net/phy/mscc/mscc.h
@@ -271,6 +271,7 @@ enum rgmii_clock_delay {
#define PHY_ID_VSC8514 0x00070670
#define PHY_ID_VSC8530 0x00070560
#define PHY_ID_VSC8531 0x00070570
+#define PHY_ID_VSC8531_02 0x00070572
#define PHY_ID_VSC8540 0x00070760
#define PHY_ID_VSC8541 0x00070770
#define PHY_ID_VSC8552 0x000704e0
@@ -360,6 +361,8 @@ struct vsc8531_private {
* package.
*/
unsigned int base_addr;
+ u32 rx_delay;
+ u32 tx_delay;
#if IS_ENABLED(CONFIG_MACSEC)
/* MACsec fields:
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index 95bd2d277ba4..d2c83694e9e3 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -535,9 +535,13 @@ static int vsc85xx_rgmii_set_skews(struct phy_device *phydev, u32 rgmii_cntl,
u16 rgmii_tx_delay_pos = ffs(rgmii_tx_delay_mask) - 1;
u16 reg_val = 0;
int rc;
+ struct vsc8531_private *vsc8531 = phydev->priv;
mutex_lock(&phydev->lock);
+ reg_val = (vsc8531->rx_delay << RGMII_RX_CLK_DELAY_POS) |
+ vsc8531->tx_delay;
+
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID ||
phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
reg_val |= RGMII_CLK_DELAY_2_0_NS << rgmii_rx_delay_pos;
@@ -1511,6 +1515,17 @@ static int vsc85xx_config_init(struct phy_device *phydev)
{
int rc, i, phy_id;
struct vsc8531_private *vsc8531 = phydev->priv;
+ struct device_node *of_node = phydev->mdio.dev.of_node;
+
+ rc = of_property_read_u32(of_node, "vsc8531,rx-delay",
+ &vsc8531->rx_delay);
+ if (rc < 0)
+ vsc8531->rx_delay = VSC8531_RGMII_CLK_DELAY_1_1_NS;
+
+ rc = of_property_read_u32(of_node, "vsc8531,tx-delay",
+ &vsc8531->tx_delay);
+ if (rc < 0)
+ vsc8531->tx_delay = VSC8531_RGMII_CLK_DELAY_0_2_NS;
rc = vsc85xx_default_config(phydev);
if (rc)
@@ -2226,6 +2241,31 @@ static struct phy_driver vsc85xx_driver[] = {
.get_stats = &vsc85xx_get_stats,
},
{
+ .phy_id = PHY_ID_VSC8531_02,
+ .name = "Microsemi VSC8531-02",
+ .phy_id_mask = 0xfffffff0,
+ /* PHY_GBIT_FEATURES */
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc85xx_config_init,
+ .config_aneg = &vsc85xx_config_aneg,
+ .aneg_done = &genphy_aneg_done,
+ .read_status = &vsc85xx_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc85xx_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
+},
+{
.phy_id = PHY_ID_VSC8540,
.name = "Microsemi FE VSC8540 SyncE",
.phy_id_mask = 0xfffffff0,
@@ -2459,6 +2499,7 @@ static struct mdio_device_id __maybe_unused vsc85xx_tbl[] = {
{ PHY_ID_VSC8514, 0xfffffff0, },
{ PHY_ID_VSC8530, 0xfffffff0, },
{ PHY_ID_VSC8531, 0xfffffff0, },
+ { PHY_ID_VSC8531_02, 0xfffffff0, },
{ PHY_ID_VSC8540, 0xfffffff0, },
{ PHY_ID_VSC8541, 0xfffffff0, },
{ PHY_ID_VSC8552, 0xfffffff0, },
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 20ca6418f7bc..63f20dde85d5 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -815,9 +815,6 @@ void phy_stop(struct phy_device *phydev)
mutex_unlock(&phydev->lock);
- phy_state_machine(&phydev->state_queue.work);
- phy_stop_machine(phydev);
-
/* Cannot call flush_scheduled_work() here as desired because
* of rtnl_lock(), but PHY_HALTED shall guarantee irq handler
* will not reenable interrupts.
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index d9bdc19b01cc..1d56b4c4cd95 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1034,6 +1034,8 @@ void phy_disconnect(struct phy_device *phydev)
if (phy_interrupt_is_valid(phydev))
phy_free_interrupt(phydev);
+ phy_stop_machine(phydev);
+
phydev->adjust_link = NULL;
phy_detach(phydev);
diff --git a/drivers/net/phy/xilinx_phy.c b/drivers/net/phy/xilinx_phy.c
new file mode 100644
index 000000000000..2410fa3a59ad
--- /dev/null
+++ b/drivers/net/phy/xilinx_phy.c
@@ -0,0 +1,160 @@
+/* Xilinx PCS/PMA Core phy driver
+ *
+ * Copyright (C) 2015 Xilinx, Inc.
+ *
+ * Description:
+ * This driver is developed for PCS/PMA Core.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/xilinx_phy.h>
+
+#define MII_PHY_STATUS_SPD_MASK 0x0C00
+#define MII_PHY_STATUS_FULLDUPLEX 0x1000
+#define MII_PHY_STATUS_1000 0x0800
+#define MII_PHY_STATUS_100 0x0400
+#define XPCSPMA_PHY_CTRL_ISOLATE_DISABLE 0xFBFF
+
+static int xilinxphy_read_status(struct phy_device *phydev)
+{
+	int err;
+	int status = 0;
+
+	/* Update the link, but return if there
+	 * was an error
+	 */
+	err = genphy_update_link(phydev);
+	if (err)
+		return err;
+
+	if (phydev->autoneg == AUTONEG_ENABLE) {
+		status = phy_read(phydev, MII_LPA);
+		/* phy_read() returns a negative errno on MDIO failure;
+		 * don't interpret it as status bits.
+		 */
+		if (status < 0)
+			return status;
+		if (status & MII_PHY_STATUS_FULLDUPLEX)
+			phydev->duplex = DUPLEX_FULL;
+		else
+			phydev->duplex = DUPLEX_HALF;
+		switch (status & MII_PHY_STATUS_SPD_MASK) {
+		case MII_PHY_STATUS_1000:
+			phydev->speed = SPEED_1000;
+			break;
+
+		case MII_PHY_STATUS_100:
+			phydev->speed = SPEED_100;
+			break;
+		default:
+			phydev->speed = SPEED_10;
+			break;
+		}
+	} else {
+		int bmcr = phy_read(phydev, MII_BMCR);
+
+		if (bmcr < 0)
+			return bmcr;
+
+		if (bmcr & BMCR_FULLDPLX)
+			phydev->duplex = DUPLEX_FULL;
+		else
+			phydev->duplex = DUPLEX_HALF;
+
+		if (bmcr & BMCR_SPEED1000)
+			phydev->speed = SPEED_1000;
+		else if (bmcr & BMCR_SPEED100)
+			phydev->speed = SPEED_100;
+		else
+			phydev->speed = SPEED_10;
+	}
+
+	/* For 1000BASE-X Phy Mode the speed/duplex will always be
+	 * 1000Mbps/fullduplex
+	 */
+	if (phydev->dev_flags == XAE_PHY_TYPE_1000BASE_X) {
+		phydev->duplex = DUPLEX_FULL;
+		phydev->speed = SPEED_1000;
+	}
+
+	/* For 2500BASE-X Phy Mode the speed/duplex will always be
+	 * 2500Mbps/fullduplex
+	 */
+	if (phydev->dev_flags == XAE_PHY_TYPE_2500) {
+		phydev->duplex = DUPLEX_FULL;
+		phydev->speed = SPEED_2500;
+	}
+
+	return 0;
+}
+
+static int xilinxphy_of_init(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct device_node *of_node = dev->of_node;
+ u32 phytype;
+
+ if (!IS_ENABLED(CONFIG_OF_MDIO))
+ return 0;
+
+ if (!of_node)
+ return -ENODEV;
+
+ if (!of_property_read_u32(of_node, "xlnx,phy-type", &phytype)) {
+ if (phytype == XAE_PHY_TYPE_1000BASE_X)
+ phydev->dev_flags |= XAE_PHY_TYPE_1000BASE_X;
+ if (phytype == XAE_PHY_TYPE_2500)
+ phydev->dev_flags |= XAE_PHY_TYPE_2500;
+ }
+
+ return 0;
+}
+
+static int xilinxphy_config_init(struct phy_device *phydev)
+{
+	int temp;
+
+	xilinxphy_of_init(phydev);
+	temp = phy_read(phydev, MII_BMCR);
+	if (temp < 0)
+		return temp;
+	/* clear the BMCR isolate bit to take the PCS/PMA out of isolation */
+	phy_write(phydev, MII_BMCR, temp & XPCSPMA_PHY_CTRL_ISOLATE_DISABLE);
+	return 0;
+}
+
+static struct phy_driver xilinx_drivers[] = {
+ {
+ .phy_id = XILINX_PHY_ID,
+ .phy_id_mask = XILINX_PHY_ID_MASK,
+ .name = "Xilinx PCS/PMA PHY",
+ .features = PHY_GBIT_FEATURES,
+ .config_init = &xilinxphy_config_init,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &xilinxphy_read_status,
+ .resume = &genphy_resume,
+ .suspend = &genphy_suspend,
+ },
+};
+
+module_phy_driver(xilinx_drivers);
+
+static struct mdio_device_id __maybe_unused xilinx_tbl[] = {
+ { XILINX_PHY_ID, XILINX_PHY_ID_MASK },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, xilinx_tbl);
+MODULE_DESCRIPTION("Xilinx PCS/PMA PHY driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nvmem/zynqmp_nvmem.c b/drivers/nvmem/zynqmp_nvmem.c
index 5893543918c8..7edba185e1a3 100644
--- a/drivers/nvmem/zynqmp_nvmem.c
+++ b/drivers/nvmem/zynqmp_nvmem.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2019 Xilinx, Inc.
+ * Copyright (C) 2017 - 2019 Xilinx, Inc.
*/
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
@@ -10,40 +11,163 @@
#include <linux/firmware/xlnx-zynqmp.h>
#define SILICON_REVISION_MASK 0xF
+#define WORD_INBYTES (4)
+#define SOC_VER_SIZE (0x4)
+#define EFUSE_MEMORY_SIZE (0xF4)
+#define UNUSED_SPACE (0x8)
+#define ZYNQMP_NVMEM_SIZE (SOC_VER_SIZE + UNUSED_SPACE + \
+ EFUSE_MEMORY_SIZE)
+#define SOC_VERSION_OFFSET (0x0)
+#define EFUSE_START_OFFSET (0xC)
+#define EFUSE_END_OFFSET (0xFC)
+#define EFUSE_NOT_ENABLED (29)
+#define EFUSE_READ (0)
+#define EFUSE_WRITE (1)
-struct zynqmp_nvmem_data {
- struct device *dev;
- struct nvmem_device *nvmem;
+static const struct zynqmp_eemi_ops *eemi_ops;
+
+/**
+ * struct xilinx_efuse - the basic structure
+ * @src: address of the buffer to store the data to be write/read
+ * @size: no of words to be read/write
+ * @offset: offset to be read/write
+ * @flag: 0 - represents efuse read and 1 - represents efuse write
+ * @fullmap: full-map access flag (semantics defined by the firmware
+ *           eFUSE interface; confirm against the PMU firmware API)
+ *
+ * this structure stores all the required details to
+ * read/write efuse memory.
+ */
+struct xilinx_efuse {
+ u64 src;
+ u32 size;
+ u32 offset;
+ u32 flag;
+ u32 fullmap;
};
-static const struct zynqmp_eemi_ops *eemi_ops;
+/**
+ * zynqmp_efuse_access - read from or program the eFUSE memory
+ * @context: nvmem context (the provider's struct device)
+ * @offset: byte offset into the eFUSE map (must be word aligned)
+ * @val: caller buffer to read into / write from
+ * @bytes: number of bytes to transfer (must be a multiple of 4)
+ * @flag: EFUSE_READ or EFUSE_WRITE
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int zynqmp_efuse_access(void *context, unsigned int offset,
+			       void *val, size_t bytes, unsigned int flag)
+{
+	size_t words = bytes / WORD_INBYTES;
+	struct device *dev = context;
+	dma_addr_t dma_addr, dma_buf;
+	struct xilinx_efuse *efuse;
+	char *data;
+	int ret;
+
+	if (!eemi_ops->efuse_access)
+		return -ENXIO;
+
+	/* The firmware operates on whole 32-bit eFUSE words only. */
+	if (bytes % WORD_INBYTES != 0) {
+		dev_err(dev,
+			"ERROR: Bytes requested should be word aligned\n");
+		return -ENOTSUPP;
+	}
+	if (offset % WORD_INBYTES != 0) {
+		dev_err(dev,
+			"ERROR: offset requested should be word aligned\n");
+		return -ENOTSUPP;
+	}
+
+	efuse = dma_alloc_coherent(dev, sizeof(struct xilinx_efuse),
+				   &dma_addr, GFP_KERNEL);
+	if (!efuse)
+		return -ENOMEM;
+
+	/*
+	 * Payload buffer must be 'bytes' long; the previous sizeof(bytes)
+	 * only allocated sizeof(size_t) bytes and the memcpy below would
+	 * overflow it for any transfer larger than that.
+	 */
+	data = dma_alloc_coherent(dev, bytes, &dma_buf, GFP_KERNEL);
+	if (!data) {
+		dma_free_coherent(dev, sizeof(struct xilinx_efuse),
+				  efuse, dma_addr);
+		return -ENOMEM;
+	}
+
+	if (flag == EFUSE_WRITE) {
+		memcpy(data, val, bytes);
+		efuse->flag = EFUSE_WRITE;
+	} else {
+		efuse->flag = EFUSE_READ;
+	}
+
+	efuse->src = dma_buf;
+	efuse->size = words;
+	efuse->offset = offset;
+
+	/* Firmware status is reported back through 'ret'. */
+	eemi_ops->efuse_access(dma_addr, &ret);
+	if (ret != 0) {
+		if (ret == EFUSE_NOT_ENABLED) {
+			dev_err(dev, "ERROR: efuse access is not enabled\n");
+			ret = -ENOTSUPP;
+			goto END;
+		}
+		dev_err(dev, "ERROR: in efuse read %x\n", ret);
+		ret = -EPERM;
+		goto END;
+	}
+
+	if (flag == EFUSE_READ)
+		memcpy(val, data, bytes);
+END:
+	dma_free_coherent(dev, sizeof(struct xilinx_efuse),
+			  efuse, dma_addr);
+	dma_free_coherent(dev, bytes, data, dma_buf);
+
+	return ret;
+}
static int zynqmp_nvmem_read(void *context, unsigned int offset,
- void *val, size_t bytes)
+ void *val, size_t bytes)
{
int ret;
int idcode, version;
- struct zynqmp_nvmem_data *priv = context;
if (!eemi_ops->get_chipid)
return -ENXIO;
- ret = eemi_ops->get_chipid(&idcode, &version);
- if (ret < 0)
- return ret;
+ switch (offset) {
+ /* Soc version offset is zero */
+ case SOC_VERSION_OFFSET:
+ if (bytes != SOC_VER_SIZE)
+ return -ENOTSUPP;
+
+ ret = eemi_ops->get_chipid(&idcode, &version);
+ if (ret < 0)
+ return ret;
+
+ pr_debug("Read chipid val %x %x\n", idcode, version);
+ *(int *)val = version & SILICON_REVISION_MASK;
+ break;
+ /* Efuse offset starts from 0xc */
+ case EFUSE_START_OFFSET ... EFUSE_END_OFFSET:
+ ret = zynqmp_efuse_access(context, offset, val,
+ bytes, EFUSE_READ);
+ break;
+ default:
+ *(u32 *)val = 0xDEADBEEF;
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
- dev_dbg(priv->dev, "Read chipid val %x %x\n", idcode, version);
- *(int *)val = version & SILICON_REVISION_MASK;
+static int zynqmp_nvmem_write(void *context,
+ unsigned int offset, void *val, size_t bytes)
+{
+ /* Efuse offset starts from 0xc */
+ if (offset < EFUSE_START_OFFSET)
+ return -ENOTSUPP;
- return 0;
+ return(zynqmp_efuse_access(context, offset,
+ val, bytes, EFUSE_WRITE));
}
static struct nvmem_config econfig = {
.name = "zynqmp-nvmem",
.owner = THIS_MODULE,
.word_size = 1,
- .size = 1,
- .read_only = true,
+ .size = ZYNQMP_NVMEM_SIZE,
};
static const struct of_device_id zynqmp_nvmem_match[] = {
@@ -54,25 +178,24 @@ MODULE_DEVICE_TABLE(of, zynqmp_nvmem_match);
static int zynqmp_nvmem_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct zynqmp_nvmem_data *priv;
-
- priv = devm_kzalloc(dev, sizeof(struct zynqmp_nvmem_data), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ struct nvmem_device *nvmem;
eemi_ops = zynqmp_pm_get_eemi_ops();
if (IS_ERR(eemi_ops))
return PTR_ERR(eemi_ops);
- priv->dev = dev;
- econfig.dev = dev;
+ econfig.dev = &pdev->dev;
+ econfig.priv = &pdev->dev;
econfig.reg_read = zynqmp_nvmem_read;
- econfig.priv = priv;
+ econfig.reg_write = zynqmp_nvmem_write;
- priv->nvmem = devm_nvmem_register(dev, &econfig);
+ nvmem = nvmem_register(&econfig);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
- return PTR_ERR_OR_ZERO(priv->nvmem);
+ platform_set_drvdata(pdev, nvmem);
+
+ return 0;
}
static struct platform_driver zynqmp_nvmem_driver = {
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index d91618641be6..54e4d5a7cf7d 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -107,4 +107,15 @@ config OF_DMA_DEFAULT_COHERENT
# arches should select this if DMA is coherent by default for OF devices
bool
+config OF_CONFIGFS
+ bool "Device Tree Overlay ConfigFS interface"
+ select CONFIGFS_FS
+ depends on OF_OVERLAY
+ help
+ Select this option to enable simple user-space driven DT overlay
+ interface to support device tree manipulated at runtime.
+ Say Y here to include this support.
+
+ If unsure, say N.
+
endif # OF
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index 663a4af0cccd..b00a95adf519 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-y = base.o device.o platform.o property.o
obj-$(CONFIG_OF_KOBJ) += kobj.o
+obj-$(CONFIG_OF_CONFIGFS) += configfs.o
obj-$(CONFIG_OF_DYNAMIC) += dynamic.o
obj-$(CONFIG_OF_FLATTREE) += fdt.o
obj-$(CONFIG_OF_EARLY_FLATTREE) += fdt_address.o
diff --git a/drivers/of/configfs.c b/drivers/of/configfs.c
new file mode 100644
index 000000000000..6d8f58e5cb5b
--- /dev/null
+++ b/drivers/of/configfs.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Configfs entries for device-tree
+ *
+ * Copyright (C) 2013 - Pantelis Antoniou <panto@antoniou-consulting.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/ctype.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/spinlock.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/configfs.h>
+#include <linux/types.h>
+#include <linux/stat.h>
+#include <linux/limits.h>
+#include <linux/file.h>
+#include <linux/vmalloc.h>
+#include <linux/firmware.h>
+
+#include "of_private.h"
+
+struct cfs_overlay_item {
+ struct config_item item;
+
+ char path[PATH_MAX];
+
+ const struct firmware *fw;
+ struct device_node *overlay;
+ int ov_id;
+
+ void *dtbo;
+ int dtbo_size;
+
+ void *mem;
+};
+
+static DEFINE_MUTEX(overlay_lock);
+
+/* Apply the overlay FDT blob; on success overlay->ov_id receives the
+ * changeset id later consumed by of_overlay_remove() on release.
+ */
+static int create_overlay(struct cfs_overlay_item *overlay, void *blob)
+{
+	int err;
+
+	/* of_overlay_fdt_apply() unflattens, validates and applies the blob */
+	err = of_overlay_fdt_apply(blob, overlay->dtbo_size, &overlay->ov_id);
+	if (err < 0) {
+		pr_err("%s: Failed to create overlay (err=%d)\n",
+		       __func__, err);
+		return err;
+	}
+
+	return err;
+}
+
+static inline struct cfs_overlay_item
+ *to_cfs_overlay_item(struct config_item *item)
+{
+ return item ? container_of(item, struct cfs_overlay_item, item) : NULL;
+}
+
+static ssize_t cfs_overlay_item_path_show(struct config_item *item, char *page)
+{
+ return sprintf(page, "%s\n", to_cfs_overlay_item(item)->path);
+}
+
+/* Store a firmware path, load the blob and apply it as an overlay. */
+static ssize_t cfs_overlay_item_path_store(struct config_item *item,
+					   const char *page, size_t count)
+{
+	struct cfs_overlay_item *overlay = to_cfs_overlay_item(item);
+	const char *p = page;
+	char *s;
+	int err;
+
+	/* if it's set do not allow changes */
+	if (overlay->path[0] != '\0' || overlay->dtbo_size > 0)
+		return -EPERM;
+
+	/* copy to path buffer (and make sure it's always zero terminated) */
+	count = snprintf(overlay->path, sizeof(overlay->path) - 1, "%s", p);
+	overlay->path[sizeof(overlay->path) - 1] = '\0';
+
+	/* strip trailing newlines */
+	s = overlay->path + strlen(overlay->path);
+	while (s > overlay->path && *--s == '\n')
+		*s = '\0';
+
+	pr_debug("%s: path is '%s'\n", __func__, overlay->path);
+
+	err = request_firmware(&overlay->fw, overlay->path, NULL);
+	if (err != 0)
+		goto out_err;
+
+	overlay->dtbo_size = overlay->fw->size;
+	err = create_overlay(overlay, (void *)overlay->fw->data);
+	if (err < 0)
+		goto out_err;
+
+	return count;
+
+out_err:
+	release_firmware(overlay->fw);
+	overlay->fw = NULL;
+
+	/* Reset size too, or the guard above would block any retry. */
+	overlay->dtbo_size = 0;
+	overlay->path[0] = '\0';
+
+	/* Report the failure instead of returning 'count' (fake success). */
+	return err;
+}
+
+static ssize_t cfs_overlay_item_status_show(struct config_item *item,
+ char *page)
+{
+ return sprintf(page, "%s\n", to_cfs_overlay_item(item)->ov_id >= 0 ?
+ "applied" : "unapplied");
+}
+
+CONFIGFS_ATTR(cfs_overlay_item_, path);
+CONFIGFS_ATTR_RO(cfs_overlay_item_, status);
+
+static struct configfs_attribute *cfs_overlay_attrs[] = {
+ &cfs_overlay_item_attr_path,
+ &cfs_overlay_item_attr_status,
+ NULL,
+};
+
+ssize_t cfs_overlay_item_dtbo_read(struct config_item *item,
+ void *buf, size_t max_count)
+{
+ struct cfs_overlay_item *overlay = to_cfs_overlay_item(item);
+
+ pr_debug("%s: buf=%p max_count=%zu\n", __func__, buf, max_count);
+
+ if (!overlay->dtbo)
+ return 0;
+
+ /* copy if buffer provided */
+ if (buf) {
+ /* the buffer must be large enough */
+ if (overlay->dtbo_size > max_count)
+ return -ENOSPC;
+
+ memcpy(buf, overlay->dtbo, overlay->dtbo_size);
+ }
+
+ return overlay->dtbo_size;
+}
+
+ssize_t cfs_overlay_item_dtbo_write(struct config_item *item,
+ const void *buf, size_t count)
+{
+ struct cfs_overlay_item *overlay = to_cfs_overlay_item(item);
+ int err;
+
+ /* if it's set do not allow changes */
+ if (overlay->path[0] != '\0' || overlay->dtbo_size > 0)
+ return -EPERM;
+
+ /* copy the contents */
+ overlay->dtbo = kmemdup(buf, count, GFP_KERNEL);
+ if (!overlay->dtbo)
+ return -ENOMEM;
+
+ overlay->dtbo_size = count;
+
+ err = create_overlay(overlay, overlay->dtbo);
+ if (err < 0)
+ goto out_err;
+
+ return count;
+
+out_err:
+ kfree(overlay->dtbo);
+ overlay->dtbo = NULL;
+ overlay->dtbo_size = 0;
+
+ return err;
+}
+
+CONFIGFS_BIN_ATTR(cfs_overlay_item_, dtbo, NULL, SZ_1M);
+
+static struct configfs_bin_attribute *cfs_overlay_bin_attrs[] = {
+ &cfs_overlay_item_attr_dtbo,
+ NULL,
+};
+
+static void cfs_overlay_release(struct config_item *item)
+{
+ struct cfs_overlay_item *overlay = to_cfs_overlay_item(item);
+
+ if (overlay->ov_id >= 0)
+ of_overlay_remove(&overlay->ov_id);
+ if (overlay->fw)
+ release_firmware(overlay->fw);
+ /* kfree with NULL is safe */
+ kfree(overlay->dtbo);
+ kfree(overlay->mem);
+ kfree(overlay);
+}
+
+static struct configfs_item_operations cfs_overlay_item_ops = {
+ .release = cfs_overlay_release,
+};
+
+static struct config_item_type cfs_overlay_type = {
+ .ct_item_ops = &cfs_overlay_item_ops,
+ .ct_attrs = cfs_overlay_attrs,
+ .ct_bin_attrs = cfs_overlay_bin_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_item
+ *cfs_overlay_group_make_item(struct config_group *group,
+ const char *name)
+{
+ struct cfs_overlay_item *overlay;
+
+ overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
+ if (!overlay)
+ return ERR_PTR(-ENOMEM);
+ overlay->ov_id = -1;
+ config_item_init_type_name(&overlay->item, name, &cfs_overlay_type);
+
+ return &overlay->item;
+}
+
+static void cfs_overlay_group_drop_item(struct config_group *group,
+ struct config_item *item)
+{
+ struct cfs_overlay_item *overlay = to_cfs_overlay_item(item);
+
+ config_item_put(&overlay->item);
+}
+
+static struct configfs_group_operations overlays_ops = {
+ .make_item = cfs_overlay_group_make_item,
+ .drop_item = cfs_overlay_group_drop_item,
+};
+
+static struct config_item_type overlays_type = {
+ .ct_group_ops = &overlays_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_group_operations of_cfs_ops = {
+ /* empty - we don't allow anything to be created */
+};
+
+static struct config_item_type of_cfs_type = {
+ .ct_group_ops = &of_cfs_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+struct config_group of_cfs_overlay_group;
+
+static struct configfs_subsystem of_cfs_subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "device-tree",
+ .ci_type = &of_cfs_type,
+ },
+ },
+ .su_mutex = __MUTEX_INITIALIZER(of_cfs_subsys.su_mutex),
+};
+
+static int __init of_cfs_init(void)
+{
+ int ret;
+
+ pr_info("%s\n", __func__);
+
+ config_group_init(&of_cfs_subsys.su_group);
+ config_group_init_type_name(&of_cfs_overlay_group, "overlays",
+ &overlays_type);
+ configfs_add_default_group(&of_cfs_overlay_group,
+ &of_cfs_subsys.su_group);
+
+ ret = configfs_register_subsystem(&of_cfs_subsys);
+ if (ret != 0) {
+ pr_err("%s: failed to register subsys\n", __func__);
+ goto out;
+ }
+ pr_info("%s: OK\n", __func__);
+out:
+ return ret;
+}
+late_initcall(of_cfs_init);
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 91bfdb784829..09994e847552 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -81,6 +81,14 @@ config PCIE_XILINX
Say 'Y' here if you want kernel to support the Xilinx AXI PCIe
Host Bridge driver.
+config PCIE_XDMA_PL
+ bool "Xilinx XDMA PL PCIe host bridge support"
+ depends on ARCH_ZYNQMP || MICROBLAZE
+ depends on PCI_MSI_IRQ_DOMAIN
+ help
+	  Say 'Y' here if you want the kernel to enable support for the
+	  Xilinx XDMA PL PCIe Host Bridge driver.
+
config PCI_XGENE
bool "X-Gene PCIe controller"
depends on ARM64 || COMPILE_TEST
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index 158c59771824..7dcb733dfb9e 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
obj-$(CONFIG_PCIE_RCAR) += pcie-rcar.o
obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o
obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
+obj-$(CONFIG_PCIE_XDMA_PL) += pcie-xdma-pl.o
obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o
obj-$(CONFIG_PCI_V3_SEMI) += pci-v3-semi.o
diff --git a/drivers/pci/controller/pcie-xdma-pl.c b/drivers/pci/controller/pcie-xdma-pl.c
new file mode 100644
index 000000000000..55876c04b871
--- /dev/null
+++ b/drivers/pci/controller/pcie-xdma-pl.c
@@ -0,0 +1,882 @@
+/*
+ * PCIe host controller driver for Xilinx XDMA PCIe Bridge
+ *
+ * Copyright (C) 2017 Xilinx, Inc. All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/irqchip/chained_irq.h>
+
+#include "../pci.h"
+
+/* Register definitions */
+#define XILINX_PCIE_REG_VSEC 0x0000012c
+#define XILINX_PCIE_REG_BIR 0x00000130
+#define XILINX_PCIE_REG_IDR 0x00000138
+#define XILINX_PCIE_REG_IMR 0x0000013c
+#define XILINX_PCIE_REG_PSCR 0x00000144
+#define XILINX_PCIE_REG_RPSC 0x00000148
+#define XILINX_PCIE_REG_MSIBASE1 0x0000014c
+#define XILINX_PCIE_REG_MSIBASE2 0x00000150
+#define XILINX_PCIE_REG_RPEFR 0x00000154
+#define XILINX_PCIE_REG_RPIFR1 0x00000158
+#define XILINX_PCIE_REG_RPIFR2 0x0000015c
+#define XILINX_PCIE_REG_IDRN 0x00000160
+#define XILINX_PCIE_REG_IDRN_MASK 0x00000164
+#define XILINX_PCIE_REG_MSI_LOW 0x00000170
+#define XILINX_PCIE_REG_MSI_HI 0x00000174
+#define XILINX_PCIE_REG_MSI_LOW_MASK 0x00000178
+#define XILINX_PCIE_REG_MSI_HI_MASK 0x0000017c
+
+/* Interrupt registers definitions */
+#define XILINX_PCIE_INTR_LINK_DOWN BIT(0)
+#define XILINX_PCIE_INTR_HOT_RESET BIT(3)
+#define XILINX_PCIE_INTR_CFG_TIMEOUT BIT(8)
+#define XILINX_PCIE_INTR_CORRECTABLE BIT(9)
+#define XILINX_PCIE_INTR_NONFATAL BIT(10)
+#define XILINX_PCIE_INTR_FATAL BIT(11)
+#define XILINX_PCIE_INTR_INTX BIT(16)
+#define XILINX_PCIE_INTR_MSI BIT(17)
+#define XILINX_PCIE_INTR_SLV_UNSUPP BIT(20)
+#define XILINX_PCIE_INTR_SLV_UNEXP BIT(21)
+#define XILINX_PCIE_INTR_SLV_COMPL BIT(22)
+#define XILINX_PCIE_INTR_SLV_ERRP BIT(23)
+#define XILINX_PCIE_INTR_SLV_CMPABT BIT(24)
+#define XILINX_PCIE_INTR_SLV_ILLBUR BIT(25)
+#define XILINX_PCIE_INTR_MST_DECERR BIT(26)
+#define XILINX_PCIE_INTR_MST_SLVERR BIT(27)
+#define XILINX_PCIE_IMR_ALL_MASK 0x0FF30FE9
+#define XILINX_PCIE_IDR_ALL_MASK 0xFFFFFFFF
+#define XILINX_PCIE_IDRN_MASK GENMASK(19, 16)
+
+/* Root Port Error FIFO Read Register definitions */
+#define XILINX_PCIE_RPEFR_ERR_VALID BIT(18)
+#define XILINX_PCIE_RPEFR_REQ_ID GENMASK(15, 0)
+#define XILINX_PCIE_RPEFR_ALL_MASK 0xFFFFFFFF
+
+/* Root Port Interrupt FIFO Read Register 1 definitions */
+#define XILINX_PCIE_RPIFR1_INTR_VALID BIT(31)
+#define XILINX_PCIE_RPIFR1_MSI_INTR BIT(30)
+#define XILINX_PCIE_RPIFR1_INTR_MASK GENMASK(28, 27)
+#define XILINX_PCIE_RPIFR1_ALL_MASK 0xFFFFFFFF
+#define XILINX_PCIE_RPIFR1_INTR_SHIFT 27
+#define XILINX_PCIE_IDRN_SHIFT 16
+#define XILINX_PCIE_VSEC_REV_MASK GENMASK(19, 16)
+#define XILINX_PCIE_VSEC_REV_SHIFT 16
+#define XILINX_PCIE_FIFO_SHIFT 5
+
+/* Bridge Info Register definitions */
+#define XILINX_PCIE_BIR_ECAM_SZ_MASK GENMASK(18, 16)
+#define XILINX_PCIE_BIR_ECAM_SZ_SHIFT 16
+
+/* Root Port Interrupt FIFO Read Register 2 definitions */
+#define XILINX_PCIE_RPIFR2_MSG_DATA GENMASK(15, 0)
+
+/* Root Port Status/control Register definitions */
+#define XILINX_PCIE_REG_RPSC_BEN BIT(0)
+
+/* Phy Status/Control Register definitions */
+#define XILINX_PCIE_REG_PSCR_LNKUP BIT(11)
+
+/* ECAM definitions */
+#define ECAM_BUS_NUM_SHIFT 20
+#define ECAM_DEV_NUM_SHIFT 12
+
+/* Number of MSI IRQs */
+#define XILINX_NUM_MSI_IRQS 64
+#define INTX_NUM 4
+
+/* For CPM Versal */
+#define CPM_BRIDGE_BASE_OFF 0xCD8
+#define XILINX_PCIE_MISC_IR_STATUS 0x00000340
+#define XILINX_PCIE_MISC_IR_ENABLE 0x00000348
+#define XILINX_PCIE_MISC_IR_LOCAL BIT(1)
+
+/* CPM versal Interrupt registers */
+#define XILINX_PCIE_INTR_CFG_PCIE_TIMEOUT BIT(4)
+#define XILINX_PCIE_INTR_CFG_ERR_POISON BIT(12)
+#define XILINX_PCIE_INTR_PME_TO_ACK_RCVD BIT(15)
+#define XILINX_PCIE_INTR_PM_PME_RCVD BIT(17)
+#define XILINX_PCIE_INTR_SLV_PCIE_TIMEOUT BIT(28)
+
+#define XILINX_PCIE_IMR_ALL_MASK_CPM 0x1FF39FF9
+
+enum msi_mode {
+ MSI_DECD_MODE = 1,
+ MSI_FIFO_MODE,
+};
+
+struct xilinx_msi {
+ struct irq_domain *msi_domain;
+ unsigned long *bitmap;
+ struct irq_domain *dev_domain;
+ struct mutex lock; /* protect bitmap variable */
+ unsigned long msi_pages;
+ int irq_msi0;
+ int irq_msi1;
+};
+
+/**
+ * struct xilinx_pcie_port - PCIe port information
+ * @reg_base: IO Mapped Register Base
+ * @cpm_base: CPM SLCR Register Base
+ * @irq: Interrupt number
+ * @root_busno: Root Bus number
+ * @dev: Device pointer
+ * @leg_domain: Legacy IRQ domain pointer
+ * @resources: Bus Resources
+ * @msi: MSI information
+ * @irq_misc: Legacy and error interrupt number
+ * @msi_mode: MSI mode
+ */
+struct xilinx_pcie_port {
+ void __iomem *reg_base;
+ void __iomem *cpm_base;
+ u32 irq;
+ u8 root_busno;
+ struct device *dev;
+ struct irq_domain *leg_domain;
+ struct list_head resources;
+ struct xilinx_msi msi;
+ int irq_misc;
+ u8 msi_mode;
+};
+
+/* Read a bridge register, honouring the CPM Versal register offset. */
+static inline u32 pcie_read(struct xilinx_pcie_port *port, u32 reg)
+{
+	u32 off = reg;
+
+	if (port->cpm_base)
+		off += CPM_BRIDGE_BASE_OFF;
+
+	return readl(port->reg_base + off);
+}
+
+/* Write a bridge register, honouring the CPM Versal register offset. */
+static inline void pcie_write(struct xilinx_pcie_port *port, u32 val, u32 reg)
+{
+	u32 off = reg;
+
+	if (port->cpm_base)
+		off += CPM_BRIDGE_BASE_OFF;
+
+	writel(val, port->reg_base + off);
+}
+
+/* Report whether the PHY status register flags the link as up. */
+static inline bool xilinx_pcie_link_is_up(struct xilinx_pcie_port *port)
+{
+	u32 pscr = pcie_read(port, XILINX_PCIE_REG_PSCR);
+
+	return (pscr & XILINX_PCIE_REG_PSCR_LNKUP) != 0;
+}
+
+/**
+ * xilinx_pcie_clear_err_interrupts - Clear Error Interrupts
+ * @port: PCIe port information
+ */
+static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port)
+{
+ unsigned long val = pcie_read(port, XILINX_PCIE_REG_RPEFR);
+
+ if (val & XILINX_PCIE_RPEFR_ERR_VALID) {
+ dev_dbg(port->dev, "Requester ID %lu\n",
+ val & XILINX_PCIE_RPEFR_REQ_ID);
+ pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK,
+ XILINX_PCIE_REG_RPEFR);
+ }
+}
+
+/**
+ * xilinx_pcie_valid_device - Check if a valid device is present on bus
+ * @bus: PCI Bus structure
+ * @devfn: device/function
+ *
+ * Return: 'true' on success and 'false' if invalid device is found
+ */
+static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
+{
+	struct xilinx_pcie_port *port = bus->sysdata;
+	bool on_root = (bus->number == port->root_busno);
+
+	/* Downstream config accesses require an established link. */
+	if (!on_root && !xilinx_pcie_link_is_up(port))
+		return false;
+
+	/* The root port hosts exactly one device, at devfn 0. */
+	if (on_root && devfn > 0)
+		return false;
+
+	return true;
+}
+
+/**
+ * xilinx_pcie_map_bus - Get configuration base
+ * @bus: PCI Bus structure
+ * @devfn: Device/function
+ * @where: Offset from base
+ *
+ * Return: Base address of the configuration space needed to be
+ * accessed.
+ */
+static void __iomem *xilinx_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct xilinx_pcie_port *port = bus->sysdata;
+ int relbus;
+
+ if (!xilinx_pcie_valid_device(bus, devfn))
+ return NULL;
+
+ relbus = (bus->number << ECAM_BUS_NUM_SHIFT) |
+ (devfn << ECAM_DEV_NUM_SHIFT);
+
+ return port->reg_base + relbus + where;
+}
+
+/* PCIe operations */
+static struct pci_ops xilinx_pcie_ops = {
+ .map_bus = xilinx_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+/**
+ * xilinx_pcie_enable_msi - Enable MSI support
+ * @port: PCIe port information
+ */
+static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
+{
+	struct xilinx_msi *msi = &port->msi;
+	phys_addr_t msg_addr;
+
+	/* One page backs the MSI target address written to the bridge. */
+	msi->msi_pages = __get_free_pages(GFP_KERNEL, 0);
+	if (!msi->msi_pages)
+		return;	/* don't program virt_to_phys(NULL) as MSI target */
+
+	msg_addr = virt_to_phys((void *)msi->msi_pages);
+	pcie_write(port, upper_32_bits(msg_addr), XILINX_PCIE_REG_MSIBASE1);
+	pcie_write(port, lower_32_bits(msg_addr), XILINX_PCIE_REG_MSIBASE2);
+}
+
+/**
+ * xilinx_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
+ * @domain: IRQ domain
+ * @irq: Virtual IRQ number
+ * @hwirq: HW interrupt number
+ *
+ * Return: Always returns 0.
+ */
+static int xilinx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+
+ return 0;
+}
+
+/* INTx IRQ Domain operations */
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = xilinx_pcie_intx_map,
+ .xlate = pci_irqd_intx_xlate,
+};
+
+/* Drain and dispatch all pending MSIs latched in @status_reg. */
+static void xilinx_pcie_handle_msi_irq(struct xilinx_pcie_port *port,
+				       u32 status_reg)
+{
+	struct xilinx_msi *msi;
+	unsigned long status;
+	u32 bit;
+	u32 virq;
+
+	msi = &port->msi;
+
+	while ((status = pcie_read(port, status_reg)) != 0) {
+		for_each_set_bit(bit, &status, 32) {
+			/*
+			 * The high register carries hwirqs 32-63.  Use a
+			 * separate variable: bumping 'bit' itself would
+			 * break the for_each_set_bit() iteration and leave
+			 * further high-register MSIs for the next re-read.
+			 */
+			u32 hwirq = bit;
+
+			pcie_write(port, BIT(bit), status_reg);
+			if (status_reg == XILINX_PCIE_REG_MSI_HI)
+				hwirq += 32;
+			virq = irq_find_mapping(msi->dev_domain, hwirq);
+			if (virq)
+				generic_handle_irq(virq);
+		}
+	}
+}
+
+static void xilinx_pcie_msi_handler_high(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct xilinx_pcie_port *port = irq_desc_get_handler_data(desc);
+
+ chained_irq_enter(chip, desc);
+ xilinx_pcie_handle_msi_irq(port, XILINX_PCIE_REG_MSI_HI);
+ chained_irq_exit(chip, desc);
+}
+
+static void xilinx_pcie_msi_handler_low(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct xilinx_pcie_port *port = irq_desc_get_handler_data(desc);
+
+ chained_irq_enter(chip, desc);
+ xilinx_pcie_handle_msi_irq(port, XILINX_PCIE_REG_MSI_LOW);
+ chained_irq_exit(chip, desc);
+}
+
+/**
+ * xilinx_pcie_intr_handler - Interrupt Service Handler
+ * @irq: IRQ number
+ * @data: PCIe port information
+ *
+ * Return: IRQ_HANDLED on success and IRQ_NONE on failure
+ */
+static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
+{
+ struct xilinx_pcie_port *port = (struct xilinx_pcie_port *)data;
+ u32 val, mask, status, msi_data, bit;
+ unsigned long intr_val;
+
+ /* Read interrupt decode and mask registers */
+ val = pcie_read(port, XILINX_PCIE_REG_IDR);
+ mask = pcie_read(port, XILINX_PCIE_REG_IMR);
+
+ status = val & mask;
+ if (!status)
+ return IRQ_NONE;
+
+ if (status & XILINX_PCIE_INTR_LINK_DOWN)
+ dev_warn(port->dev, "Link Down\n");
+
+ if (status & XILINX_PCIE_INTR_HOT_RESET)
+ dev_info(port->dev, "Hot reset\n");
+
+ if (status & XILINX_PCIE_INTR_CFG_TIMEOUT)
+ dev_warn(port->dev, "ECAM access timeout\n");
+
+ if (status & XILINX_PCIE_INTR_CORRECTABLE) {
+ dev_warn(port->dev, "Correctable error message\n");
+ xilinx_pcie_clear_err_interrupts(port);
+ }
+
+ if (status & XILINX_PCIE_INTR_NONFATAL) {
+ dev_warn(port->dev, "Non fatal error message\n");
+ xilinx_pcie_clear_err_interrupts(port);
+ }
+
+ if (status & XILINX_PCIE_INTR_FATAL) {
+ dev_warn(port->dev, "Fatal error message\n");
+ xilinx_pcie_clear_err_interrupts(port);
+ }
+
+ if (status & XILINX_PCIE_INTR_INTX) {
+ /* Handle INTx Interrupt */
+ intr_val = pcie_read(port, XILINX_PCIE_REG_IDRN);
+ intr_val = intr_val >> XILINX_PCIE_IDRN_SHIFT;
+
+ for_each_set_bit(bit, &intr_val, INTX_NUM)
+ generic_handle_irq(irq_find_mapping(port->leg_domain,
+ bit));
+ }
+
+ if (port->msi_mode == MSI_FIFO_MODE &&
+ (status & XILINX_PCIE_INTR_MSI) && (!port->cpm_base)) {
+ /* MSI Interrupt */
+ val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
+
+ if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
+ dev_warn(port->dev, "RP Intr FIFO1 read error\n");
+ goto error;
+ }
+
+ if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
+ msi_data = pcie_read(port, XILINX_PCIE_REG_RPIFR2) &
+ XILINX_PCIE_RPIFR2_MSG_DATA;
+
+ /* Clear interrupt FIFO register 1 */
+ pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
+ XILINX_PCIE_REG_RPIFR1);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ /* Handle MSI Interrupt */
+ val = irq_find_mapping(port->msi.dev_domain,
+ msi_data);
+ if (val)
+ generic_handle_irq(val);
+ }
+ }
+ }
+
+ if (status & XILINX_PCIE_INTR_SLV_UNSUPP)
+ dev_warn(port->dev, "Slave unsupported request\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_UNEXP)
+ dev_warn(port->dev, "Slave unexpected completion\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_COMPL)
+ dev_warn(port->dev, "Slave completion timeout\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_ERRP)
+ dev_warn(port->dev, "Slave Error Poison\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_CMPABT)
+ dev_warn(port->dev, "Slave Completer Abort\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_ILLBUR)
+ dev_warn(port->dev, "Slave Illegal Burst\n");
+
+ if (status & XILINX_PCIE_INTR_MST_DECERR)
+ dev_warn(port->dev, "Master decode error\n");
+
+ if (status & XILINX_PCIE_INTR_MST_SLVERR)
+ dev_warn(port->dev, "Master slave error\n");
+
+ if (port->cpm_base) {
+ if (status & XILINX_PCIE_INTR_CFG_PCIE_TIMEOUT)
+ dev_warn(port->dev, "PCIe ECAM access timeout\n");
+
+ if (status & XILINX_PCIE_INTR_CFG_ERR_POISON)
+ dev_warn(port->dev, "ECAM poisoned completion received\n");
+
+ if (status & XILINX_PCIE_INTR_PME_TO_ACK_RCVD)
+ dev_warn(port->dev, "PME_TO_ACK message received\n");
+
+ if (status & XILINX_PCIE_INTR_PM_PME_RCVD)
+ dev_warn(port->dev, "PM_PME message received\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_PCIE_TIMEOUT)
+ dev_warn(port->dev, "PCIe completion timeout received\n");
+ }
+
+error:
+ /* Clear the Interrupt Decode register */
+ pcie_write(port, status, XILINX_PCIE_REG_IDR);
+ if (port->cpm_base) {
+ val = readl(port->cpm_base + XILINX_PCIE_MISC_IR_STATUS);
+ if (val)
+ writel(val,
+ port->cpm_base + XILINX_PCIE_MISC_IR_STATUS);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct irq_chip xilinx_msi_irq_chip = {
+ .name = "xilinx_pcie:msi",
+ .irq_enable = pci_msi_unmask_irq,
+ .irq_disable = pci_msi_mask_irq,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info xilinx_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI),
+ .chip = &xilinx_msi_irq_chip,
+};
+
+static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct xilinx_pcie_port *pcie = irq_data_get_irq_chip_data(data);
+ struct xilinx_msi *msi = &pcie->msi;
+ phys_addr_t msi_addr;
+
+ msi_addr = virt_to_phys((void *)msi->msi_pages);
+ msg->address_lo = lower_32_bits(msi_addr);
+ msg->address_hi = upper_32_bits(msi_addr);
+ msg->data = data->hwirq;
+}
+
+static int xilinx_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static struct irq_chip xilinx_irq_chip = {
+ .name = "Xilinx MSI",
+ .irq_compose_msi_msg = xilinx_compose_msi_msg,
+ .irq_set_affinity = xilinx_msi_set_affinity,
+};
+
+static int xilinx_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct xilinx_pcie_port *pcie = domain->host_data;
+ struct xilinx_msi *msi = &pcie->msi;
+ int bit;
+ int i;
+
+ mutex_lock(&msi->lock);
+ bit = bitmap_find_free_region(msi->bitmap, XILINX_NUM_MSI_IRQS,
+ get_count_order(nr_irqs));
+ if (bit < 0) {
+ mutex_unlock(&msi->lock);
+ return -ENOSPC;
+ }
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, bit + i, &xilinx_irq_chip,
+ domain->host_data, handle_simple_irq,
+ NULL, NULL);
+ }
+ mutex_unlock(&msi->lock);
+ return 0;
+}
+
+static void xilinx_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *data = irq_domain_get_irq_data(domain, virq);
+ struct xilinx_pcie_port *pcie = irq_data_get_irq_chip_data(data);
+ struct xilinx_msi *msi = &pcie->msi;
+
+ mutex_lock(&msi->lock);
+ bitmap_release_region(msi->bitmap, data->hwirq,
+ get_count_order(nr_irqs));
+ mutex_unlock(&msi->lock);
+}
+
+static const struct irq_domain_ops dev_msi_domain_ops = {
+ .alloc = xilinx_irq_domain_alloc,
+ .free = xilinx_irq_domain_free,
+};
+
+/* Create the MSI parent/device IRQ domains and the vector bitmap. */
+static int xilinx_pcie_init_msi_irq_domain(struct xilinx_pcie_port *port)
+{
+	struct fwnode_handle *fwnode = of_node_to_fwnode(port->dev->of_node);
+	struct xilinx_msi *msi = &port->msi;
+	int size = BITS_TO_LONGS(XILINX_NUM_MSI_IRQS) * sizeof(long);
+
+	msi->dev_domain = irq_domain_add_linear(NULL, XILINX_NUM_MSI_IRQS,
+						&dev_msi_domain_ops, port);
+	if (!msi->dev_domain) {
+		dev_err(port->dev, "failed to create dev IRQ domain\n");
+		return -ENOMEM;
+	}
+	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
+						    &xilinx_msi_domain_info,
+						    msi->dev_domain);
+	if (!msi->msi_domain) {
+		dev_err(port->dev, "failed to create msi IRQ domain\n");
+		irq_domain_remove(msi->dev_domain);
+		return -ENOMEM;
+	}
+
+	mutex_init(&msi->lock);
+	msi->bitmap = kzalloc(size, GFP_KERNEL);
+	if (!msi->bitmap) {
+		/* Unwind both domains; previously they leaked here. */
+		irq_domain_remove(msi->msi_domain);
+		irq_domain_remove(msi->dev_domain);
+		return -ENOMEM;
+	}
+
+	xilinx_pcie_enable_msi(port);
+
+	return 0;
+}
+
+/**
+ * xilinx_pcie_init_irq_domain - Initialize IRQ domain
+ * @port: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
+{
+	struct device *dev = port->dev;
+	struct device_node *node = dev->of_node;
+	struct device_node *pcie_intc_node;
+
+	/* Setup INTx */
+	pcie_intc_node = of_get_next_child(node, NULL);
+	if (!pcie_intc_node) {
+		dev_err(dev, "No PCIe Intc node found\n");
+		/* PTR_ERR(NULL) evaluates to 0, i.e. bogus success. */
+		return -ENODEV;
+	}
+
+	port->leg_domain = irq_domain_add_linear(pcie_intc_node, INTX_NUM,
+						 &intx_domain_ops,
+						 port);
+	of_node_put(pcie_intc_node);	/* the domain takes its own reference */
+	if (!port->leg_domain) {
+		dev_err(dev, "Failed to get a INTx IRQ domain\n");
+		return -ENOMEM;
+	}
+
+	/* Propagate MSI domain setup failures instead of dropping them. */
+	return xilinx_pcie_init_msi_irq_domain(port);
+}
+
+/**
+ * xilinx_pcie_init_port - Initialize hardware
+ * @port: PCIe port information
+ *
+ * Reports link state, then masks, clears and re-enables bridge
+ * interrupts, enables the bridge, and applies CPM-specific interrupt
+ * setup when a CPM register block is present.  The write ordering
+ * (mask -> clear -> unmask -> bridge enable) is deliberate.
+ */
+static void xilinx_pcie_init_port(struct xilinx_pcie_port *port)
+{
+ if (xilinx_pcie_link_is_up(port))
+ dev_info(port->dev, "PCIe Link is UP\n");
+ else
+ dev_info(port->dev, "PCIe Link is DOWN\n");
+
+ /* Disable all interrupts */
+ pcie_write(port, ~XILINX_PCIE_IDR_ALL_MASK,
+ XILINX_PCIE_REG_IMR);
+
+ /* Clear pending interrupts */
+ pcie_write(port, pcie_read(port, XILINX_PCIE_REG_IDR) &
+ XILINX_PCIE_IMR_ALL_MASK,
+ XILINX_PCIE_REG_IDR);
+
+ /* Enable all interrupts */
+ if (!port->cpm_base)
+ pcie_write(port, XILINX_PCIE_IMR_ALL_MASK,
+ XILINX_PCIE_REG_IMR);
+ pcie_write(port, XILINX_PCIE_IDRN_MASK, XILINX_PCIE_REG_IDRN_MASK);
+ /* Decode mode delivers MSIs via dedicated low/high mask registers */
+ if (port->msi_mode == MSI_DECD_MODE) {
+ pcie_write(port, XILINX_PCIE_IDR_ALL_MASK,
+ XILINX_PCIE_REG_MSI_LOW_MASK);
+ pcie_write(port, XILINX_PCIE_IDR_ALL_MASK,
+ XILINX_PCIE_REG_MSI_HI_MASK);
+ }
+ /* Enable the Bridge enable bit */
+ pcie_write(port, pcie_read(port, XILINX_PCIE_REG_RPSC) |
+ XILINX_PCIE_REG_RPSC_BEN,
+ XILINX_PCIE_REG_RPSC);
+
+ /* CPM variant: unmask local misc interrupts and use the CPM mask */
+ if (port->cpm_base) {
+ writel(XILINX_PCIE_MISC_IR_LOCAL,
+ port->cpm_base + XILINX_PCIE_MISC_IR_ENABLE);
+ pcie_write(port, XILINX_PCIE_IMR_ALL_MASK_CPM,
+ XILINX_PCIE_REG_IMR);
+ }
+}
+
+/**
+ * xilinx_request_misc_irq - Request the "misc" interrupt line
+ * @port: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_request_misc_irq(struct xilinx_pcie_port *port)
+{
+ struct device *dev = port->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int err;
+
+ port->irq_misc = platform_get_irq_byname(pdev, "misc");
+ if (port->irq_misc <= 0) {
+ dev_err(dev, "Unable to find misc IRQ line\n");
+ /* irq 0 is not a valid line; never return it as success */
+ return port->irq_misc ? port->irq_misc : -ENXIO;
+ }
+ err = devm_request_irq(dev, port->irq_misc,
+ xilinx_pcie_intr_handler,
+ IRQF_SHARED | IRQF_NO_THREAD,
+ "xilinx-pcie", port);
+ if (err) {
+ dev_err(dev, "unable to request misc IRQ line %d\n",
+ port->irq_misc);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_request_msi_irq - Request the decode-mode MSI interrupt lines
+ * @port: PCIe port information
+ *
+ * Installs the chained handlers for the "msi0" and "msi1" lines.  If
+ * "msi1" cannot be found, the already-installed msi0 handler is torn
+ * down so no dangling chained handler is left behind.
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_request_msi_irq(struct xilinx_pcie_port *port)
+{
+ struct device *dev = port->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ port->msi.irq_msi0 = platform_get_irq_byname(pdev, "msi0");
+ if (port->msi.irq_msi0 <= 0) {
+ dev_err(dev, "Unable to find msi0 IRQ line\n");
+ /* irq 0 is not a valid line; never return it as success */
+ return port->msi.irq_msi0 ? port->msi.irq_msi0 : -ENXIO;
+ }
+
+ irq_set_chained_handler_and_data(port->msi.irq_msi0,
+ xilinx_pcie_msi_handler_low,
+ port);
+
+ port->msi.irq_msi1 = platform_get_irq_byname(pdev, "msi1");
+ if (port->msi.irq_msi1 <= 0) {
+ dev_err(dev, "Unable to find msi1 IRQ line\n");
+ /* Undo the msi0 chained handler installed above */
+ irq_set_chained_handler_and_data(port->msi.irq_msi0,
+ NULL, NULL);
+ return port->msi.irq_msi1 ? port->msi.irq_msi1 : -ENXIO;
+ }
+
+ irq_set_chained_handler_and_data(port->msi.irq_msi1,
+ xilinx_pcie_msi_handler_high,
+ port);
+
+ return 0;
+}
+
+/**
+ * xilinx_pcie_parse_dt - Parse Device tree
+ * @port: PCIe port information
+ *
+ * Handles two bindings: "xlnx,xdma-host-3.00" (registers from "reg",
+ * MSI in FIFO or decode mode) and "xlnx,versal-cpm-host-1.00"
+ * (named "cfg" and "cpm_slcr" register regions).
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
+{
+ struct device *dev = port->dev;
+ struct device_node *node = dev->of_node;
+ struct resource regs;
+ const char *type;
+ int err, mode_val, val;
+
+ if (of_device_is_compatible(node, "xlnx,xdma-host-3.00")) {
+ type = of_get_property(node, "device_type", NULL);
+ if (!type || strcmp(type, "pci")) {
+ dev_err(dev, "invalid \"device_type\" %s\n", type);
+ return -EINVAL;
+ }
+
+ err = of_address_to_resource(node, 0, &regs);
+ if (err) {
+ dev_err(dev, "missing \"reg\" property\n");
+ return err;
+ }
+
+ port->reg_base = devm_ioremap_resource(dev, &regs);
+ if (IS_ERR(port->reg_base))
+ return PTR_ERR(port->reg_base);
+
+ /*
+ * Select MSI mode: decode mode is chosen when the VSEC
+ * revision is non-zero and the BIR FIFO bit is clear.
+ * NOTE(review): confirm this decode-vs-FIFO probe against
+ * the XDMA bridge register spec.
+ */
+ val = pcie_read(port, XILINX_PCIE_REG_BIR);
+ val = (val >> XILINX_PCIE_FIFO_SHIFT) & MSI_DECD_MODE;
+ mode_val = pcie_read(port, XILINX_PCIE_REG_VSEC) &
+ XILINX_PCIE_VSEC_REV_MASK;
+ mode_val = mode_val >> XILINX_PCIE_VSEC_REV_SHIFT;
+ if (mode_val && !val) {
+ port->msi_mode = MSI_DECD_MODE;
+ dev_info(dev, "Using MSI Decode mode\n");
+ } else {
+ port->msi_mode = MSI_FIFO_MODE;
+ dev_info(dev, "Using MSI FIFO mode\n");
+ }
+
+ /* Decode mode uses separate misc/msi0/msi1 interrupt lines */
+ if (port->msi_mode == MSI_DECD_MODE) {
+ err = xilinx_request_misc_irq(port);
+ if (err)
+ return err;
+
+ err = xilinx_request_msi_irq(port);
+ if (err)
+ return err;
+
+ } else if (port->msi_mode == MSI_FIFO_MODE) {
+ /* FIFO mode: one shared interrupt for everything */
+ port->irq = irq_of_parse_and_map(node, 0);
+ if (!port->irq) {
+ dev_err(dev, "Unable to find IRQ line\n");
+ return -ENXIO;
+ }
+
+ err = devm_request_irq(dev, port->irq,
+ xilinx_pcie_intr_handler,
+ IRQF_SHARED | IRQF_NO_THREAD,
+ "xilinx-pcie", port);
+ if (err) {
+ dev_err(dev, "unable to request irq %d\n",
+ port->irq);
+ return err;
+ }
+ }
+ } else if (of_device_is_compatible(node, "xlnx,versal-cpm-host-1.00")) {
+ struct resource *res;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+ port->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(port->reg_base))
+ return PTR_ERR(port->reg_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "cpm_slcr");
+ port->cpm_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(port->cpm_base))
+ return PTR_ERR(port->cpm_base);
+
+ err = xilinx_request_misc_irq(port);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_pcie_probe - Probe function
+ * @pdev: Platform device pointer
+ *
+ * Allocates the host bridge, parses the DT, initializes the hardware
+ * and IRQ domains, then scans and populates the root bus.
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_probe(struct platform_device *pdev)
+{
+ struct xilinx_pcie_port *port;
+ struct device *dev = &pdev->dev;
+ struct pci_bus *bus;
+ struct pci_bus *child;
+ struct pci_host_bridge *bridge;
+ int err;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
+ if (!bridge)
+ return -ENOMEM; /* allocation failure, not a missing device */
+
+ port = pci_host_bridge_priv(bridge);
+
+ port->dev = dev;
+
+ err = xilinx_pcie_parse_dt(port);
+ if (err) {
+ dev_err(dev, "Parsing DT failed\n");
+ return err;
+ }
+
+ xilinx_pcie_init_port(port);
+
+ err = xilinx_pcie_init_irq_domain(port);
+ if (err) {
+ dev_err(dev, "Failed creating IRQ Domain\n");
+ return err;
+ }
+
+ err = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ &bridge->dma_ranges, NULL);
+ if (err) {
+ dev_err(dev, "Getting bridge resources failed\n");
+ return err;
+ }
+
+ bridge->dev.parent = dev;
+ bridge->sysdata = port;
+ bridge->busnr = port->root_busno;
+ bridge->ops = &xilinx_pcie_ops;
+ bridge->map_irq = of_irq_parse_and_map_pci;
+ bridge->swizzle_irq = pci_common_swizzle;
+
+ err = pci_scan_root_bus_bridge(bridge);
+ if (err)
+ return err;
+
+ bus = bridge->bus;
+
+ pci_assign_unassigned_bus_resources(bus);
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+ pci_bus_add_devices(bus);
+ return 0;
+}
+
+/* Bindings handled by this driver: XDMA and Versal-CPM root ports */
+static const struct of_device_id xilinx_pcie_of_match[] = {
+ { .compatible = "xlnx,xdma-host-3.00", },
+ { .compatible = "xlnx,versal-cpm-host-1.00", },
+ {}
+};
+
+static struct platform_driver xilinx_pcie_driver = {
+ .driver = {
+ .name = "xilinx-pcie",
+ .of_match_table = xilinx_pcie_of_match,
+ /* built-in only; forbid manual unbind via sysfs */
+ .suppress_bind_attrs = true,
+ },
+ .probe = xilinx_pcie_probe,
+};
+
+builtin_platform_driver(xilinx_pcie_driver);
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 9bd1427f2fd6..718ee1ffbc40 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -6,6 +6,7 @@
* (C) Copyright 2014 - 2015, Xilinx, Inc.
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -169,6 +170,7 @@ struct nwl_pcie {
u8 root_busno;
struct nwl_msi msi;
struct irq_domain *legacy_irq_domain;
+ struct clk *clk;
raw_spinlock_t leg_mask_lock;
};
@@ -491,7 +493,7 @@ static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
for (i = 0; i < nr_irqs; i++) {
irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip,
- domain->host_data, handle_simple_irq,
+ domain->host_data, handle_simple_irq,
NULL, NULL);
}
mutex_unlock(&msi->lock);
@@ -499,7 +501,7 @@ static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
}
static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs)
+ unsigned int nr_irqs)
{
struct irq_data *data = irq_domain_get_irq_data(domain, virq);
struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
@@ -753,7 +755,6 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
/* Enable all misc interrupts */
nwl_bridge_writel(pcie, MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);
-
/* Disable all legacy interrupts */
nwl_bridge_writel(pcie, (u32)~MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);
@@ -837,6 +838,11 @@ static int nwl_pcie_probe(struct platform_device *pdev)
return err;
}
+ pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pcie->clk))
+ return PTR_ERR(pcie->clk);
+ clk_prepare_enable(pcie->clk);
+
err = nwl_pcie_bridge_init(pcie);
if (err) {
dev_err(dev, "HW Initialization failed\n");
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index b3ed94b98d9b..05890bdc1cf3 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -67,6 +67,15 @@ source "drivers/phy/rockchip/Kconfig"
source "drivers/phy/samsung/Kconfig"
source "drivers/phy/socionext/Kconfig"
source "drivers/phy/st/Kconfig"
+
+config PHY_XILINX_ZYNQMP
+ tristate "Xilinx ZynqMP PHY driver"
+ depends on ARCH_ZYNQMP
+ select GENERIC_PHY
+ help
+ Enable this to support ZynqMP High Speed Gigabit Transceiver
+ that is part of ZynqMP SoC.
+
source "drivers/phy/tegra/Kconfig"
source "drivers/phy/ti/Kconfig"
source "drivers/phy/intel/Kconfig"
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 310c149a9df5..560792b7d9d4 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -29,3 +29,4 @@ obj-y += broadcom/ \
socionext/ \
st/ \
ti/
+obj-$(CONFIG_PHY_XILINX_ZYNQMP) += phy-zynqmp.o
diff --git a/drivers/phy/phy-zynqmp.c b/drivers/phy/phy-zynqmp.c
new file mode 100644
index 000000000000..9c1257962546
--- /dev/null
+++ b/drivers/phy/phy-zynqmp.c
@@ -0,0 +1,1583 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * phy-zynqmp.c - PHY driver for Xilinx ZynqMP GT.
+ *
+ * Copyright (C) 2015 - 2016 Xilinx Inc.
+ *
+ * Author: Subbaraya Sundeep <sbhatta@xilinx.com>
+ * Author: Anurag Kumar Vulisha <anuragku@xilinx.com>
+ *
+ * This driver is tested for USB and SATA currently.
+ * Other controllers PCIe, Display Port and SGMII should also
+ * work but that is experimental as of now.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-zynqmp.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <dt-bindings/phy/phy.h>
+#include <linux/soc/xilinx/zynqmp/fw.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/reset.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#define MAX_LANES 4
+
+#define RST_TIMEOUT 1000
+
+#define ICM_CFG0 0x10010
+#define ICM_CFG1 0x10014
+#define ICM_CFG0_L0_MASK 0x07
+#define ICM_CFG0_L1_MASK 0x70
+#define ICM_CFG1_L2_MASK 0x07
+#define ICM_CFG2_L3_MASK 0x70
+
+#define TM_CMN_RST 0x10018
+#define TM_CMN_RST_MASK 0x3
+#define TM_CMN_RST_EN 0x1
+#define TM_CMN_RST_SET 0x2
+
+#define ICM_PROTOCOL_PD 0x0
+#define ICM_PROTOCOL_PCIE 0x1
+#define ICM_PROTOCOL_SATA 0x2
+#define ICM_PROTOCOL_USB 0x3
+#define ICM_PROTOCOL_DP 0x4
+#define ICM_PROTOCOL_SGMII 0x5
+
+#define PLL_REF_SEL0 0x10000
+#define PLL_REF_OFFSET 0x4
+#define PLL_FREQ_MASK 0x1F
+
+#define L0_L0_REF_CLK_SEL 0x2860
+
+#define L0_PLL_STATUS_READ_1 0x23E4
+#define PLL_STATUS_READ_OFFSET 0x4000
+#define PLL_STATUS_LOCKED 0x10
+
+#define L0_PLL_SS_STEP_SIZE_0_LSB 0x2370
+#define L0_PLL_SS_STEP_SIZE_1 0x2374
+#define L0_PLL_SS_STEP_SIZE_2 0x2378
+#define L0_PLL_SS_STEP_SIZE_3_MSB 0x237C
+#define STEP_SIZE_OFFSET 0x4000
+#define STEP_SIZE_0_MASK 0xFF
+#define STEP_SIZE_1_MASK 0xFF
+#define STEP_SIZE_2_MASK 0xFF
+#define STEP_SIZE_3_MASK 0x3
+#define FORCE_STEP_SIZE 0x10
+#define FORCE_STEPS 0x20
+
+#define L0_PLL_SS_STEPS_0_LSB 0x2368
+#define L0_PLL_SS_STEPS_1_MSB 0x236C
+#define STEPS_OFFSET 0x4000
+#define STEPS_0_MASK 0xFF
+#define STEPS_1_MASK 0x07
+
+#define BGCAL_REF_SEL 0x10028
+#define BGCAL_REF_VALUE 0x0C
+
+#define L3_TM_CALIB_DIG19 0xEC4C
+#define L3_TM_CALIB_DIG19_NSW 0x07
+
+#define TM_OVERRIDE_NSW_CODE 0x20
+
+#define L3_CALIB_DONE_STATUS 0xEF14
+#define CALIB_DONE 0x02
+
+#define L0_TXPMA_ST_3 0x0B0C
+#define DN_CALIB_CODE 0x3F
+#define DN_CALIB_SHIFT 3
+
+#define L3_TM_CALIB_DIG18 0xEC48
+#define L3_TM_CALIB_DIG18_NSW 0xE0
+#define NSW_SHIFT 5
+#define NSW_PIPE_SHIFT 4
+
+#define L0_TM_PLL_DIG_37 0x2094
+#define TM_PLL_DIG_37_OFFSET 0x4000
+#define TM_COARSE_CODE_LIMIT 0x10
+
+#define L0_TM_DIG_6 0x106C
+#define TM_DIG_6_OFFSET 0x4000
+#define TM_DISABLE_DESCRAMBLE_DECODER 0x0F
+
+#define L0_TX_DIG_61 0x00F4
+#define TX_DIG_61_OFFSET 0x4000
+#define TM_DISABLE_SCRAMBLE_ENCODER 0x0F
+
+#define L0_TX_ANA_TM_18 0x0048
+#define TX_ANA_TM_18_OFFSET 0x4000
+
+#define L0_TX_ANA_TM_118 0x01D8
+#define TX_ANA_TM_118_OFFSET 0x4000
+#define L0_TX_ANA_TM_118_FORCE_17_0 BIT(0)
+
+#define L0_TXPMD_TM_45 0x0CB4
+#define TXPMD_TM_45_OFFSET 0x4000
+#define L0_TXPMD_TM_45_OVER_DP_MAIN BIT(0)
+#define L0_TXPMD_TM_45_ENABLE_DP_MAIN BIT(1)
+#define L0_TXPMD_TM_45_OVER_DP_POST1 BIT(2)
+#define L0_TXPMD_TM_45_ENABLE_DP_POST1 BIT(3)
+#define L0_TXPMD_TM_45_OVER_DP_POST2 BIT(4)
+#define L0_TXPMD_TM_45_ENABLE_DP_POST2 BIT(5)
+
+#define L0_TXPMD_TM_48 0x0CC0
+#define TXPMD_TM_48_OFFSET 0x4000
+
+#define TX_PROT_BUS_WIDTH 0x10040
+#define RX_PROT_BUS_WIDTH 0x10044
+
+#define PROT_BUS_WIDTH_SHIFT 2
+#define PROT_BUS_WIDTH_10 0x0
+#define PROT_BUS_WIDTH_20 0x1
+#define PROT_BUS_WIDTH_40 0x2
+
+#define LANE_CLK_SHARE_MASK 0x8F
+
+#define SATA_CONTROL_OFFSET 0x0100
+
+#define CONTROLLERS_PER_LANE 5
+
+#define PIPE_CLK_OFFSET 0x7c
+#define PIPE_CLK_ON 1
+#define PIPE_CLK_OFF 0
+#define PIPE_POWER_OFFSET 0x80
+#define PIPE_POWER_ON 1
+#define PIPE_POWER_OFF 0
+
+#define XPSGTR_TYPE_USB0 0 /* USB controller 0 */
+#define XPSGTR_TYPE_USB1 1 /* USB controller 1 */
+#define XPSGTR_TYPE_SATA_0 2 /* SATA controller lane 0 */
+#define XPSGTR_TYPE_SATA_1 3 /* SATA controller lane 1 */
+#define XPSGTR_TYPE_PCIE_0 4 /* PCIe controller lane 0 */
+#define XPSGTR_TYPE_PCIE_1 5 /* PCIe controller lane 1 */
+#define XPSGTR_TYPE_PCIE_2 6 /* PCIe controller lane 2 */
+#define XPSGTR_TYPE_PCIE_3 7 /* PCIe controller lane 3 */
+#define XPSGTR_TYPE_DP_0 8 /* Display Port controller lane 0 */
+#define XPSGTR_TYPE_DP_1 9 /* Display Port controller lane 1 */
+#define XPSGTR_TYPE_SGMII0 10 /* Ethernet SGMII controller 0 */
+#define XPSGTR_TYPE_SGMII1 11 /* Ethernet SGMII controller 1 */
+#define XPSGTR_TYPE_SGMII2 12 /* Ethernet SGMII controller 2 */
+#define XPSGTR_TYPE_SGMII3 13 /* Ethernet SGMII controller 3 */
+
+/*
+ * This table holds the valid combinations of controllers and
+ * lanes(Interconnect Matrix).  The row index is the lane number; each
+ * row lists the controller types the ICM can route onto that lane.
+ * NOTE(review): the table is only ever read -- it could be const;
+ * confirm no caller takes a non-const pointer before changing it.
+ */
+static unsigned int icm_matrix[][CONTROLLERS_PER_LANE] = {
+ { XPSGTR_TYPE_PCIE_0, XPSGTR_TYPE_SATA_0, XPSGTR_TYPE_USB0,
+ XPSGTR_TYPE_DP_1, XPSGTR_TYPE_SGMII0 },
+ { XPSGTR_TYPE_PCIE_1, XPSGTR_TYPE_SATA_1, XPSGTR_TYPE_USB0,
+ XPSGTR_TYPE_DP_0, XPSGTR_TYPE_SGMII1 },
+ { XPSGTR_TYPE_PCIE_2, XPSGTR_TYPE_SATA_0, XPSGTR_TYPE_USB0,
+ XPSGTR_TYPE_DP_1, XPSGTR_TYPE_SGMII2 },
+ { XPSGTR_TYPE_PCIE_3, XPSGTR_TYPE_SATA_1, XPSGTR_TYPE_USB1,
+ XPSGTR_TYPE_DP_0, XPSGTR_TYPE_SGMII3 }
+};
+
+/*
+ * Allowed PLL reference clock frequencies.  Each enumerator is used
+ * as an index into ssc_lookup[], so the ordering here must match the
+ * ordering of that table.
+ */
+enum pll_frequencies {
+ REF_19_2M = 0,
+ REF_20M,
+ REF_24M,
+ REF_26M,
+ REF_27M,
+ REF_38_4M,
+ REF_40M,
+ REF_52M,
+ REF_100M,
+ REF_108M,
+ REF_125M,
+ REF_135M,
+ REF_150M,
+};
+
+/**
+ * struct xpsgtr_phy - representation of a lane
+ * @phy: pointer to the kernel PHY device
+ * @type: controller which uses this lane (XPSGTR_TYPE_*)
+ * @lane: lane number
+ * @protocol: protocol in which the lane operates (ICM_PROTOCOL_*)
+ * @ref_clk: enum of allowed ref clock rates for this lane PLL
+ *           (also the index into ssc_lookup[])
+ * @pll_lock: PLL status
+ * @skip_phy_init: skip phy_init() if true
+ * @data: pointer to hold private data (the parent struct xpsgtr_dev)
+ * @refclk_rate: PLL reference clock frequency
+ * @share_laneclk: lane number of the clock to be shared
+ */
+struct xpsgtr_phy {
+ struct phy *phy;
+ u8 type;
+ u8 lane;
+ u8 protocol;
+ enum pll_frequencies ref_clk;
+ bool pll_lock;
+ bool skip_phy_init;
+ void *data;
+ u32 refclk_rate;
+ u32 share_laneclk;
+};
+
+/**
+ * struct xpsgtr_ssc - structure to hold SSC settings for a lane
+ * @refclk_rate: PLL reference clock frequency in Hz
+ * @pll_ref_clk: value to be written to register for corresponding ref clk rate
+ * @steps: number of steps of SSC (Spread Spectrum Clock)
+ * @step_size: step size of each step
+ */
+struct xpsgtr_ssc {
+ u32 refclk_rate;
+ u8 pll_ref_clk;
+ u32 steps;
+ u32 step_size;
+};
+
+/*
+ * lookup table to hold all settings needed for a ref clock frequency;
+ * row order must match enum pll_frequencies (rows are indexed by
+ * gtr_phy->ref_clk).  NOTE(review): read-only -- could be const.
+ */
+static struct xpsgtr_ssc ssc_lookup[] = {
+ {19200000, 0x05, 608, 264020},
+ {20000000, 0x06, 634, 243454},
+ {24000000, 0x07, 760, 168973},
+ {26000000, 0x08, 824, 143860},
+ {27000000, 0x09, 856, 86551},
+ {38400000, 0x0A, 1218, 65896},
+ {40000000, 0x0B, 634, 243454},
+ {52000000, 0x0C, 824, 143860},
+ {100000000, 0x0D, 1058, 87533},
+ {108000000, 0x0E, 856, 86551},
+ {125000000, 0x0F, 992, 119497},
+ {135000000, 0x10, 1070, 55393},
+ {150000000, 0x11, 792, 187091}
+};
+
+/**
+ * struct xpsgtr_dev - representation of a ZynMP GT device
+ * @dev: pointer to device
+ * @serdes: serdes base address
+ * @siou: siou base address
+ * @gtr_mutex: mutex for locking
+ * @phys: pointer to all the lanes
+ * @tx_term_fix: fix for GT issue
+ * @saved_icm_cfg0: stored value of ICM CFG0 register
+ * @saved_icm_cfg1: stored value of ICM CFG1 register
+ * @sata_rst: a reset control for SATA
+ * @dp_rst: a reset control for DP
+ * @usb0_crst: a reset control for usb0 core
+ * @usb1_crst: a reset control for usb1 core
+ * @usb0_hibrst: a reset control for usb0 hibernation module
+ * @usb1_hibrst: a reset control for usb1 hibernation module
+ * @usb0_apbrst: a reset control for usb0 apb bus
+ * @usb1_apbrst: a reset control for usb1 apb bus
+ * @gem0_rst: a reset control for gem0
+ * @gem1_rst: a reset control for gem1
+ * @gem2_rst: a reset control for gem2
+ * @gem3_rst: a reset control for gem3
+ *
+ * One instance covers all lanes; per-lane state lives in the
+ * struct xpsgtr_phy entries referenced from @phys.
+ */
+struct xpsgtr_dev {
+ struct device *dev;
+ void __iomem *serdes;
+ void __iomem *siou;
+ struct mutex gtr_mutex; /* mutex for locking */
+ struct xpsgtr_phy **phys;
+ bool tx_term_fix;
+ unsigned int saved_icm_cfg0;
+ unsigned int saved_icm_cfg1;
+ struct reset_control *sata_rst;
+ struct reset_control *dp_rst;
+ struct reset_control *usb0_crst;
+ struct reset_control *usb1_crst;
+ struct reset_control *usb0_hibrst;
+ struct reset_control *usb1_hibrst;
+ struct reset_control *usb0_apbrst;
+ struct reset_control *usb1_apbrst;
+ struct reset_control *gem0_rst;
+ struct reset_control *gem1_rst;
+ struct reset_control *gem2_rst;
+ struct reset_control *gem3_rst;
+};
+
+static const struct zynqmp_eemi_ops *eemi_ops;
+
+/**
+ * xpsgtr_override_deemph - override the TX pre-emphasis for a DP lane
+ * @phy: pointer to the lane PHY
+ * @plvl: pre-emphasis level (0-3)
+ * @vlvl: voltage swing level (0-3)
+ *
+ * Return: 0 on success, -EINVAL for an out-of-range level pair
+ */
+int xpsgtr_override_deemph(struct phy *phy, u8 plvl, u8 vlvl)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ static u8 pe[4][4] = { { 0x2, 0x2, 0x2, 0x2 },
+ { 0x1, 0x1, 0x1, 0xff },
+ { 0x0, 0x0, 0xff, 0xff },
+ { 0xff, 0xff, 0xff, 0xff } };
+
+ /* Guard the 4x4 table against out-of-bounds reads */
+ if (plvl > 3 || vlvl > 3)
+ return -EINVAL;
+
+ writel(pe[plvl][vlvl],
+ gtr_dev->serdes + gtr_phy->lane * TX_ANA_TM_18_OFFSET +
+ L0_TX_ANA_TM_18);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xpsgtr_override_deemph);
+
+/**
+ * xpsgtr_margining_factor - override the voltage swing for a DP lane
+ * @phy: pointer to the lane PHY
+ * @plvl: pre-emphasis level (0-3)
+ * @vlvl: voltage swing level (0-3)
+ *
+ * Return: 0 on success, -EINVAL for an out-of-range level pair
+ */
+int xpsgtr_margining_factor(struct phy *phy, u8 plvl, u8 vlvl)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ static u8 vs[4][4] = { { 0x2a, 0x27, 0x24, 0x20 },
+ { 0x27, 0x23, 0x20, 0xff },
+ { 0x24, 0x20, 0xff, 0xff },
+ { 0xff, 0xff, 0xff, 0xff } };
+
+ /* Guard the 4x4 table against out-of-bounds reads */
+ if (plvl > 3 || vlvl > 3)
+ return -EINVAL;
+
+ writel(vs[plvl][vlvl],
+ gtr_dev->serdes + gtr_phy->lane * TXPMD_TM_48_OFFSET +
+ L0_TXPMD_TM_48);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xpsgtr_margining_factor);
+
+/**
+ * xpsgtr_configure_pll - configures SSC settings for a lane
+ * @gtr_phy: pointer to lane
+ *
+ * Programs the PLL reference clock select, optional lane clock
+ * sharing, and the spread-spectrum step size (26 bits split over four
+ * registers) and step count (11 bits over two registers), forcing the
+ * SSC values with FORCE_STEP_SIZE | FORCE_STEPS at the end.
+ */
+static void xpsgtr_configure_pll(struct xpsgtr_phy *gtr_phy)
+{
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ u32 reg;
+ u32 offset;
+ u32 steps;
+ u32 size;
+ u8 pll_ref_clk;
+
+ steps = ssc_lookup[gtr_phy->ref_clk].steps;
+ size = ssc_lookup[gtr_phy->ref_clk].step_size;
+ pll_ref_clk = ssc_lookup[gtr_phy->ref_clk].pll_ref_clk;
+
+ offset = gtr_phy->lane * PLL_REF_OFFSET + PLL_REF_SEL0;
+ reg = readl(gtr_dev->serdes + offset);
+ reg = (reg & ~PLL_FREQ_MASK) | pll_ref_clk;
+ writel(reg, gtr_dev->serdes + offset);
+
+ /* Enable lane clock sharing, if required */
+ if (gtr_phy->share_laneclk != gtr_phy->lane) {
+ /* Lane3 Ref Clock Selection Register */
+ offset = gtr_phy->lane * PLL_REF_OFFSET + L0_L0_REF_CLK_SEL;
+ reg = readl(gtr_dev->serdes + offset);
+ reg = (reg & ~LANE_CLK_SHARE_MASK) |
+ (1 << gtr_phy->share_laneclk);
+ writel(reg, gtr_dev->serdes + offset);
+ }
+
+ /* SSC step size [7:0] */
+ offset = gtr_phy->lane * STEP_SIZE_OFFSET + L0_PLL_SS_STEP_SIZE_0_LSB;
+ reg = readl(gtr_dev->serdes + offset);
+ reg = (reg & ~STEP_SIZE_0_MASK) |
+ (size & STEP_SIZE_0_MASK);
+ writel(reg, gtr_dev->serdes + offset);
+
+ /* SSC step size [15:8] */
+ size = size >> 8;
+ offset = gtr_phy->lane * STEP_SIZE_OFFSET + L0_PLL_SS_STEP_SIZE_1;
+ reg = readl(gtr_dev->serdes + offset);
+ reg = (reg & ~STEP_SIZE_1_MASK) |
+ (size & STEP_SIZE_1_MASK);
+ writel(reg, gtr_dev->serdes + offset);
+
+ /* SSC step size [23:16] */
+ size = size >> 8;
+ offset = gtr_phy->lane * STEP_SIZE_OFFSET + L0_PLL_SS_STEP_SIZE_2;
+ reg = readl(gtr_dev->serdes + offset);
+ reg = (reg & ~STEP_SIZE_2_MASK) |
+ (size & STEP_SIZE_2_MASK);
+ writel(reg, gtr_dev->serdes + offset);
+
+ /* SSC steps [7:0] */
+ offset = gtr_phy->lane * STEPS_OFFSET + L0_PLL_SS_STEPS_0_LSB;
+ reg = readl(gtr_dev->serdes + offset);
+ reg = (reg & ~STEPS_0_MASK) |
+ (steps & STEPS_0_MASK);
+ writel(reg, gtr_dev->serdes + offset);
+
+ /* SSC steps [10:8] */
+ steps = steps >> 8;
+ offset = gtr_phy->lane * STEPS_OFFSET + L0_PLL_SS_STEPS_1_MSB;
+ reg = readl(gtr_dev->serdes + offset);
+ reg = (reg & ~STEPS_1_MASK) |
+ (steps & STEPS_1_MASK);
+ writel(reg, gtr_dev->serdes + offset);
+
+ /* SSC step size [25:24]; size was already shifted down 16 bits */
+ size = size >> 8;
+ offset = gtr_phy->lane * STEP_SIZE_OFFSET + L0_PLL_SS_STEP_SIZE_3_MSB;
+ reg = readl(gtr_dev->serdes + offset);
+ reg = (reg & ~STEP_SIZE_3_MASK) |
+ (size & STEP_SIZE_3_MASK);
+ reg |= FORCE_STEP_SIZE | FORCE_STEPS;
+ writel(reg, gtr_dev->serdes + offset);
+}
+
+/**
+ * xpsgtr_lane_setprotocol - sets required protocol in ICM registers
+ * @gtr_phy: pointer to lane
+ *
+ * Lanes 0/1 live in ICM_CFG0 and lanes 2/3 in ICM_CFG1; odd lanes use
+ * the upper nibble of the register.
+ */
+static void xpsgtr_lane_setprotocol(struct xpsgtr_phy *gtr_phy)
+{
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ u32 reg;
+ u8 protocol = gtr_phy->protocol;
+
+ switch (gtr_phy->lane) {
+ case 0:
+ reg = readl(gtr_dev->serdes + ICM_CFG0);
+ reg = (reg & ~ICM_CFG0_L0_MASK) | protocol;
+ writel(reg, gtr_dev->serdes + ICM_CFG0);
+ break;
+ case 1:
+ reg = readl(gtr_dev->serdes + ICM_CFG0);
+ reg = (reg & ~ICM_CFG0_L1_MASK) | (protocol << 4);
+ writel(reg, gtr_dev->serdes + ICM_CFG0);
+ break;
+ case 2:
+ reg = readl(gtr_dev->serdes + ICM_CFG1);
+ /* use the dedicated L2 mask (same value as the L0 mask) */
+ reg = (reg & ~ICM_CFG1_L2_MASK) | protocol;
+ writel(reg, gtr_dev->serdes + ICM_CFG1);
+ break;
+ case 3:
+ reg = readl(gtr_dev->serdes + ICM_CFG1);
+ /* use the dedicated L3 mask (same value as the L1 mask) */
+ reg = (reg & ~ICM_CFG2_L3_MASK) | (protocol << 4);
+ writel(reg, gtr_dev->serdes + ICM_CFG1);
+ break;
+ default:
+ /* We already checked 0 <= lane <= 3 */
+ break;
+ }
+}
+
+/**
+ * xpsgtr_get_ssc - gets the required ssc settings based on clk rate
+ * @gtr_phy: pointer to lane
+ *
+ * Return: 0 on success or error on failure
+ */
+static int xpsgtr_get_ssc(struct xpsgtr_phy *gtr_phy)
+{
+ u32 idx;
+
+ /*
+ * Pick the spread-spectrum (SSC) entry whose reference clock
+ * rate matches this lane's reference clock.
+ */
+ for (idx = 0; idx < ARRAY_SIZE(ssc_lookup); idx++) {
+ if (ssc_lookup[idx].refclk_rate == gtr_phy->refclk_rate) {
+ gtr_phy->ref_clk = idx;
+ return 0;
+ }
+ }
+
+ /* No table entry for this clock rate */
+ return -EINVAL;
+}
+
+/**
+ * xpsgtr_configure_lane - configures SSC settings for a lane
+ * @gtr_phy: pointer to lane
+ *
+ * Maps the lane's controller type onto its ICM protocol and then
+ * resolves the SSC settings for its reference clock rate.
+ *
+ * Return: 0 on success or error on failure
+ */
+static int xpsgtr_configure_lane(struct xpsgtr_phy *gtr_phy)
+{
+ switch (gtr_phy->type) {
+ case XPSGTR_TYPE_USB0:
+ case XPSGTR_TYPE_USB1:
+ gtr_phy->protocol = ICM_PROTOCOL_USB;
+ break;
+ case XPSGTR_TYPE_SATA_0:
+ case XPSGTR_TYPE_SATA_1:
+ gtr_phy->protocol = ICM_PROTOCOL_SATA;
+ break;
+ case XPSGTR_TYPE_DP_0:
+ case XPSGTR_TYPE_DP_1:
+ gtr_phy->protocol = ICM_PROTOCOL_DP;
+ break;
+ case XPSGTR_TYPE_PCIE_0:
+ case XPSGTR_TYPE_PCIE_1:
+ case XPSGTR_TYPE_PCIE_2:
+ case XPSGTR_TYPE_PCIE_3:
+ gtr_phy->protocol = ICM_PROTOCOL_PCIE;
+ break;
+ case XPSGTR_TYPE_SGMII0:
+ case XPSGTR_TYPE_SGMII1:
+ case XPSGTR_TYPE_SGMII2:
+ case XPSGTR_TYPE_SGMII3:
+ gtr_phy->protocol = ICM_PROTOCOL_SGMII;
+ break;
+ default:
+ gtr_phy->protocol = ICM_PROTOCOL_PD;
+ break;
+ }
+
+ /* Resolve the SSC settings for this reference clock rate */
+ return xpsgtr_get_ssc(gtr_phy);
+}
+
+/**
+ * xpsgtr_config_usbpipe - configures the PIPE3 signals for USB
+ * @gtr_phy: pointer to gtr phy device
+ */
+static void xpsgtr_config_usbpipe(struct xpsgtr_phy *gtr_phy)
+{
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ struct phy *phy = gtr_phy->phy;
+ void __iomem *regs = dev_get_platdata(&phy->dev);
+
+ if (!regs) {
+ dev_info(gtr_dev->dev,
+ "%s: No valid Platform_data found\n", __func__);
+ return;
+ }
+
+ /* Set PIPE power present signal */
+ writel(PIPE_POWER_ON, regs + PIPE_POWER_OFFSET);
+ /* Clear PIPE CLK signal */
+ writel(PIPE_CLK_OFF, regs + PIPE_CLK_OFFSET);
+}
+
+/**
+ * xpsgtr_reset_assert - asserts reset using reset framework
+ * @rstc: pointer to reset_control
+ *
+ * Return: 0 on success or error on failure
+ */
+static int xpsgtr_reset_assert(struct reset_control *rstc)
+{
+ unsigned long loop_time = msecs_to_jiffies(RST_TIMEOUT);
+ unsigned long timeout;
+ int ret;
+
+ /* Propagate the assert failure instead of silently polling */
+ ret = reset_control_assert(rstc);
+ if (ret)
+ return ret;
+
+ /* wait until reset is asserted or timeout */
+ timeout = jiffies + loop_time;
+
+ while (!time_after_eq(jiffies, timeout)) {
+ if (reset_control_status(rstc) > 0)
+ return 0;
+
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * xpsgtr_reset_release - de-asserts reset using reset framework
+ * @rstc: pointer to reset_control
+ *
+ * Return: 0 on success or error on failure
+ */
+static int xpsgtr_reset_release(struct reset_control *rstc)
+{
+ unsigned long loop_time = msecs_to_jiffies(RST_TIMEOUT);
+ unsigned long timeout;
+ int ret;
+
+ /* Propagate the deassert failure instead of silently polling */
+ ret = reset_control_deassert(rstc);
+ if (ret)
+ return ret;
+
+ /* wait until reset is de-asserted or timeout */
+ timeout = jiffies + loop_time;
+ while (!time_after_eq(jiffies, timeout)) {
+ if (!reset_control_status(rstc))
+ return 0;
+
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * xpsgtr_controller_reset - puts controller in reset
+ * @gtr_phy: pointer to lane
+ *
+ * Return: 0 on success or error on failure
+ */
+static int xpsgtr_controller_reset(struct xpsgtr_phy *gtr_phy)
+{
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ int ret;
+
+ switch (gtr_phy->type) {
+ case XPSGTR_TYPE_USB0:
+ /* Stop on the first failing assert instead of overwriting it */
+ ret = xpsgtr_reset_assert(gtr_dev->usb0_crst);
+ if (!ret)
+ ret = xpsgtr_reset_assert(gtr_dev->usb0_hibrst);
+ if (!ret)
+ ret = xpsgtr_reset_assert(gtr_dev->usb0_apbrst);
+ break;
+ case XPSGTR_TYPE_USB1:
+ ret = xpsgtr_reset_assert(gtr_dev->usb1_crst);
+ if (!ret)
+ ret = xpsgtr_reset_assert(gtr_dev->usb1_hibrst);
+ if (!ret)
+ ret = xpsgtr_reset_assert(gtr_dev->usb1_apbrst);
+ break;
+ case XPSGTR_TYPE_SATA_0:
+ case XPSGTR_TYPE_SATA_1:
+ ret = xpsgtr_reset_assert(gtr_dev->sata_rst);
+ break;
+ case XPSGTR_TYPE_DP_0:
+ case XPSGTR_TYPE_DP_1:
+ ret = xpsgtr_reset_assert(gtr_dev->dp_rst);
+ break;
+ case XPSGTR_TYPE_SGMII0:
+ ret = xpsgtr_reset_assert(gtr_dev->gem0_rst);
+ break;
+ case XPSGTR_TYPE_SGMII1:
+ ret = xpsgtr_reset_assert(gtr_dev->gem1_rst);
+ break;
+ case XPSGTR_TYPE_SGMII2:
+ ret = xpsgtr_reset_assert(gtr_dev->gem2_rst);
+ break;
+ case XPSGTR_TYPE_SGMII3:
+ ret = xpsgtr_reset_assert(gtr_dev->gem3_rst);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * xpsgtr_controller_release_reset - releases controller from reset
+ * @gtr_phy: pointer to lane
+ *
+ * Return: 0 on success or error on failure
+ */
+static int xpsgtr_controller_release_reset(struct xpsgtr_phy *gtr_phy)
+{
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ int ret;
+
+ switch (gtr_phy->type) {
+ case XPSGTR_TYPE_USB0:
+ /* Check every release; earlier failures were being dropped */
+ ret = xpsgtr_reset_release(gtr_dev->usb0_apbrst);
+
+ /* Config PIPE3 signals after releasing APB reset */
+ xpsgtr_config_usbpipe(gtr_phy);
+
+ if (!ret)
+ ret = xpsgtr_reset_release(gtr_dev->usb0_crst);
+ if (!ret)
+ ret = xpsgtr_reset_release(gtr_dev->usb0_hibrst);
+ break;
+ case XPSGTR_TYPE_USB1:
+ ret = xpsgtr_reset_release(gtr_dev->usb1_apbrst);
+
+ /* Config PIPE3 signals after releasing APB reset */
+ xpsgtr_config_usbpipe(gtr_phy);
+
+ if (!ret)
+ ret = xpsgtr_reset_release(gtr_dev->usb1_crst);
+ if (!ret)
+ ret = xpsgtr_reset_release(gtr_dev->usb1_hibrst);
+ break;
+ case XPSGTR_TYPE_SATA_0:
+ case XPSGTR_TYPE_SATA_1:
+ ret = xpsgtr_reset_release(gtr_dev->sata_rst);
+ break;
+ case XPSGTR_TYPE_DP_0:
+ case XPSGTR_TYPE_DP_1:
+ ret = xpsgtr_reset_release(gtr_dev->dp_rst);
+ break;
+ case XPSGTR_TYPE_SGMII0:
+ ret = xpsgtr_reset_release(gtr_dev->gem0_rst);
+ break;
+ case XPSGTR_TYPE_SGMII1:
+ ret = xpsgtr_reset_release(gtr_dev->gem1_rst);
+ break;
+ case XPSGTR_TYPE_SGMII2:
+ ret = xpsgtr_reset_release(gtr_dev->gem2_rst);
+ break;
+ case XPSGTR_TYPE_SGMII3:
+ ret = xpsgtr_reset_release(gtr_dev->gem3_rst);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * xpsgtr_usb_crst_assert - assert USB core reset
+ * @phy: pointer to phy
+ *
+ * Return: 0 on success or error on failure
+ */
+int xpsgtr_usb_crst_assert(struct phy *phy)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ int ret;
+
+ switch (gtr_phy->type) {
+ case XPSGTR_TYPE_USB0:
+ ret = xpsgtr_reset_assert(gtr_dev->usb0_crst);
+ break;
+ case XPSGTR_TYPE_USB1:
+ ret = xpsgtr_reset_assert(gtr_dev->usb1_crst);
+ break;
+ default:
+ /* Not a USB lane */
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(xpsgtr_usb_crst_assert);
+
+/**
+ * xpsgtr_usb_crst_release - release USB core reset
+ * @phy: pointer to phy
+ *
+ * Return: 0 on success or error on failure
+ */
+int xpsgtr_usb_crst_release(struct phy *phy)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ int ret;
+
+ switch (gtr_phy->type) {
+ case XPSGTR_TYPE_USB0:
+ ret = xpsgtr_reset_release(gtr_dev->usb0_crst);
+ break;
+ case XPSGTR_TYPE_USB1:
+ ret = xpsgtr_reset_release(gtr_dev->usb1_crst);
+ break;
+ default:
+ /* Not a USB lane */
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(xpsgtr_usb_crst_release);
+
+/**
+ * xpsgtr_wait_pll_lock - wait for the lane PLL to report lock
+ * @phy: pointer to the kernel PHY device
+ *
+ * Polls the per-lane PLL status register in 1 us steps (up to 1000
+ * iterations) and records the result in gtr_phy->pll_lock.
+ *
+ * Return: 0 when the PLL locked, -ETIMEDOUT otherwise
+ */
+int xpsgtr_wait_pll_lock(struct phy *phy)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ u32 offset, reg;
+ u32 timeout = 1000;
+ int ret = 0;
+
+ /* Check pll is locked */
+ offset = gtr_phy->lane * PLL_STATUS_READ_OFFSET + L0_PLL_STATUS_READ_1;
+ dev_dbg(gtr_dev->dev, "Waiting for PLL lock...\n");
+
+ do {
+ reg = readl(gtr_dev->serdes + offset);
+ if ((reg & PLL_STATUS_LOCKED) == PLL_STATUS_LOCKED)
+ break;
+
+ if (!--timeout) {
+ dev_err(gtr_dev->dev, "PLL lock time out\n");
+ ret = -ETIMEDOUT;
+ break;
+ }
+ udelay(1);
+ } while (1);
+
+ if (ret == 0)
+ gtr_phy->pll_lock = true;
+
+ dev_info(gtr_dev->dev, "Lane:%d type:%d protocol:%d pll_locked:%s\n",
+ gtr_phy->lane, gtr_phy->type, gtr_phy->protocol,
+ gtr_phy->pll_lock ? "yes" : "no");
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xpsgtr_wait_pll_lock);
+
+/**
+ * xpsgtr_set_txwidth - This function sets the tx bus width of the lane
+ * @gtr_phy: pointer to lane
+ * @width: tx bus width size
+ *
+ * NOTE(review): the written value is "lane * PROT_BUS_WIDTH_SHIFT >> width",
+ * which looks suspicious -- a per-lane field write would normally be
+ * "width << (lane * PROT_BUS_WIDTH_SHIFT)".  Confirm against the SerDes
+ * register spec before relying on multi-lane width programming.
+ */
+static void xpsgtr_set_txwidth(struct xpsgtr_phy *gtr_phy, u32 width)
+{
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+
+ writel(gtr_phy->lane * PROT_BUS_WIDTH_SHIFT >> width,
+ gtr_dev->serdes + TX_PROT_BUS_WIDTH);
+}
+
+/**
+ * xpsgtr_set_rxwidth - This function sets the rx bus width of the lane
+ * @gtr_phy: pointer to lane
+ * @width: rx bus width size
+ */
+static void xpsgtr_set_rxwidth(struct xpsgtr_phy *gtr_phy, u32 width)
+{
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+
+ /*
+ * Shift the width value into this lane's field of RX_PROT_BUS_WIDTH.
+ * The previous "lane * PROT_BUS_WIDTH_SHIFT >> width" expression was
+ * an operator-precedence bug that wrote 0 for lane 0 regardless of
+ * the requested width.
+ */
+ writel(width << (gtr_phy->lane * PROT_BUS_WIDTH_SHIFT),
+ gtr_dev->serdes + RX_PROT_BUS_WIDTH);
+}
+
+/**
+ * xpsgtr_bypass_scramenc - This bypasses scrambler and 8b/10b encoder feature
+ * @gtr_phy: pointer to lane
+ *
+ * Used for protocols (SGMII, SATA) that handle line coding themselves.
+ */
+static void xpsgtr_bypass_scramenc(struct xpsgtr_phy *gtr_phy)
+{
+ u32 offset;
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+
+ /* bypass Scrambler and 8b/10b Encoder */
+ offset = gtr_phy->lane * TX_DIG_61_OFFSET + L0_TX_DIG_61;
+ writel(TM_DISABLE_SCRAMBLE_ENCODER, gtr_dev->serdes + offset);
+}
+
+/**
+ * xpsgtr_bypass_descramdec - bypasses descrambler and 8b/10b decoder feature
+ * @gtr_phy: pointer to lane
+ *
+ * RX-side counterpart of xpsgtr_bypass_scramenc().
+ */
+static void xpsgtr_bypass_descramdec(struct xpsgtr_phy *gtr_phy)
+{
+ u32 offset;
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+
+ /* bypass Descrambler and 8b/10b decoder */
+ offset = gtr_phy->lane * TM_DIG_6_OFFSET + L0_TM_DIG_6;
+ writel(TM_DISABLE_DESCRAMBLE_DECODER, gtr_dev->serdes + offset);
+}
+
+/**
+ * xpsgtr_misc_sgmii - miscellaneous settings for SGMII
+ * @gtr_phy: pointer to lane
+ *
+ * Programs 10-bit TX/RX bus widths and bypasses the scrambler/encoder
+ * and descrambler/decoder, which SGMII does not use.
+ */
+static void xpsgtr_misc_sgmii(struct xpsgtr_phy *gtr_phy)
+{
+ /* Set SGMII protocol tx bus width 10 bits */
+ xpsgtr_set_txwidth(gtr_phy, PROT_BUS_WIDTH_10);
+
+ /* Set SGMII protocol rx bus width 10 bits */
+ xpsgtr_set_rxwidth(gtr_phy, PROT_BUS_WIDTH_10);
+
+ /* bypass Descrambler and 8b/10b decoder */
+ xpsgtr_bypass_descramdec(gtr_phy);
+
+ /* bypass Scrambler and 8b/10b Encoder */
+ xpsgtr_bypass_scramenc(gtr_phy);
+}
+
+/**
+ * xpsgtr_misc_sata - miscellaneous settings for SATA
+ * @gtr_phy: pointer to lane
+ *
+ * Bypasses the line-coding blocks and records the lane number in the
+ * SIOU SATA control register so the SATA controller uses this lane.
+ */
+static void xpsgtr_misc_sata(struct xpsgtr_phy *gtr_phy)
+{
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+
+ /* bypass Descrambler and 8b/10b decoder */
+ xpsgtr_bypass_descramdec(gtr_phy);
+
+ /* bypass Scrambler and 8b/10b Encoder */
+ xpsgtr_bypass_scramenc(gtr_phy);
+
+ /* Route this lane to the SATA controller via the SIOU */
+ writel(gtr_phy->lane, gtr_dev->siou + SATA_CONTROL_OFFSET);
+}
+
+/**
+ * xpsgtr_ulpi_reset - This function perform's ULPI reset sequence.
+ * @gtr_phy: pointer to lane
+ *
+ * The reset itself is carried out by the PMU firmware via the EEMI
+ * IOCTL interface, keyed on the USB controller node owning this lane.
+ *
+ * Return: 0 on success, -EINVAL on non existing USB type or error from
+ * communication with firmware
+ */
+static int xpsgtr_ulpi_reset(struct xpsgtr_phy *gtr_phy)
+{
+ u32 node_id;
+ int ret = 0;
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+
+ if (!eemi_ops->ioctl)
+ return -ENOTSUPP;
+
+ switch (gtr_phy->type) {
+ case XPSGTR_TYPE_USB0:
+ node_id = NODE_USB_0;
+ break;
+ case XPSGTR_TYPE_USB1:
+ node_id = NODE_USB_1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = eemi_ops->ioctl(node_id, IOCTL_ULPI_RESET, 0, 0, NULL);
+ if (ret < 0)
+ dev_err(gtr_dev->dev, "failed to perform ULPI reset\n");
+
+ return ret;
+}
+
+/**
+ * xpsgtr_set_sgmii_pcs - This function sets the sgmii mode for GEM.
+ * @gtr_phy: pointer to lane
+ *
+ * Asks the PMU firmware (EEMI IOCTL) to switch the matching GEM
+ * instance into SGMII mode and assert the PCS signal detect.
+ *
+ * Return: 0 on success, -EINVAL on non existing SGMII type or error from
+ * communication with firmware
+ */
+static int xpsgtr_set_sgmii_pcs(struct xpsgtr_phy *gtr_phy)
+{
+ u32 node_id;
+ int ret = 0;
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+
+ if (!eemi_ops->ioctl)
+ return -ENOTSUPP;
+
+ /* Set the PCS signal detect to 1 */
+ switch (gtr_phy->type) {
+ case XPSGTR_TYPE_SGMII0:
+ node_id = NODE_ETH_0;
+ break;
+ case XPSGTR_TYPE_SGMII1:
+ node_id = NODE_ETH_1;
+ break;
+ case XPSGTR_TYPE_SGMII2:
+ node_id = NODE_ETH_2;
+ break;
+ case XPSGTR_TYPE_SGMII3:
+ node_id = NODE_ETH_3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = eemi_ops->ioctl(node_id, IOCTL_SET_SGMII_MODE,
+ PM_SGMII_ENABLE, 0, NULL);
+ if (ret < 0) {
+ dev_err(gtr_dev->dev, "failed to set GEM to SGMII mode\n");
+ return ret;
+ }
+
+ /* NOTE(review): this return duplicates the branch above; both paths
+ * return ret, the branch only adds the error message.
+ */
+ return ret;
+}
+
+/**
+ * xpsgtr_phyinit_required - check if phy_init for the lane can be skipped
+ * @gtr_phy: pointer to the phy lane
+ *
+ * Return: true if phy_init can be skipped or false
+ */
+static bool xpsgtr_phyinit_required(struct xpsgtr_phy *gtr_phy)
+{
+ /*
+ * As USB may save the snapshot of the states during hibernation, doing
+ * phy_init() will put the USB controller into reset, resulting in the
+ * losing of the saved snapshot. So try to avoid phy_init() for USB
+ * except when gtr_phy->skip_phy_init is false (this happens when FPD is
+ * shutdown during suspend or when gt lane is changed from current one)
+ */
+ return gtr_phy->protocol == ICM_PROTOCOL_USB && gtr_phy->skip_phy_init;
+}
+
+/**
+ * xpsgtr_phy_init - initializes a lane
+ * @phy: pointer to kernel PHY device
+ *
+ * Full lane bring-up under gtr_mutex: controller reset, one-time TX
+ * termination calibration workaround (silicon v1 only), PLL/protocol
+ * configuration, protocol-specific tweaks, reset release and PLL lock
+ * wait. The register write order below is prescribed by the workaround
+ * sequence and must not be rearranged.
+ *
+ * Return: 0 on success or error on failure
+ */
+static int xpsgtr_phy_init(struct phy *phy)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+ struct xpsgtr_dev *gtr_dev = gtr_phy->data;
+ int ret = 0;
+ u32 offset;
+ u32 reg;
+ u32 nsw;
+ u32 timeout = 500; /* ~500 us calibration poll budget */
+
+ mutex_lock(&gtr_dev->gtr_mutex);
+
+ /* Check if phy_init() is required */
+ if (xpsgtr_phyinit_required(gtr_phy))
+ goto out;
+
+ /* Put controller in reset */
+ ret = xpsgtr_controller_reset(gtr_phy);
+ if (ret != 0) {
+ dev_err(gtr_dev->dev, "Failed to assert reset\n");
+ goto out;
+ }
+
+ /*
+ * There is a functional issue in the GT. The TX termination resistance
+ * can be out of spec due to a bug in the calibration logic. Below is
+ * the workaround to fix it. This below is required for XCZU9EG silicon.
+ */
+ if (gtr_dev->tx_term_fix) {
+ /* Enabling Test Mode control for CMN Rest */
+ reg = readl(gtr_dev->serdes + TM_CMN_RST);
+ reg = (reg & ~TM_CMN_RST_MASK) | TM_CMN_RST_SET;
+ writel(reg, gtr_dev->serdes + TM_CMN_RST);
+
+ /* Set Test Mode reset */
+ reg = readl(gtr_dev->serdes + TM_CMN_RST);
+ reg = (reg & ~TM_CMN_RST_MASK) | TM_CMN_RST_EN;
+ writel(reg, gtr_dev->serdes + TM_CMN_RST);
+
+ writel(0x00, gtr_dev->serdes + L3_TM_CALIB_DIG18);
+ writel(TM_OVERRIDE_NSW_CODE, gtr_dev->serdes +
+ L3_TM_CALIB_DIG19);
+
+ /* As a part of work around sequence for PMOS calibration fix,
+ * we need to configure any lane ICM_CFG to valid protocol. This
+ * will deassert the CMN_Resetn signal.
+ */
+ xpsgtr_lane_setprotocol(gtr_phy);
+
+ /* Clear Test Mode reset */
+ reg = readl(gtr_dev->serdes + TM_CMN_RST);
+ reg = (reg & ~TM_CMN_RST_MASK) | TM_CMN_RST_SET;
+ writel(reg, gtr_dev->serdes + TM_CMN_RST);
+
+ dev_dbg(gtr_dev->dev, "calibrating...\n");
+
+ do {
+ reg = readl(gtr_dev->serdes + L3_CALIB_DONE_STATUS);
+ if ((reg & CALIB_DONE) == CALIB_DONE)
+ break;
+
+ if (!--timeout) {
+ dev_err(gtr_dev->dev, "calibration time out\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ udelay(1);
+ } while (1);
+
+ dev_dbg(gtr_dev->dev, "calibration done\n");
+
+ /* Reading NMOS Register Code */
+ nsw = readl(gtr_dev->serdes + L0_TXPMA_ST_3);
+
+ /* Set Test Mode reset */
+ reg = readl(gtr_dev->serdes + TM_CMN_RST);
+ reg = (reg & ~TM_CMN_RST_MASK) | TM_CMN_RST_EN;
+ writel(reg, gtr_dev->serdes + TM_CMN_RST);
+
+ nsw = nsw & DN_CALIB_CODE;
+
+ /* Writing NMOS register values back [5:3] */
+ reg = nsw >> DN_CALIB_SHIFT;
+ writel(reg, gtr_dev->serdes + L3_TM_CALIB_DIG19);
+
+ /* Writing NMOS register value [2:0] */
+ reg = ((nsw & 0x7) << NSW_SHIFT) | (1 << NSW_PIPE_SHIFT);
+ writel(reg, gtr_dev->serdes + L3_TM_CALIB_DIG18);
+
+ /* Clear Test Mode reset */
+ reg = readl(gtr_dev->serdes + TM_CMN_RST);
+ reg = (reg & ~TM_CMN_RST_MASK) | TM_CMN_RST_SET;
+ writel(reg, gtr_dev->serdes + TM_CMN_RST);
+
+ /* One-shot workaround: only the first initialized lane runs it */
+ gtr_dev->tx_term_fix = false;
+ }
+
+ /* Enable coarse code saturation limiting logic */
+ offset = gtr_phy->lane * TM_PLL_DIG_37_OFFSET + L0_TM_PLL_DIG_37;
+ writel(TM_COARSE_CODE_LIMIT, gtr_dev->serdes + offset);
+
+ xpsgtr_configure_pll(gtr_phy);
+ xpsgtr_lane_setprotocol(gtr_phy);
+
+ if (gtr_phy->protocol == ICM_PROTOCOL_SATA)
+ xpsgtr_misc_sata(gtr_phy);
+
+ if (gtr_phy->protocol == ICM_PROTOCOL_SGMII)
+ xpsgtr_misc_sgmii(gtr_phy);
+
+ /* Bring controller out of reset */
+ ret = xpsgtr_controller_release_reset(gtr_phy);
+ if (ret != 0) {
+ dev_err(gtr_dev->dev, "Failed to release reset\n");
+ goto out;
+ }
+
+ /* Wait till pll is locked for all protocols except DP. For DP
+ * pll locking function will be called from driver.
+ */
+ if (gtr_phy->protocol != ICM_PROTOCOL_DP) {
+ ret = xpsgtr_wait_pll_lock(phy);
+ if (ret != 0)
+ goto out;
+ } else {
+ /* DP: preload TX pre-emphasis/voltage-swing override values */
+ offset = gtr_phy->lane * TXPMD_TM_45_OFFSET + L0_TXPMD_TM_45;
+ reg = L0_TXPMD_TM_45_OVER_DP_MAIN |
+ L0_TXPMD_TM_45_ENABLE_DP_MAIN |
+ L0_TXPMD_TM_45_OVER_DP_POST1 |
+ L0_TXPMD_TM_45_OVER_DP_POST2 |
+ L0_TXPMD_TM_45_ENABLE_DP_POST2;
+ writel(reg, gtr_dev->serdes + offset);
+ offset = gtr_phy->lane * TX_ANA_TM_118_OFFSET +
+ L0_TX_ANA_TM_118;
+ writel(L0_TX_ANA_TM_118_FORCE_17_0,
+ gtr_dev->serdes + offset);
+ }
+
+ /* Do ULPI reset for usb */
+ if (gtr_phy->protocol == ICM_PROTOCOL_USB)
+ ret = xpsgtr_ulpi_reset(gtr_phy);
+
+ /* Select SGMII Mode for GEM and set the PCS Signal detect*/
+ if (gtr_phy->protocol == ICM_PROTOCOL_SGMII)
+ ret = xpsgtr_set_sgmii_pcs(gtr_phy);
+out:
+ mutex_unlock(&gtr_dev->gtr_mutex);
+ return ret;
+}
+
+/**
+ * xpsgtr_set_lanetype - derives lane type from dts arguments
+ * @gtr_phy: pointer to lane
+ * @controller: type of controller
+ * @instance_num: instance number of the controller in case multilane controller
+ *
+ * Maps the generic PHY_TYPE_* controller constant plus instance number
+ * to the driver's XPSGTR_TYPE_* lane type. Instance counts per
+ * controller: SATA/USB3/DP support 0-1, PCIE/SGMII support 0-3.
+ *
+ * Return: 0 on success or error on failure
+ */
+static int xpsgtr_set_lanetype(struct xpsgtr_phy *gtr_phy, u8 controller,
+ u8 instance_num)
+{
+ switch (controller) {
+ case PHY_TYPE_SATA:
+ if (instance_num == 0)
+ gtr_phy->type = XPSGTR_TYPE_SATA_0;
+ else if (instance_num == 1)
+ gtr_phy->type = XPSGTR_TYPE_SATA_1;
+ else
+ return -EINVAL;
+ break;
+ case PHY_TYPE_USB3:
+ if (instance_num == 0)
+ gtr_phy->type = XPSGTR_TYPE_USB0;
+ else if (instance_num == 1)
+ gtr_phy->type = XPSGTR_TYPE_USB1;
+ else
+ return -EINVAL;
+ break;
+ case PHY_TYPE_DP:
+ if (instance_num == 0)
+ gtr_phy->type = XPSGTR_TYPE_DP_0;
+ else if (instance_num == 1)
+ gtr_phy->type = XPSGTR_TYPE_DP_1;
+ else
+ return -EINVAL;
+ break;
+ case PHY_TYPE_PCIE:
+ if (instance_num == 0)
+ gtr_phy->type = XPSGTR_TYPE_PCIE_0;
+ else if (instance_num == 1)
+ gtr_phy->type = XPSGTR_TYPE_PCIE_1;
+ else if (instance_num == 2)
+ gtr_phy->type = XPSGTR_TYPE_PCIE_2;
+ else if (instance_num == 3)
+ gtr_phy->type = XPSGTR_TYPE_PCIE_3;
+ else
+ return -EINVAL;
+ break;
+ case PHY_TYPE_SGMII:
+ if (instance_num == 0)
+ gtr_phy->type = XPSGTR_TYPE_SGMII0;
+ else if (instance_num == 1)
+ gtr_phy->type = XPSGTR_TYPE_SGMII1;
+ else if (instance_num == 2)
+ gtr_phy->type = XPSGTR_TYPE_SGMII2;
+ else if (instance_num == 3)
+ gtr_phy->type = XPSGTR_TYPE_SGMII3;
+ else
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * xpsgtr_xlate - provides a PHY specific to a controller
+ * @dev: pointer to device
+ * @args: arguments from dts
+ *
+ * Resolves a 4-cell phandle (controller type, instance, lane sharing,
+ * refclk rate) to the matching lane, configures the lane, and checks
+ * the requested lane type against the interconnect matrix.
+ *
+ * Return: pointer to kernel PHY device or error on failure
+ */
+static struct phy *xpsgtr_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
+ struct xpsgtr_phy *gtr_phy = NULL;
+ struct device_node *phynode = args->np;
+ int index;
+ int i;
+ u8 controller;
+ u8 instance_num;
+
+ if (args->args_count != 4) {
+ dev_err(dev, "Invalid number of cells in 'phy' property\n");
+ return ERR_PTR(-EINVAL);
+ }
+ if (!of_device_is_available(phynode)) {
+ dev_warn(dev, "requested PHY is disabled\n");
+ return ERR_PTR(-ENODEV);
+ }
+ for (index = 0; index < of_get_child_count(dev->of_node); index++) {
+ if (phynode == gtr_dev->phys[index]->phy->dev.of_node) {
+ gtr_phy = gtr_dev->phys[index];
+ break;
+ }
+ }
+ if (!gtr_phy) {
+ dev_err(dev, "failed to find appropriate phy\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* get type of controller from phys */
+ controller = args->args[0];
+
+ /* get controller instance number */
+ instance_num = args->args[1];
+
+ /* Check if lane sharing is required */
+ gtr_phy->share_laneclk = args->args[2];
+
+ /* get the required clk rate for controller from phys */
+ gtr_phy->refclk_rate = args->args[3];
+
+ /* derive lane type */
+ if (xpsgtr_set_lanetype(gtr_phy, controller, instance_num) < 0) {
+ dev_err(gtr_dev->dev, "Invalid lane type\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* configures SSC settings for a lane */
+ if (xpsgtr_configure_lane(gtr_phy) < 0) {
+ dev_err(gtr_dev->dev, "Invalid clock rate: %d\n",
+ gtr_phy->refclk_rate);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * Check Interconnect Matrix is obeyed i.e, given lane type
+ * is allowed to operate on the lane.
+ * NOTE(review): 'index' here is the lane index matched in the loop
+ * above; the !gtr_phy check guarantees the loop broke early, so
+ * index is in range when used to row-index icm_matrix.
+ */
+ for (i = 0; i < CONTROLLERS_PER_LANE; i++) {
+ if (icm_matrix[index][i] == gtr_phy->type)
+ return gtr_phy->phy;
+ }
+
+ /* Should not reach here */
+ return ERR_PTR(-EINVAL);
+}
+
+/**
+ * xpsgtr_phy_exit - clears previous initialized variables
+ * @phy: pointer to kernel PHY device
+ *
+ * Return: 0 on success
+ */
+static int xpsgtr_phy_exit(struct phy *phy)
+{
+ struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+
+ /* As we are exiting, clear skip_phy_init flag so that the next
+ * phy_init() reprograms the lane from scratch.
+ */
+ gtr_phy->skip_phy_init = false;
+
+ return 0;
+}
+
+/* PHY operations table; const because the PHY core never modifies it
+ * and devm_phy_create() takes a const struct phy_ops pointer.
+ */
+static const struct phy_ops xpsgtr_phyops = {
+ .init = xpsgtr_phy_init,
+ .exit = xpsgtr_phy_exit,
+ .owner = THIS_MODULE,
+};
+
+/**
+ * xpsgtr_get_resets - Gets reset signals based on reset-names property
+ * @gtr_dev: pointer to structure which stores reset information
+ *
+ * Looks up every reset line the PSGTR depends on. All lines are
+ * mandatory; the first failed lookup aborts with its error code.
+ *
+ * Return: 0 on success or error value on failure
+ */
+static int xpsgtr_get_resets(struct xpsgtr_dev *gtr_dev)
+{
+ /* Table of reset-names entries and the fields that receive them;
+ * replaces twelve copies of identical get/check/report logic.
+ */
+ const struct {
+ const char *name;
+ struct reset_control **rstc;
+ } resets[] = {
+ { "sata_rst", &gtr_dev->sata_rst },
+ { "dp_rst", &gtr_dev->dp_rst },
+ { "usb0_crst", &gtr_dev->usb0_crst },
+ { "usb1_crst", &gtr_dev->usb1_crst },
+ { "usb0_hibrst", &gtr_dev->usb0_hibrst },
+ { "usb1_hibrst", &gtr_dev->usb1_hibrst },
+ { "usb0_apbrst", &gtr_dev->usb0_apbrst },
+ { "usb1_apbrst", &gtr_dev->usb1_apbrst },
+ { "gem0_rst", &gtr_dev->gem0_rst },
+ { "gem1_rst", &gtr_dev->gem1_rst },
+ { "gem2_rst", &gtr_dev->gem2_rst },
+ { "gem3_rst", &gtr_dev->gem3_rst },
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(resets); i++) {
+ struct reset_control *rstc =
+ devm_reset_control_get(gtr_dev->dev, resets[i].name);
+
+ if (IS_ERR(rstc)) {
+ dev_err(gtr_dev->dev,
+ "failed to get %s reset signal\n",
+ resets[i].name);
+ return PTR_ERR(rstc);
+ }
+ *resets[i].rstc = rstc;
+ }
+
+ return 0;
+}
+
+/**
+ * xpsgtr_probe - The device probe function for driver initialization.
+ * @pdev: pointer to the platform device structure.
+ *
+ * Maps the serdes and siou register regions, applies the silicon-v1
+ * TX termination workaround flag, fetches resets and creates one PHY
+ * per child lane node before registering the PHY provider.
+ *
+ * Return: 0 for success and error value on failure
+ */
+static int xpsgtr_probe(struct platform_device *pdev)
+{
+ struct device_node *child, *np = pdev->dev.of_node;
+ struct xpsgtr_dev *gtr_dev;
+ struct phy_provider *provider;
+ struct phy *phy;
+ struct resource *res;
+ char *soc_rev;
+ int lanecount, port = 0, index = 0;
+ int err;
+
+ if (of_device_is_compatible(np, "xlnx,zynqmp-psgtr"))
+ dev_warn(&pdev->dev, "This binding is deprecated, please use new compatible binding\n");
+
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
+
+ gtr_dev = devm_kzalloc(&pdev->dev, sizeof(*gtr_dev), GFP_KERNEL);
+ if (!gtr_dev)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "serdes");
+ gtr_dev->serdes = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gtr_dev->serdes))
+ return PTR_ERR(gtr_dev->serdes);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "siou");
+ gtr_dev->siou = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gtr_dev->siou))
+ return PTR_ERR(gtr_dev->siou);
+
+ lanecount = of_get_child_count(np);
+ if (lanecount > MAX_LANES || lanecount == 0)
+ return -EINVAL;
+
+ /* devm_kcalloc(): overflow-checked and element size tied to the
+ * array's own type rather than an unrelated local pointer.
+ */
+ gtr_dev->phys = devm_kcalloc(&pdev->dev, lanecount,
+ sizeof(*gtr_dev->phys), GFP_KERNEL);
+ if (!gtr_dev->phys)
+ return -ENOMEM;
+
+ gtr_dev->dev = &pdev->dev;
+ platform_set_drvdata(pdev, gtr_dev);
+ mutex_init(&gtr_dev->gtr_mutex);
+
+ /* Deferred probe is also handled if nvmem is not ready */
+ soc_rev = zynqmp_nvmem_get_silicon_version(&pdev->dev,
+ "soc_revision");
+ if (IS_ERR(soc_rev))
+ return PTR_ERR(soc_rev);
+
+ if (*soc_rev == ZYNQMP_SILICON_V1)
+ gtr_dev->tx_term_fix = true;
+
+ kfree(soc_rev);
+
+ err = xpsgtr_get_resets(gtr_dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to get resets: %d\n", err);
+ return err;
+ }
+
+ for_each_child_of_node(np, child) {
+ struct xpsgtr_phy *gtr_phy;
+
+ gtr_phy = devm_kzalloc(&pdev->dev, sizeof(*gtr_phy),
+ GFP_KERNEL);
+ if (!gtr_phy) {
+ /* for_each_child_of_node() holds a reference on
+ * 'child'; drop it before leaving the loop early.
+ */
+ of_node_put(child);
+ return -ENOMEM;
+ }
+
+ /* Assign lane number to gtr_phy instance */
+ gtr_phy->lane = index;
+
+ /* Disable lane sharing as default */
+ gtr_phy->share_laneclk = -1;
+
+ gtr_dev->phys[port] = gtr_phy;
+ phy = devm_phy_create(&pdev->dev, child, &xpsgtr_phyops);
+ if (IS_ERR(phy)) {
+ dev_err(&pdev->dev, "failed to create PHY\n");
+ of_node_put(child);
+ return PTR_ERR(phy);
+ }
+ gtr_dev->phys[port]->phy = phy;
+ phy_set_drvdata(phy, gtr_dev->phys[port]);
+ gtr_phy->data = gtr_dev;
+ port++;
+ index++;
+ }
+ provider = devm_of_phy_provider_register(&pdev->dev, xpsgtr_xlate);
+ if (IS_ERR(provider)) {
+ dev_err(&pdev->dev, "registering provider failed\n");
+ return PTR_ERR(provider);
+ }
+ return 0;
+}
+
+/**
+ * xpsgtr_suspend - system suspend hook
+ * @dev: pointer to device
+ *
+ * Snapshots the two ICM_CFG registers so resume can detect whether the
+ * lane configuration was lost while suspended.
+ *
+ * Return: 0 always
+ */
+static int xpsgtr_suspend(struct device *dev)
+{
+ struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
+
+ /* Save the ICM_CFG registers */
+ gtr_dev->saved_icm_cfg0 = readl(gtr_dev->serdes + ICM_CFG0);
+ gtr_dev->saved_icm_cfg1 = readl(gtr_dev->serdes + ICM_CFG1);
+
+ return 0;
+}
+
+/**
+ * xpsgtr_resume - system resume hook
+ * @dev: pointer to device
+ *
+ * Compares the current ICM_CFG registers with the values saved at
+ * suspend and lets each lane skip phy_init() if nothing changed.
+ *
+ * Return: 0 always
+ */
+static int xpsgtr_resume(struct device *dev)
+{
+ unsigned int icm_cfg0, icm_cfg1, index;
+ bool skip_phy_init;
+ struct xpsgtr_phy *gtr_phy;
+ struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
+
+ icm_cfg0 = readl(gtr_dev->serdes + ICM_CFG0);
+ icm_cfg1 = readl(gtr_dev->serdes + ICM_CFG1);
+
+ /* Just return if no gt lanes got configured before suspend */
+ if (!gtr_dev->saved_icm_cfg0 && !gtr_dev->saved_icm_cfg1)
+ return 0;
+
+ /* Check if the ICM configurations changed after suspend */
+ if (icm_cfg0 == gtr_dev->saved_icm_cfg0 &&
+ icm_cfg1 == gtr_dev->saved_icm_cfg1)
+ skip_phy_init = true;
+ else
+ skip_phy_init = false;
+
+ /* This below updates the skip_phy_init for all gtr_phy instances */
+ for (index = 0; index < of_get_child_count(dev->of_node); index++) {
+ gtr_phy = gtr_dev->phys[index];
+ gtr_phy->skip_phy_init = skip_phy_init;
+ }
+
+ return 0;
+}
+
+/* System sleep hooks: snapshot/compare ICM configuration */
+static const struct dev_pm_ops xpsgtr_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xpsgtr_suspend, xpsgtr_resume)
+};
+
+/* Match table for of_platform binding */
+static const struct of_device_id xpsgtr_of_match[] = {
+ { .compatible = "xlnx,zynqmp-psgtr", },
+ { .compatible = "xlnx,zynqmp-psgtr-v1.1", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xpsgtr_of_match);
+
+static struct platform_driver xpsgtr_driver = {
+ .probe = xpsgtr_probe,
+ .driver = {
+ .name = "xilinx-psgtr",
+ .of_match_table = xpsgtr_of_match,
+ .pm = &xpsgtr_pm_ops,
+ },
+};
+
+module_platform_driver(xpsgtr_driver);
+
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Xilinx ZynqMP High speed Gigabit Transceiver");
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 834c59950d1c..280783cfe998 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -384,6 +384,14 @@ config PINCTRL_RK805
help
This selects the pinctrl driver for RK805.
+config PINCTRL_ZYNQMP
+ bool "Pinctrl driver for Xilinx ZynqMP"
+ depends on ARCH_ZYNQMP
+ select PINMUX
+ select GENERIC_PINCONF
+ help
+ This selects the pinctrl driver for Xilinx ZynqMP.
+
config PINCTRL_OCELOT
bool "Pinctrl driver for the Microsemi Ocelot and Jaguar2 SoCs"
depends on OF
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 0b36a1cfca8a..348a2acf4d23 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_PINCTRL_STMFX) += pinctrl-stmfx.o
obj-$(CONFIG_PINCTRL_ZYNQ) += pinctrl-zynq.o
obj-$(CONFIG_PINCTRL_INGENIC) += pinctrl-ingenic.o
obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o
+obj-$(CONFIG_PINCTRL_ZYNQMP) += pinctrl-zynqmp.o
obj-$(CONFIG_PINCTRL_OCELOT) += pinctrl-ocelot.o
obj-$(CONFIG_PINCTRL_EQUILIBRIUM) += pinctrl-equilibrium.o
diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c
index c6052a0e827a..4d16c19afc61 100644
--- a/drivers/pinctrl/pinctrl-zynq.c
+++ b/drivers/pinctrl/pinctrl-zynq.c
@@ -8,6 +8,7 @@
*/
#include <linux/io.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -1208,8 +1209,4 @@ static struct platform_driver zynq_pinctrl_driver = {
.probe = zynq_pinctrl_probe,
};
-static int __init zynq_pinctrl_init(void)
-{
- return platform_driver_register(&zynq_pinctrl_driver);
-}
-arch_initcall(zynq_pinctrl_init);
+module_platform_driver(zynq_pinctrl_driver);
diff --git a/drivers/pinctrl/pinctrl-zynqmp.c b/drivers/pinctrl/pinctrl-zynqmp.c
new file mode 100644
index 000000000000..c76cb6b5ad3f
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-zynqmp.c
@@ -0,0 +1,1071 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ZynqMP pin controller
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ *
+ * Jolly Shah <jollys@xilinx.com>
+ * Rajan Vaja <rajanv@xilinx.com>
+ * Chirag Parekh <chirag.parekh@xilinx.com>
+ */
+
+#include <linux/module.h>
+#include <dt-bindings/pinctrl/pinctrl-zynqmp.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include "core.h"
+#include "pinctrl-utils.h"
+
+/* Pin naming and firmware-query sizing constants */
+#define ZYNQMP_PIN_PREFIX "MIO"
+#define PINCTRL_GET_FUNC_NAME_RESP_LEN 16
+#define MAX_FUNC_NAME_LEN 16
+#define MAX_GROUP_PIN 50
+#define END_OF_FUNCTIONS "END_OF_FUNCTIONS"
+#define NUM_GROUPS_PER_RESP 6
+
+#define PINCTRL_GET_FUNC_GROUPS_RESP_LEN 12
+#define PINCTRL_GET_PIN_GROUPS_RESP_LEN 12
+#define NA_GROUP -1 /* firmware sentinel: slot not applicable */
+#define RESERVED_GROUP -2 /* firmware sentinel: slot reserved */
+
+/**
+ * struct zynqmp_pmux_function - a pinmux function
+ * @name: Name of the pinmux function
+ * @groups: List of pingroups for this function
+ * @ngroups: Number of entries in @groups
+ *
+ * This structure holds information about pin control function
+ * and function group names supporting that function.
+ */
+struct zynqmp_pmux_function {
+ char name[MAX_FUNC_NAME_LEN];
+ const char * const *groups;
+ unsigned int ngroups;
+};
+
+/**
+ * struct zynqmp_pinctrl - driver data
+ * @pctrl: Pinctrl device
+ * @groups: Pingroups
+ * @ngroups: Number of @groups
+ * @funcs: Pinmux functions
+ * @nfuncs: Number of @funcs
+ *
+ * This struct is stored as driver data and used to retrieve
+ * information regarding pin control functions, groups and
+ * group pins.
+ */
+struct zynqmp_pinctrl {
+ struct pinctrl_dev *pctrl;
+ const struct zynqmp_pctrl_group *groups;
+ unsigned int ngroups;
+ const struct zynqmp_pmux_function *funcs;
+ unsigned int nfuncs;
+};
+
+/**
+ * struct zynqmp_pctrl_group - Pin control group info
+ * @name: Group name
+ * @pins: Group pin numbers (at most MAX_GROUP_PIN entries)
+ * @npins: Number of pins in group
+ */
+struct zynqmp_pctrl_group {
+ const char *name;
+ unsigned int pins[MAX_GROUP_PIN];
+ unsigned int npins;
+};
+
+/**
+ * enum zynqmp_pin_config_param - possible pin configuration parameters
+ * @PIN_CONFIG_IOSTANDARD: if the pin can select an IO standard,
+ * the argument to this parameter (on a
+ * custom format) tells the driver which
+ * alternative IO standard to use
+ * @PIN_CONFIG_SCHMITTCMOS: this parameter (on a custom format) allows
+ * to select schmitt or cmos input for MIO pins
+ */
+enum zynqmp_pin_config_param {
+ /* Custom parameters start after the generic pinconf range */
+ PIN_CONFIG_IOSTANDARD = PIN_CONFIG_END + 1,
+ PIN_CONFIG_SCHMITTCMOS,
+};
+
+/* Device-tree property names for the custom parameters above */
+static const struct pinconf_generic_params zynqmp_dt_params[] = {
+ {"io-standard", PIN_CONFIG_IOSTANDARD, IO_STANDARD_LVCMOS18},
+ {"schmitt-cmos", PIN_CONFIG_SCHMITTCMOS, PIN_INPUT_TYPE_SCHMITT},
+};
+
+#ifdef CONFIG_DEBUG_FS
+/* debugfs pretty-printers for the custom pinconf parameters */
+static const struct
+pin_config_item zynqmp_conf_items[ARRAY_SIZE(zynqmp_dt_params)] = {
+ PCONFDUMP(PIN_CONFIG_IOSTANDARD, "IO-standard", NULL, true),
+ PCONFDUMP(PIN_CONFIG_SCHMITTCMOS, "schmitt-cmos", NULL, true),
+};
+#endif
+
+/* EEMI firmware interface and pin/descriptor tables filled at probe */
+static const struct zynqmp_eemi_ops *eemi_ops;
+static const struct pinctrl_pin_desc zynqmp_pins;
+static struct pinctrl_desc zynqmp_desc;
+
+/**
+ * zynqmp_pctrl_get_groups_count() - get group count
+ * @pctldev: Pincontrol device pointer.
+ *
+ * Get total groups count.
+ *
+ * Return: group count.
+ */
+static int zynqmp_pctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->ngroups;
+}
+
+/**
+ * zynqmp_pctrl_get_group_name() - get group name
+ * @pctldev: Pincontrol device pointer.
+ * @selector: Group ID.
+ *
+ * Get group's name.
+ *
+ * Return: group name.
+ */
+static const char *zynqmp_pctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->groups[selector].name;
+}
+
+/**
+ * zynqmp_pctrl_get_group_pins() - get group pins
+ * @pctldev: Pincontrol device pointer.
+ * @selector: Group ID.
+ * @pins: Pin numbers.
+ * @npins: Number of pins in group.
+ *
+ * Get group's pin count and pin number.
+ *
+ * Return: Success.
+ */
+static int zynqmp_pctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *npins)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = pctrl->groups[selector].pins;
+ *npins = pctrl->groups[selector].npins;
+
+ return 0;
+}
+
+/* Core pinctrl callbacks; DT parsing uses the generic helpers */
+static const struct pinctrl_ops zynqmp_pctrl_ops = {
+ .get_groups_count = zynqmp_pctrl_get_groups_count,
+ .get_group_name = zynqmp_pctrl_get_group_name,
+ .get_group_pins = zynqmp_pctrl_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinctrl_utils_free_map,
+};
+
+/**
+ * zynqmp_pinmux_request_pin() - Request a pin for muxing
+ * @pctldev: Pincontrol device pointer.
+ * @pin: Pin number.
+ *
+ * Request a pin from firmware for muxing.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinmux_request_pin(struct pinctrl_dev *pctldev,
+ unsigned int pin)
+{
+ int ret;
+
+ if (!eemi_ops->pinctrl_request)
+ return -ENOTSUPP;
+
+ ret = eemi_ops->pinctrl_request(pin);
+ if (ret) {
+ /* Firmware error codes are not errnos; report as -EIO */
+ dev_err(pctldev->dev, "request failed for pin %u\n", pin);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * zynqmp_pmux_get_functions_count() - get number of functions
+ * @pctldev: Pincontrol device pointer.
+ *
+ * Get total function count.
+ *
+ * Return: function count.
+ */
+static int zynqmp_pmux_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->nfuncs;
+}
+
+/**
+ * zynqmp_pmux_get_function_name() - get function name
+ * @pctldev: Pincontrol device pointer.
+ * @selector: Function ID.
+ *
+ * Get function's name.
+ *
+ * Return: function name.
+ */
+static const char *zynqmp_pmux_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctrl->funcs[selector].name;
+}
+
+/**
+ * zynqmp_pmux_get_function_groups() - Get groups for the function
+ * @pctldev: Pincontrol device pointer.
+ * @selector: Function ID
+ * @groups: Group names.
+ * @num_groups: Number of function groups.
+ *
+ * Get function's group count and group names.
+ *
+ * Return: Success.
+ */
+static int zynqmp_pmux_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pctrl->funcs[selector].groups;
+ *num_groups = pctrl->funcs[selector].ngroups;
+
+ return 0;
+}
+
+/**
+ * zynqmp_pinmux_set_mux() - Set requested function for the group
+ * @pctldev: Pincontrol device pointer.
+ * @function: Function ID.
+ * @group: Group ID.
+ *
+ * Loop through all pins of group and call firmware API
+ * to set requested function for all pins in group.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int function,
+ unsigned int group)
+{
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct zynqmp_pctrl_group *pgrp = &pctrl->groups[group];
+ int ret, i;
+
+ if (!eemi_ops->pinctrl_set_function)
+ return -ENOTSUPP;
+
+ for (i = 0; i < pgrp->npins; i++) {
+ unsigned int pin = pgrp->pins[i];
+
+ /* First failing pin aborts; earlier pins stay reprogrammed */
+ ret = eemi_ops->pinctrl_set_function(pin, function);
+ if (ret) {
+ dev_err(pctldev->dev, "set mux failed for pin %u\n",
+ pin);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * zynqmp_pinmux_release_pin() - Release a pin
+ * @pctldev: Pincontrol device pointer.
+ * @pin: Pin number.
+ *
+ * Release a pin from firmware.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinmux_release_pin(struct pinctrl_dev *pctldev,
+ unsigned int pin)
+{
+ int ret;
+
+ if (!eemi_ops->pinctrl_release)
+ return -ENOTSUPP;
+
+ ret = eemi_ops->pinctrl_release(pin);
+ if (ret) {
+ dev_err(pctldev->dev, "free pin failed for pin %u\n",
+ pin);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* Pinmux callbacks; all operations are proxied to the PMU firmware */
+static const struct pinmux_ops zynqmp_pinmux_ops = {
+ .request = zynqmp_pinmux_request_pin,
+ .get_functions_count = zynqmp_pmux_get_functions_count,
+ .get_function_name = zynqmp_pmux_get_function_name,
+ .get_function_groups = zynqmp_pmux_get_function_groups,
+ .set_mux = zynqmp_pinmux_set_mux,
+ .free = zynqmp_pinmux_release_pin,
+};
+
+/**
+ * zynqmp_pinconf_cfg_get() - get config value for the pin
+ * @pctldev: Pin control device pointer.
+ * @pin: Pin number.
+ * @config: Value of config param.
+ *
+ * Get value of the requested configuration parameter for the
+ * given pin. Each query goes through the PMU firmware; a failed
+ * firmware call now returns -EIO instead of silently packing a
+ * stale value (previously 'ret' was assigned but never checked).
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinconf_cfg_get(struct pinctrl_dev *pctldev,
+ unsigned int pin,
+ unsigned long *config)
+{
+ int ret;
+ unsigned int arg = 0, param = pinconf_to_config_param(*config);
+
+ if (!eemi_ops->pinctrl_get_config)
+ return -ENOTSUPP;
+
+ if (pin >= zynqmp_desc.npins)
+ return -ENOTSUPP;
+
+ switch (param) {
+ case PIN_CONFIG_SLEW_RATE:
+ ret = eemi_ops->pinctrl_get_config(pin,
+ PM_PINCTRL_CONFIG_SLEW_RATE,
+ &arg);
+ if (ret)
+ return -EIO;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ret = eemi_ops->pinctrl_get_config(pin,
+ PM_PINCTRL_CONFIG_PULL_CTRL,
+ &arg);
+ if (ret)
+ return -EIO;
+ if (arg != PM_PINCTRL_BIAS_PULL_UP)
+ return -EINVAL;
+ arg = 1;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ret = eemi_ops->pinctrl_get_config(pin,
+ PM_PINCTRL_CONFIG_PULL_CTRL,
+ &arg);
+ if (ret)
+ return -EIO;
+ if (arg != PM_PINCTRL_BIAS_PULL_DOWN)
+ return -EINVAL;
+ arg = 1;
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ ret = eemi_ops->pinctrl_get_config(pin,
+ PM_PINCTRL_CONFIG_BIAS_STATUS,
+ &arg);
+ if (ret)
+ return -EIO;
+ if (arg != PM_PINCTRL_BIAS_DISABLE)
+ return -EINVAL;
+ arg = 1;
+ break;
+ case PIN_CONFIG_IOSTANDARD:
+ ret = eemi_ops->pinctrl_get_config(pin,
+ PM_PINCTRL_CONFIG_VOLTAGE_STATUS,
+ &arg);
+ if (ret)
+ return -EIO;
+ break;
+ case PIN_CONFIG_SCHMITTCMOS:
+ ret = eemi_ops->pinctrl_get_config(pin,
+ PM_PINCTRL_CONFIG_SCHMITT_CMOS,
+ &arg);
+ if (ret)
+ return -EIO;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ ret = eemi_ops->pinctrl_get_config(pin,
+ PM_PINCTRL_CONFIG_DRIVE_STRENGTH,
+ &arg);
+ if (ret)
+ return -EIO;
+ /* Map firmware drive-strength codes to mA values */
+ switch (arg) {
+ case PM_PINCTRL_DRIVE_STRENGTH_2MA:
+ arg = DRIVE_STRENGTH_2MA;
+ break;
+ case PM_PINCTRL_DRIVE_STRENGTH_4MA:
+ arg = DRIVE_STRENGTH_4MA;
+ break;
+ case PM_PINCTRL_DRIVE_STRENGTH_8MA:
+ arg = DRIVE_STRENGTH_8MA;
+ break;
+ case PM_PINCTRL_DRIVE_STRENGTH_12MA:
+ arg = DRIVE_STRENGTH_12MA;
+ break;
+ default:
+ /* Invalid drive strength */
+ dev_warn(pctldev->dev,
+ "Invalid drive strength for pin %d\n",
+ pin);
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+ return 0;
+}
+
+/**
+ * zynqmp_pinconf_cfg_set() - Set requested config for the pin
+ * @pctldev: Pincontrol device pointer.
+ * @pin: Pin number.
+ * @configs: Configurations to set.
+ * @num_configs: Number of configurations.
+ *
+ * Loop though all configurations and call firmware API
+ * to set requested configurations for the pin. Individual
+ * configuration failures are only warned about (best effort),
+ * so this function itself reports success.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinconf_cfg_set(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *configs,
+ unsigned int num_configs)
+{
+ int i, ret;
+
+ if (!eemi_ops->pinctrl_set_config)
+ return -ENOTSUPP;
+
+ if (pin >= zynqmp_desc.npins)
+ return -ENOTSUPP;
+
+ for (i = 0; i < num_configs; i++) {
+ unsigned int param = pinconf_to_config_param(configs[i]);
+ unsigned int arg = pinconf_to_config_argument(configs[i]);
+ unsigned int value;
+
+ switch (param) {
+ case PIN_CONFIG_SLEW_RATE:
+ ret = eemi_ops->pinctrl_set_config(pin,
+ PM_PINCTRL_CONFIG_SLEW_RATE,
+ arg);
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ret = eemi_ops->pinctrl_set_config(pin,
+ PM_PINCTRL_CONFIG_PULL_CTRL,
+ PM_PINCTRL_BIAS_PULL_UP);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ret = eemi_ops->pinctrl_set_config(pin,
+ PM_PINCTRL_CONFIG_PULL_CTRL,
+ PM_PINCTRL_BIAS_PULL_DOWN);
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ ret = eemi_ops->pinctrl_set_config(pin,
+ PM_PINCTRL_CONFIG_BIAS_STATUS,
+ PM_PINCTRL_BIAS_DISABLE);
+ break;
+ case PIN_CONFIG_SCHMITTCMOS:
+ ret = eemi_ops->pinctrl_set_config(pin,
+ PM_PINCTRL_CONFIG_SCHMITT_CMOS,
+ arg);
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ /* Translate mA value to the firmware encoding */
+ switch (arg) {
+ case DRIVE_STRENGTH_2MA:
+ value = PM_PINCTRL_DRIVE_STRENGTH_2MA;
+ break;
+ case DRIVE_STRENGTH_4MA:
+ value = PM_PINCTRL_DRIVE_STRENGTH_4MA;
+ break;
+ case DRIVE_STRENGTH_8MA:
+ value = PM_PINCTRL_DRIVE_STRENGTH_8MA;
+ break;
+ case DRIVE_STRENGTH_12MA:
+ value = PM_PINCTRL_DRIVE_STRENGTH_12MA;
+ break;
+ default:
+ /* Invalid drive strength */
+ dev_warn(pctldev->dev,
+ "Invalid drive strength for pin %d\n",
+ pin);
+ return -EINVAL;
+ }
+
+ ret = eemi_ops->pinctrl_set_config(pin,
+ PM_PINCTRL_CONFIG_DRIVE_STRENGTH,
+ value);
+ break;
+ case PIN_CONFIG_IOSTANDARD:
+ /* IO standard is read-only; just sanity-check it */
+ ret = eemi_ops->pinctrl_get_config(pin,
+ PM_PINCTRL_CONFIG_VOLTAGE_STATUS,
+ &value);
+
+ /*
+ * Fix: only compare @value when the query succeeded;
+ * it is uninitialized otherwise.
+ */
+ if (!ret && arg != value)
+ dev_warn(pctldev->dev,
+ "Invalid IO Standard requested for pin %d\n",
+ pin);
+ break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ case PIN_CONFIG_LOW_POWER_MODE:
+ /*
+ * This cases are mentioned in dts but configurable
+ * registers are unknown. So falling through to ignore
+ * boot time warnings as of now.
+ */
+ ret = 0;
+ break;
+ default:
+ dev_warn(pctldev->dev,
+ "unsupported configuration parameter '%u'\n",
+ param);
+ ret = -ENOTSUPP;
+ break;
+ }
+ if (ret)
+ dev_warn(pctldev->dev,
+ "%s failed: pin %u param %u value %u\n",
+ __func__, pin, param, arg);
+ }
+
+ return 0;
+}
+
+/**
+ * zynqmp_pinconf_group_set() - Set requested config for the group
+ * @pctldev: Pincontrol device pointer.
+ * @selector: Group ID.
+ * @configs: Configurations to set.
+ * @num_configs: Number of configurations.
+ *
+ * Call function to set configs for each pin in group.
+ * Stops at the first per-pin failure.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ int i, ret;
+ struct zynqmp_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct zynqmp_pctrl_group *pgrp = &pctrl->groups[selector];
+
+ for (i = 0; i < pgrp->npins; i++) {
+ ret = zynqmp_pinconf_cfg_set(pctldev, pgrp->pins[i], configs,
+ num_configs);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Generic pinconf operations; firmware does the actual register work */
+static const struct pinconf_ops zynqmp_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = zynqmp_pinconf_cfg_get,
+ .pin_config_set = zynqmp_pinconf_cfg_set,
+ .pin_config_group_set = zynqmp_pinconf_group_set,
+};
+
+/* Pin controller descriptor; .pins/.npins are filled in at probe time */
+static struct pinctrl_desc zynqmp_desc = {
+ .name = "zynqmp_pinctrl",
+ .owner = THIS_MODULE,
+ .pctlops = &zynqmp_pctrl_ops,
+ .pmxops = &zynqmp_pinmux_ops,
+ .confops = &zynqmp_pinconf_ops,
+};
+
+/**
+ * zynqmp_pinctrl_get_function_groups() - get groups for the function
+ * @fid: Function ID.
+ * @index: Group index.
+ * @groups: Groups data.
+ *
+ * Call firmware API to get groups for the given function.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_get_function_groups(u32 fid, u32 index, u16 *groups)
+{
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ qdata.qid = PM_QID_PINCTRL_GET_FUNCTION_GROUPS;
+ qdata.arg1 = fid;
+ qdata.arg2 = index;
+
+ ret = eemi_ops->query_data(qdata, ret_payload);
+ if (ret)
+ return ret;
+
+ /* Word 0 of the payload carries status; group IDs start at word 1 */
+ memcpy(groups, &ret_payload[1], PINCTRL_GET_FUNC_GROUPS_RESP_LEN);
+
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_get_func_num_groups() - get number of groups in function
+ * @fid: Function ID.
+ * @ngroups: Number of groups in function.
+ *
+ * Call firmware API to get number of group in function.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_get_func_num_groups(u32 fid, unsigned int *ngroups)
+{
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ qdata.qid = PM_QID_PINCTRL_GET_NUM_FUNCTION_GROUPS;
+ qdata.arg1 = fid;
+
+ ret = eemi_ops->query_data(qdata, ret_payload);
+ if (ret)
+ return ret;
+
+ /* Word 0 is status; word 1 holds the group count */
+ *ngroups = ret_payload[1];
+
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_prepare_func_groups() - prepare function and groups data
+ * @dev: Device pointer.
+ * @fid: Function ID.
+ * @func: Function data.
+ * @groups: Groups data.
+ *
+ * Query firmware to get group IDs for each function. Firmware returns
+ * group IDs. Based on group index for the function, group names in
+ * function are stored. For example, first group in "eth0" function
+ * is named as "eth0_0", second as "eth0_1" and so on.
+ *
+ * Based on group ID received from firmware, function stores name of
+ * group for that group ID. For an example, if "eth0" first group ID
+ * is x, groups[x] name will be stored as "eth0_0".
+ *
+ * Once done for each function, each function would have its group names,
+ * and each groups would also have their names.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_prepare_func_groups(struct device *dev, u32 fid,
+ struct zynqmp_pmux_function *func,
+ struct zynqmp_pctrl_group *groups)
+{
+ u16 resp[NUM_GROUPS_PER_RESP] = {0};
+ const char **fgroups;
+ /* Fix: init to 0; @ret was returned uninitialized for ngroups == 0 */
+ int ret = 0, index, i;
+
+ /* devm_kcalloc() checks the count * size multiplication for overflow */
+ fgroups = devm_kcalloc(dev, func->ngroups, sizeof(*fgroups),
+ GFP_KERNEL);
+ if (!fgroups)
+ return -ENOMEM;
+
+ for (index = 0; index < func->ngroups; index += NUM_GROUPS_PER_RESP) {
+ ret = zynqmp_pinctrl_get_function_groups(fid, index, resp);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < NUM_GROUPS_PER_RESP; i++) {
+ if (resp[i] == (u16)NA_GROUP)
+ goto done;
+ if (resp[i] == (u16)RESERVED_GROUP)
+ continue;
+ fgroups[index + i] = devm_kasprintf(dev, GFP_KERNEL,
+ "%s_%d_grp",
+ func->name,
+ index + i);
+ /* Fix: allocation result was not checked */
+ if (!fgroups[index + i])
+ return -ENOMEM;
+ /*
+ * The group name is identical to the function-local
+ * name; share one allocation instead of formatting
+ * the same string twice.
+ */
+ groups[resp[i]].name = fgroups[index + i];
+ }
+ }
+done:
+ func->groups = fgroups;
+
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_get_function_name() - get function name
+ * @fid: Function ID.
+ * @name: Function name
+ *
+ * Call firmware API to get name of given function.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_get_function_name(u32 fid, char *name)
+{
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+
+ qdata.qid = PM_QID_PINCTRL_GET_FUNCTION_NAME;
+ qdata.arg1 = fid;
+
+ /*
+ * NOTE(review): the return value is ignored here, unlike the other
+ * query helpers, and the copy starts at payload word 0 rather than
+ * word 1 — presumably this query returns the name directly in the
+ * payload with no status word. TODO confirm against the EEMI spec.
+ */
+ eemi_ops->query_data(qdata, ret_payload);
+ memcpy(name, ret_payload, PINCTRL_GET_FUNC_NAME_RESP_LEN);
+
+ return 0;
+}
+
+/**
+ * zynqmp_pinctrl_get_num_functions() - get number of supported functions
+ * @nfuncs: Number of functions.
+ *
+ * Call firmware API to get number of functions supported by system/board.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_get_num_functions(unsigned int *nfuncs)
+{
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ qdata.qid = PM_QID_PINCTRL_GET_NUM_FUNCTIONS;
+
+ ret = eemi_ops->query_data(qdata, ret_payload);
+ if (ret)
+ return ret;
+
+ /* Word 0 is status; word 1 holds the function count */
+ *nfuncs = ret_payload[1];
+
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_get_pin_groups() - get groups for the pin
+ * @pin: Pin number.
+ * @index: Group index.
+ * @groups: Groups data.
+ *
+ * Call firmware API to get groups for the given pin.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_get_pin_groups(u32 pin, u32 index, u16 *groups)
+{
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ qdata.qid = PM_QID_PINCTRL_GET_PIN_GROUPS;
+ qdata.arg1 = pin;
+ qdata.arg2 = index;
+
+ ret = eemi_ops->query_data(qdata, ret_payload);
+ if (ret)
+ return ret;
+
+ /* Word 0 is status; group IDs start at word 1 */
+ memcpy(groups, &ret_payload[1], PINCTRL_GET_PIN_GROUPS_RESP_LEN);
+
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_group_add_pin() - add pin to given group
+ * @group: Group data.
+ * @pin: Pin number.
+ *
+ * Add pin number to respective group's pin array at end and
+ * increment pin count for the group.
+ *
+ * Note: no bounds check is performed here — the caller must ensure
+ * the group's pin array is large enough. TODO confirm array sizing.
+ */
+static void zynqmp_pinctrl_group_add_pin(struct zynqmp_pctrl_group *group,
+ unsigned int pin)
+{
+ group->pins[group->npins++] = pin;
+}
+
+/**
+ * zynqmp_pinctrl_create_pin_groups() - assign pins to respective groups
+ * @dev: Device pointer (currently unused; kept for symmetry with callers).
+ * @groups: Groups data.
+ * @pin: Pin number.
+ *
+ * Query firmware to get groups available for the given pin.
+ * Based on firmware response(group IDs for the pin), add
+ * pin number to respective group's pin array.
+ *
+ * Once all pins are queried, each groups would have its number
+ * of pins and pin numbers data.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_create_pin_groups(struct device *dev,
+ struct zynqmp_pctrl_group *groups,
+ unsigned int pin)
+{
+ int ret, i, index = 0;
+ u16 resp[NUM_GROUPS_PER_RESP] = {0};
+
+ /* Firmware pages the response; loop until an NA_GROUP terminator */
+ do {
+ ret = zynqmp_pinctrl_get_pin_groups(pin, index, resp);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < NUM_GROUPS_PER_RESP; i++) {
+ if (resp[i] == (u16)NA_GROUP)
+ goto done;
+ if (resp[i] == (u16)RESERVED_GROUP)
+ continue;
+ zynqmp_pinctrl_group_add_pin(&groups[resp[i]], pin);
+ }
+ index += NUM_GROUPS_PER_RESP;
+ } while (1);
+
+done:
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_prepare_group_pins() - prepare each group's pin data
+ * @dev: Device pointer.
+ * @groups: Groups data.
+ * @ngroups: Number of groups.
+ *
+ * Prepare pin number and number of pins data for each group.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_prepare_group_pins(struct device *dev,
+ struct zynqmp_pctrl_group *groups,
+ unsigned int ngroups)
+{
+ unsigned int pin;
+ /* Fix: init to 0; @ret was returned uninitialized when npins == 0 */
+ int ret = 0;
+
+ for (pin = 0; pin < zynqmp_desc.npins; pin++) {
+ ret = zynqmp_pinctrl_create_pin_groups(dev, groups, pin);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_prepare_function_info() - prepare function info
+ * @dev: Device pointer.
+ * @pctrl: Pin control driver data.
+ *
+ * Query firmware for functions, groups and pin information and
+ * prepare pin control driver data.
+ *
+ * Query number of functions and number of function groups (number
+ * of groups in given function) to allocate required memory buffers
+ * for functions and groups. Once buffers are allocated to store
+ * functions and groups data, query and store required information
+ * (number of groups and group names for each function, number of
+ * pins and pin numbers for each group).
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_prepare_function_info(struct device *dev,
+ struct zynqmp_pinctrl *pctrl)
+{
+ struct zynqmp_pmux_function *funcs;
+ struct zynqmp_pctrl_group *groups;
+ int ret, i;
+
+ ret = zynqmp_pinctrl_get_num_functions(&pctrl->nfuncs);
+ if (ret)
+ return ret;
+
+ /* devm_kcalloc() checks the count * size multiplication for overflow */
+ funcs = devm_kcalloc(dev, pctrl->nfuncs, sizeof(*funcs), GFP_KERNEL);
+ if (!funcs)
+ return -ENOMEM;
+
+ for (i = 0; i < pctrl->nfuncs; i++) {
+ zynqmp_pinctrl_get_function_name(i, funcs[i].name);
+
+ ret = zynqmp_pinctrl_get_func_num_groups(i, &funcs[i].ngroups);
+ if (ret)
+ goto err;
+ pctrl->ngroups += funcs[i].ngroups;
+ }
+
+ groups = devm_kcalloc(dev, pctrl->ngroups, sizeof(*groups),
+ GFP_KERNEL);
+ if (!groups)
+ return -ENOMEM;
+
+ for (i = 0; i < pctrl->nfuncs; i++) {
+ ret = zynqmp_pinctrl_prepare_func_groups(dev, i, &funcs[i],
+ groups);
+ if (ret)
+ goto err;
+ }
+
+ ret = zynqmp_pinctrl_prepare_group_pins(dev, groups, pctrl->ngroups);
+ if (ret)
+ goto err;
+
+ pctrl->funcs = funcs;
+ pctrl->groups = groups;
+
+ /* Success also falls through here with ret == 0 */
+err:
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_get_num_pins() - get number of pins in system
+ * @npins: Number of pins in system/board.
+ *
+ * Call firmware API to get number of pins.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_get_num_pins(unsigned int *npins)
+{
+ struct zynqmp_pm_query_data qdata = {0};
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ qdata.qid = PM_QID_PINCTRL_GET_NUM_PINS;
+
+ ret = eemi_ops->query_data(qdata, ret_payload);
+ if (ret)
+ return ret;
+
+ /* Word 0 is status; word 1 holds the pin count */
+ *npins = ret_payload[1];
+
+ return ret;
+}
+
+/**
+ * zynqmp_pinctrl_prepare_pin_desc() - prepare pin description info
+ * @dev: Device pointer.
+ * @zynqmp_pins: Pin information.
+ * @npins: Number of pins.
+ *
+ * Query number of pins information from firmware and prepare pin
+ * description containing pin number and pin name.
+ *
+ * Return: 0 on success else error code.
+ */
+static int zynqmp_pinctrl_prepare_pin_desc(struct device *dev,
+ const struct pinctrl_pin_desc **zynqmp_pins,
+ unsigned int *npins)
+{
+ struct pinctrl_pin_desc *pins, *pin;
+ int ret;
+ int i;
+
+ ret = zynqmp_pinctrl_get_num_pins(npins);
+ if (ret)
+ return ret;
+
+ /* devm_kcalloc() checks the count * size multiplication for overflow */
+ pins = devm_kcalloc(dev, *npins, sizeof(*pins), GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ for (i = 0; i < *npins; i++) {
+ pin = &pins[i];
+ pin->number = i;
+ pin->name = devm_kasprintf(dev, GFP_KERNEL, "%s%d",
+ ZYNQMP_PIN_PREFIX, i);
+ /* Fix: allocation result was not checked */
+ if (!pin->name)
+ return -ENOMEM;
+ }
+
+ *zynqmp_pins = pins;
+
+ return 0;
+}
+
+/*
+ * Probe: reject the deprecated compatible, fetch the firmware (EEMI)
+ * interface, build the pin/function/group tables from firmware queries,
+ * and register the pin controller. The err label is also reached on
+ * success with ret == 0.
+ */
+static int zynqmp_pinctrl_probe(struct platform_device *pdev)
+{
+ struct zynqmp_pinctrl *pctrl;
+ int ret;
+
+ if (of_device_is_compatible(pdev->dev.of_node, "xlnx,pinctrl-zynqmp")) {
+ dev_err(&pdev->dev, "ERROR: This binding is deprecated, please use new compatible binding\n");
+ return -ENOENT;
+ }
+
+ pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
+
+ if (!eemi_ops->query_data) {
+ dev_err(&pdev->dev, "%s: Firmware interface not available\n",
+ __func__);
+ ret = -ENOTSUPP;
+ goto err;
+ }
+
+ /* Fills zynqmp_desc.pins/.npins from firmware */
+ ret = zynqmp_pinctrl_prepare_pin_desc(&pdev->dev,
+ &zynqmp_desc.pins,
+ &zynqmp_desc.npins);
+ if (ret) {
+ dev_err(&pdev->dev, "%s() pin desc prepare fail with %d\n",
+ __func__, ret);
+ goto err;
+ }
+
+ ret = zynqmp_pinctrl_prepare_function_info(&pdev->dev, pctrl);
+ if (ret) {
+ dev_err(&pdev->dev, "%s() function info prepare fail with %d\n",
+ __func__, ret);
+ goto err;
+ }
+
+ pctrl->pctrl = pinctrl_register(&zynqmp_desc, &pdev->dev, pctrl);
+ if (IS_ERR(pctrl->pctrl)) {
+ ret = PTR_ERR(pctrl->pctrl);
+ goto err;
+ }
+ platform_set_drvdata(pdev, pctrl);
+
+ dev_info(&pdev->dev, "zynqmp pinctrl initialized\n");
+err:
+ return ret;
+}
+
+/* Remove: unregister the pin controller; all memory is devm-managed */
+static int zynqmp_pinctrl_remove(struct platform_device *pdev)
+{
+ struct zynqmp_pinctrl *pctrl = platform_get_drvdata(pdev);
+
+ pinctrl_unregister(pctrl->pctrl);
+
+ return 0;
+}
+
+/*
+ * "xlnx,pinctrl-zynqmp" is matched only so probe can print a clear
+ * deprecation error for it; "xlnx,zynqmp-pinctrl" is the live binding.
+ */
+static const struct of_device_id zynqmp_pinctrl_of_match[] = {
+ { .compatible = "xlnx,zynqmp-pinctrl" },
+ { .compatible = "xlnx,pinctrl-zynqmp" },
+ { }
+};
+/* Fix: export the match table so module autoloading works */
+MODULE_DEVICE_TABLE(of, zynqmp_pinctrl_of_match);
+
+static struct platform_driver zynqmp_pinctrl_driver = {
+ .driver = {
+ .name = "zynqmp-pinctrl",
+ .of_match_table = zynqmp_pinctrl_of_match,
+ },
+ .probe = zynqmp_pinctrl_probe,
+ .remove = zynqmp_pinctrl_remove,
+};
+
+module_platform_driver(zynqmp_pinctrl_driver);
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index fbaed079b299..5535fa5f4a9d 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -222,6 +222,26 @@ config ST_REMOTEPROC
processor framework.
This can be either built-in or a loadable module.
+config ZYNQ_REMOTEPROC
+ tristate "Support ZYNQ remoteproc"
+ depends on ARCH_ZYNQ && SMP && !DEBUG_SG
+ select RPMSG_VIRTIO
+ select HOTPLUG_CPU
+ select SRAM
+ help
+ Say y here to support Xilinx Zynq remote processors (the second
+ ARM Cortex-A9 CPU) via the remote processor framework.
+
+config ZYNQMP_R5_REMOTEPROC
+ tristate "ZynqMP_r5 remoteproc support"
+ depends on ARM64 && PM && ARCH_ZYNQMP
+ select RPMSG_VIRTIO
+ select MAILBOX
+ select ZYNQMP_IPI_MBOX
+ help
+ Say y here to support ZynqMP R5 remote processors via the remote
+ processor framework.
+
config ST_SLIM_REMOTEPROC
tristate
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index 0effd3825035..11ad93c7c13b 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -27,5 +27,7 @@ obj-$(CONFIG_QCOM_WCNSS_PIL) += qcom_wcnss_pil.o
qcom_wcnss_pil-y += qcom_wcnss.o
qcom_wcnss_pil-y += qcom_wcnss_iris.o
obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o
+obj-$(CONFIG_ZYNQ_REMOTEPROC) += zynq_remoteproc.o
+obj-$(CONFIG_ZYNQMP_R5_REMOTEPROC) += zynqmp_r5_remoteproc.o
obj-$(CONFIG_ST_SLIM_REMOTEPROC) += st_slim_rproc.o
obj-$(CONFIG_STM32_RPROC) += stm32_rproc.o
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
index b389dc79da81..ba8caabb71e9 100644
--- a/drivers/remoteproc/remoteproc_internal.h
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -129,4 +129,27 @@ bool rproc_u64_fit_in_size_t(u64 val)
return (val <= (size_t) -1);
}
+/* True if this rproc opted in to kicking the remote via sysfs */
+static inline
+bool rproc_allow_sysfs_kick(struct rproc *rproc)
+{
+ return (rproc->sysfs_kick) ? true : false;
+}
+
+/* Check (without consuming) whether the remote has kicked; optional op */
+static inline
+bool rproc_peek_remote_kick(struct rproc *rproc, char *buf, size_t *len)
+{
+ if (rproc->ops->peek_remote_kick)
+ return rproc->ops->peek_remote_kick(rproc, buf, len);
+ else
+ return false;
+}
+
+/* Acknowledge a kick from the remote; no-op if the op is not provided */
+static inline
+void rproc_ack_remote_kick(struct rproc *rproc)
+{
+ if (rproc->ops->ack_remote_kick)
+ rproc->ops->ack_remote_kick(rproc);
+}
+
+int rproc_create_kick_sysfs(struct rproc *rproc);
#endif /* REMOTEPROC_INTERNAL_H */
diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c
index 52b871327b55..a093f18386a9 100644
--- a/drivers/remoteproc/remoteproc_sysfs.c
+++ b/drivers/remoteproc/remoteproc_sysfs.c
@@ -114,6 +114,85 @@ static ssize_t state_store(struct device *dev,
}
static DEVICE_ATTR_RW(state);
+/**
+ * kick_store() - Kick remote from sysfs.
+ * @dev: remoteproc device
+ * @attr: sysfs device attribute
+ * @buf: sysfs buffer
+ * @count: size of the contents in buf
+ *
+ * It will just raise a signal, no content is expected for now.
+ *
+ * Return: the input count if it allows kick from sysfs,
+ * as it is always expected to succeed.
+ */
+static ssize_t kick_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rproc *rproc = to_rproc(dev);
+ /*
+ * Fix: zero-initialize @id so a write shorter than sizeof(id)
+ * does not leave uninitialized garbage in the notify id.
+ */
+ int id = 0;
+ size_t cpy_len;
+
+ (void)attr;
+ cpy_len = count <= sizeof(id) ? count : sizeof(id);
+ memcpy((char *)(&id), buf, cpy_len);
+
+ if (rproc->ops->kick)
+ rproc->ops->kick(rproc, id);
+ else
+ count = -EINVAL;
+ return count;
+}
+static DEVICE_ATTR_WO(kick);
+
+/**
+ * remote_kick_show() - Check if remote has kicked
+ * @dev: remoteproc device
+ * @attr: sysfs device attribute
+ * @buf: sysfs buffer
+ *
+ * It will check if the remote has kicked.
+ *
+ * Return: always 2, and the value in the sysfs buffer
+ * shows if the remote has kicked. '0' - not kicked, '1' - kicked.
+ */
+static ssize_t remote_kick_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rproc *rproc = to_rproc(dev);
+
+ /* Default to "not kicked"; peek does not consume the event */
+ buf[0] = '0';
+ buf[1] = '\n';
+ if (rproc_peek_remote_kick(rproc, NULL, NULL))
+ buf[0] = '1';
+ return 2;
+}
+
+/**
+ * remote_kick_store() - Ack the kick from remote
+ * @dev: remoteproc device
+ * @attr: sysfs device attribute
+ * @buf: sysfs buffer (ignored)
+ * @count: size of the contents in buf
+ *
+ * It will ack the remote, no response contents is expected.
+ *
+ * Return: the input count if it allows kick from sysfs,
+ * as it is always expected to succeed.
+ */
+static ssize_t remote_kick_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rproc *rproc = to_rproc(dev);
+
+ rproc_ack_remote_kick(rproc);
+ return count;
+}
+static DEVICE_ATTR_RW(remote_kick);
+
/* Expose the name of the remote processor via sysfs */
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -145,6 +224,34 @@ struct class rproc_class = {
.dev_groups = rproc_devgroups,
};
+/**
+ * rproc_create_kick_sysfs() - create kick remote sysfs entry
+ * @rproc: remoteproc
+ *
+ * It will create kick remote sysfs entry if kick remote
+ * from sysfs is allowed.
+ *
+ * Return: 0 for success, and negative value for failure.
+ */
+int rproc_create_kick_sysfs(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ if (!rproc_allow_sysfs_kick(rproc))
+ return -EINVAL;
+ ret = sysfs_create_file(&dev->kobj, &dev_attr_kick.attr);
+ if (ret) {
+ dev_err(dev, "failed to create sysfs for kick.\n");
+ return ret;
+ }
+ /* "kick" file intentionally left in place if this second one fails */
+ ret = sysfs_create_file(&dev->kobj, &dev_attr_remote_kick.attr);
+ if (ret)
+ dev_err(dev, "failed to create sysfs for remote kick.\n");
+ return ret;
+}
+EXPORT_SYMBOL(rproc_create_kick_sysfs);
+
int __init rproc_init_sysfs(void)
{
/* create remoteproc device class for sysfs */
diff --git a/drivers/remoteproc/zynq_remoteproc.c b/drivers/remoteproc/zynq_remoteproc.c
new file mode 100644
index 000000000000..03a6cddc2ce4
--- /dev/null
+++ b/drivers/remoteproc/zynq_remoteproc.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Zynq Remote Processor driver
+ *
+ * Copyright (C) 2012 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2012 PetaLogix
+ *
+ * Based on origin OMAP Remote Processor driver
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/remoteproc.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/smp.h>
+#include <linux/irqchip/arm-gic.h>
+#include <asm/outercache.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <linux/genalloc.h>
+#include <../../arch/arm/mach-zynq/common.h>
+
+#include "remoteproc_internal.h"
+
+#define MAX_NUM_VRINGS 2
+#define NOTIFYID_ANY (-1)
+/* Maximum on chip memories used by the driver*/
+#define MAX_ON_CHIP_MEMS 32
+
+/* Structure for storing IRQs claimed from the DT on behalf of firmware */
+struct irq_list {
+ int irq;
+ struct list_head list;
+};
+
+/* Structure for IPIs; @pending defers kicks until the remote is running */
+struct ipi_info {
+ u32 irq;
+ u32 notifyid;
+ bool pending;
+};
+
+/**
+ * struct zynq_mem_res - zynq memory resource for firmware memory
+ * @res: memory resource
+ * @node: list node
+ */
+struct zynq_mem_res {
+ struct resource res;
+ struct list_head node;
+};
+
+/**
+ * struct zynq_rproc_pdata - zynq rproc private data
+ * @irqs: inter processor soft IRQs
+ * @rproc: pointer to remoteproc instance
+ * @ipis: interrupt processor interrupts statistics
+ * @fw_mems: list of firmware memories
+ */
+struct zynq_rproc_pdata {
+ struct irq_list irqs;
+ struct rproc *rproc;
+ struct ipi_info ipis[MAX_NUM_VRINGS];
+ struct list_head fw_mems;
+};
+
+static bool autoboot __read_mostly;
+
+/* Store rproc for IPI handler; this driver supports a single instance */
+static struct rproc *rproc;
+static struct work_struct workqueue;
+
+/* Deferred IPI handling: dispatch virtqueue 0 work outside IRQ context */
+static void handle_event(struct work_struct *work)
+{
+ struct zynq_rproc_pdata *local = rproc->priv;
+
+ if (rproc_vq_interrupt(local->rproc, local->ipis[0].notifyid) ==
+ IRQ_NONE)
+ dev_dbg(rproc->dev.parent, "no message found in vqid 0\n");
+}
+
+/* IPI callback from the remote; just schedule the work item */
+static void ipi_kick(void)
+{
+ dev_dbg(rproc->dev.parent, "KICK Linux because of pending message\n");
+ schedule_work(&workqueue);
+}
+
+/* Deliver kicks that were queued while the remote CPU was not yet running */
+static void kick_pending_ipi(struct rproc *rproc)
+{
+ struct zynq_rproc_pdata *local = rproc->priv;
+ int i;
+
+ for (i = 0; i < MAX_NUM_VRINGS; i++) {
+ /* Send swirq to firmware */
+ if (local->ipis[i].pending) {
+ gic_raise_softirq(cpumask_of(1),
+ local->ipis[i].irq);
+ local->ipis[i].pending = false;
+ }
+ }
+}
+
+/* Take CPU1 away from Linux and start the firmware on it */
+static int zynq_rproc_start(struct rproc *rproc)
+{
+ struct device *dev = rproc->dev.parent;
+ int ret;
+
+ dev_dbg(dev, "%s\n", __func__);
+ INIT_WORK(&workqueue, handle_event);
+
+ ret = remove_cpu(1);
+ /* EBUSY means CPU is already released */
+ if (ret && (ret != -EBUSY)) {
+ dev_err(dev, "Can't release cpu1\n");
+ return ret;
+ }
+
+ ret = zynq_cpun_start(rproc->bootaddr, 1);
+ /* Trigger pending kicks */
+ kick_pending_ipi(rproc);
+
+ return ret;
+}
+
+/* kick a firmware */
+static void zynq_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct device *dev = rproc->dev.parent;
+ struct zynq_rproc_pdata *local = rproc->priv;
+ struct rproc_vdev *rvdev, *rvtmp;
+ int i;
+
+ dev_dbg(dev, "KICK Firmware to start send messages vqid %d\n", vqid);
+
+ /* Find the vring matching @vqid and raise its soft IRQ on CPU1 */
+ list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node) {
+ for (i = 0; i < MAX_NUM_VRINGS; i++) {
+ struct rproc_vring *rvring = &rvdev->vring[i];
+
+ /* Send swirq to firmware */
+ if (rvring->notifyid == vqid) {
+ local->ipis[i].notifyid = vqid;
+ /* As we do not turn off CPU1 until start,
+ * we delay firmware kick
+ */
+ if (rproc->state == RPROC_RUNNING)
+ gic_raise_softirq(cpumask_of(1),
+ local->ipis[i].irq);
+ else
+ local->ipis[i].pending = true;
+ }
+ }
+ }
+}
+
+/*
+ * Stop the remote processor: hand CPU1 back to Linux. Always reports
+ * success so the rproc state machine completes even when the CPU
+ * cannot be re-onlined (e.g. nosmp).
+ */
+static int zynq_rproc_stop(struct rproc *rproc)
+{
+ int ret;
+ struct device *dev = rproc->dev.parent;
+
+ dev_dbg(rproc->dev.parent, "%s\n", __func__);
+
+ /* Cpu can't be power on - for example in nosmp mode */
+ ret = add_cpu(1);
+ if (ret)
+ dev_err(dev, "Can't power on cpu1 %d\n", ret);
+
+ return 0;
+}
+
+/*
+ * Parse firmware: register every "memory-region" from the DT as a
+ * carveout (DMA buffer, vring or generic region by node name), then
+ * load the resource table. A missing resource table (-EINVAL) is not
+ * an error.
+ */
+static int zynq_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+ int num_mems, i, ret;
+ struct device *dev = rproc->dev.parent;
+ struct device_node *np = dev->of_node;
+ struct rproc_mem_entry *mem;
+
+ num_mems = of_count_phandle_with_args(np, "memory-region", NULL);
+ if (num_mems <= 0)
+ return 0;
+ for (i = 0; i < num_mems; i++) {
+ struct device_node *node;
+ struct reserved_mem *rmem;
+
+ node = of_parse_phandle(np, "memory-region", i);
+ rmem = of_reserved_mem_lookup(node);
+ if (!rmem) {
+ dev_err(dev, "unable to acquire memory-region\n");
+ of_node_put(node);
+ return -EINVAL;
+ }
+ if (strstr(node->name, "vdev") &&
+ strstr(node->name, "buffer")) {
+ /* Register DMA region */
+ mem = rproc_mem_entry_init(dev, NULL,
+ (dma_addr_t)rmem->base,
+ rmem->size, rmem->base,
+ NULL, NULL,
+ node->name);
+ if (!mem) {
+ dev_err(dev,
+ "unable to initialize memory-region %s \n",
+ node->name);
+ of_node_put(node);
+ return -ENOMEM;
+ }
+ rproc_add_carveout(rproc, mem);
+ } else if (strstr(node->name, "vdev") &&
+ strstr(node->name, "vring")) {
+ /* Register vring */
+ mem = rproc_mem_entry_init(dev, NULL,
+ (dma_addr_t)rmem->base,
+ rmem->size, rmem->base,
+ NULL, NULL,
+ node->name);
+ /*
+ * Fix: NULL-check @mem BEFORE assigning ->va; the
+ * original dereferenced it first.
+ */
+ if (!mem) {
+ dev_err(dev,
+ "unable to initialize memory-region %s\n",
+ node->name);
+ of_node_put(node);
+ return -ENOMEM;
+ }
+ mem->va = devm_ioremap_wc(dev, rmem->base, rmem->size);
+ if (!mem->va) {
+ of_node_put(node);
+ return -ENOMEM;
+ }
+ rproc_add_carveout(rproc, mem);
+ } else {
+ mem = rproc_of_resm_mem_entry_init(dev, i,
+ rmem->size,
+ rmem->base,
+ node->name);
+ if (!mem) {
+ dev_err(dev,
+ "unable to initialize memory-region %s \n",
+ node->name);
+ of_node_put(node);
+ return -ENOMEM;
+ }
+ mem->va = devm_ioremap_wc(dev, rmem->base, rmem->size);
+ if (!mem->va) {
+ of_node_put(node);
+ return -ENOMEM;
+ }
+
+ rproc_add_carveout(rproc, mem);
+ }
+ /* Fix: drop the reference taken by of_parse_phandle() */
+ of_node_put(node);
+ }
+
+ ret = rproc_elf_load_rsc_table(rproc, fw);
+ if (ret == -EINVAL)
+ ret = 0;
+ return ret;
+}
+
+/* remoteproc operations; ELF loading uses the generic framework helpers */
+static struct rproc_ops zynq_rproc_ops = {
+ .start = zynq_rproc_start,
+ .stop = zynq_rproc_stop,
+ .load = rproc_elf_load_segments,
+ .parse_fw = zynq_parse_fw,
+ .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
+ .get_boot_addr = rproc_elf_get_boot_addr,
+ .kick = zynq_rproc_kick,
+};
+
+/* Just to detect bug if interrupt forwarding is broken */
+static irqreturn_t zynq_remoteproc_interrupt(int irq, void *dev_id)
+{
+ struct device *dev = dev_id;
+
+ dev_err(dev, "GIC IRQ %d is not forwarded correctly\n", irq);
+
+ /*
+ * MS: Calling this function doesn't need to be BUG
+ * especially for cases where firmware doesn't disable
+ * interrupts. In next probing can be some interrupts pending.
+ * The next scenario is for cases when you want to monitor
+ * non frequent interrupt through Linux kernel. Interrupt happen
+ * and it is forwarded to Linux which update own statistic
+ * in (/proc/interrupt) and forward it to firmware.
+ *
+ * gic_set_cpu(1, irq); - setup cpu1 as destination cpu
+ * gic_raise_softirq(cpumask_of(1), irq); - forward irq to firmware
+ */
+
+ /* Re-route the IRQ back to the remote CPU */
+ gic_set_cpu(1, irq);
+ return IRQ_HANDLED;
+}
+
+/* Free every claimed IRQ, route it back to CPU0, and empty the list */
+static void clear_irq(struct rproc *rproc)
+{
+ struct list_head *pos, *q;
+ struct irq_list *tmp;
+ struct zynq_rproc_pdata *local = rproc->priv;
+
+ dev_info(rproc->dev.parent, "Deleting the irq_list\n");
+ list_for_each_safe(pos, q, &local->irqs.list) {
+ tmp = list_entry(pos, struct irq_list, list);
+ free_irq(tmp->irq, rproc->dev.parent);
+ gic_set_cpu(0, tmp->irq);
+ list_del(pos);
+ kfree(tmp);
+ }
+}
+
+/*
+ * Probe: allocate the rproc, claim all DT-listed IRQs on behalf of the
+ * firmware (routing them to CPU1), register the vring0 IPI handler,
+ * and add the rproc. Error paths unwind in reverse order via the
+ * labels at the bottom.
+ */
+static int zynq_remoteproc_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct irq_list *tmp;
+ int count = 0;
+ struct zynq_rproc_pdata *local;
+
+ rproc = rproc_alloc(&pdev->dev, dev_name(&pdev->dev),
+ &zynq_rproc_ops, NULL,
+ sizeof(struct zynq_rproc_pdata));
+ if (!rproc) {
+ dev_err(&pdev->dev, "rproc allocation failed\n");
+ ret = -ENOMEM;
+ return ret;
+ }
+ local = rproc->priv;
+ local->rproc = rproc;
+
+ platform_set_drvdata(pdev, rproc);
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "dma_set_coherent_mask: %d\n", ret);
+ goto dma_mask_fault;
+ }
+
+ /* Init list for IRQs - it can be long list */
+ INIT_LIST_HEAD(&local->irqs.list);
+
+ /* Alloc IRQ based on DTS to be sure that no other driver will use it */
+ while (1) {
+ int irq;
+
+ irq = platform_get_irq(pdev, count++);
+ if (irq == -ENXIO || irq == -EINVAL)
+ break;
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp) {
+ ret = -ENOMEM;
+ goto irq_fault;
+ }
+
+ tmp->irq = irq;
+
+ dev_dbg(&pdev->dev, "%d: Alloc irq: %d\n", count, tmp->irq);
+
+ /* Allocating shared IRQs will ensure that any module will
+ * use these IRQs
+ */
+ ret = request_irq(tmp->irq, zynq_remoteproc_interrupt, 0,
+ dev_name(&pdev->dev), &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "IRQ %d already allocated\n",
+ tmp->irq);
+ goto irq_fault;
+ }
+
+ /*
+ * MS: Here is place for detecting problem with firmware
+ * which doesn't work correctly with interrupts
+ *
+ * MS: Comment if you want to count IRQs on Linux
+ */
+ gic_set_cpu(1, tmp->irq);
+ list_add(&tmp->list, &local->irqs.list);
+ }
+
+ /* Allocate free IPI number */
+ /* Read vring0 ipi number */
+ ret = of_property_read_u32(pdev->dev.of_node, "vring0",
+ &local->ipis[0].irq);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to read property");
+ goto irq_fault;
+ }
+
+ ret = set_ipi_handler(local->ipis[0].irq, ipi_kick,
+ "Firmware kick");
+ if (ret) {
+ dev_err(&pdev->dev, "IPI handler already registered\n");
+ goto irq_fault;
+ }
+
+ /* Read vring1 ipi number */
+ ret = of_property_read_u32(pdev->dev.of_node, "vring1",
+ &local->ipis[1].irq);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to read property");
+ goto ipi_fault;
+ }
+
+ rproc->auto_boot = autoboot;
+
+ ret = rproc_add(local->rproc);
+ if (ret) {
+ dev_err(&pdev->dev, "rproc registration failed\n");
+ goto ipi_fault;
+ }
+
+ return 0;
+
+ipi_fault:
+ clear_ipi_handler(local->ipis[0].irq);
+
+irq_fault:
+ clear_irq(rproc);
+
+dma_mask_fault:
+ rproc_free(rproc);
+
+ return ret;
+}
+
+/* Remove: tear down in reverse order of probe */
+static int zynq_remoteproc_remove(struct platform_device *pdev)
+{
+ struct rproc *rproc = platform_get_drvdata(pdev);
+ struct zynq_rproc_pdata *local = rproc->priv;
+
+ dev_info(&pdev->dev, "%s\n", __func__);
+
+ rproc_del(rproc);
+
+ clear_ipi_handler(local->ipis[0].irq);
+ clear_irq(rproc);
+
+ of_reserved_mem_device_release(&pdev->dev);
+ rproc_free(rproc);
+
+ return 0;
+}
+
+/* Match table for OF platform binding */
+static const struct of_device_id zynq_remoteproc_match[] = {
+ { .compatible = "xlnx,zynq_remoteproc", },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, zynq_remoteproc_match);
+
+/* Platform driver glue; all work happens in probe/remove */
+static struct platform_driver zynq_remoteproc_driver = {
+ .probe = zynq_remoteproc_probe,
+ .remove = zynq_remoteproc_remove,
+ .driver = {
+ .name = "zynq_remoteproc",
+ .of_match_table = zynq_remoteproc_match,
+ },
+};
+
+module_param_named(autoboot, autoboot, bool, 0444);
+MODULE_PARM_DESC(autoboot,
+ "enable | disable autoboot. (default: false)");
+
+/* Fix: author string was missing the closing '>' on the e-mail address */
+MODULE_AUTHOR("Michal Simek <monstr@monstr.eu>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Zynq remote processor control driver");
diff --git a/drivers/remoteproc/zynqmp_r5_remoteproc.c b/drivers/remoteproc/zynqmp_r5_remoteproc.c
new file mode 100644
index 000000000000..4c9103d0c211
--- /dev/null
+++ b/drivers/remoteproc/zynqmp_r5_remoteproc.c
@@ -0,0 +1,979 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Zynq R5 Remote Processor driver
+ *
+ * Copyright (C) 2015 - 2018 Xilinx Inc.
+ * Copyright (C) 2015 Jason Wu <j.wu@xilinx.com>
+ *
+ * Based on origin OMAP and Zynq Remote Processor driver
+ *
+ * Copyright (C) 2012 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2012 PetaLogix
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ */
+
+#include <linux/atomic.h>
+#include <linux/cpu.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/genalloc.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox/zynqmp-ipi-message.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/pfn.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "remoteproc_internal.h"
+
+#define MAX_RPROCS 2 /* Support up to 2 RPU */
+#define MAX_MEM_PNODES 4 /* Max power nodes for one RPU memory instance */
+
+#define DEFAULT_FIRMWARE_NAME "rproc-rpu-fw"
+
+/* PM proc states */
+#define PM_PROC_STATE_ACTIVE 1U
+
+/* IPI buffer MAX length */
+#define IPI_BUF_LEN_MAX 32U
+/* RX mailbox client buffer max length */
+#define RX_MBOX_CLIENT_BUF_MAX (IPI_BUF_LEN_MAX + \
+ sizeof(struct zynqmp_ipi_message))
+
+static bool autoboot __read_mostly;
+static bool allow_sysfs_kick __read_mostly;
+
+static const struct zynqmp_eemi_ops *eemi_ops;
+
+/**
+ * struct zynqmp_r5_mem - zynqmp rpu memory data
+ * @pnode_id: TCM power domain ids
+ * @res: memory resource
+ * @node: list node
+ */
+struct zynqmp_r5_mem {
+ u32 pnode_id[MAX_MEM_PNODES];
+ struct resource res;
+ struct list_head node;
+};
+
+/**
+ * struct zynqmp_r5_pdata - zynqmp rpu remote processor private data
+ * @dev: device of RPU instance
+ * @rproc: rproc handle
+ * @parent: RPU slot platform data
+ * @pnode_id: RPU CPU power domain id
+ * @mems: memory resources
+ * @is_r5_mode_set: indicate if r5 operation mode is set
+ * @tx_mc: tx mailbox client
+ * @rx_mc: rx mailbox client
+ * @tx_chan: tx mailbox channel
+ * @rx_chan: rx mailbox channel
+ * @workqueue: workqueue for the RPU remoteproc
+ * @tx_mc_skbs: socket buffers for tx mailbox client
+ * @rx_mc_buf: rx mailbox client buffer to save the rx message
+ * @remote_kick: flag to indicate if there is a kick from remote
+ */
+struct zynqmp_r5_pdata {
+ struct device dev;
+ struct rproc *rproc;
+ struct zynqmp_rpu_domain_pdata *parent;
+ u32 pnode_id;
+ struct list_head mems;
+ bool is_r5_mode_set;
+ struct mbox_client tx_mc;
+ struct mbox_client rx_mc;
+ struct mbox_chan *tx_chan;
+ struct mbox_chan *rx_chan;
+ struct work_struct workqueue;
+ struct sk_buff_head tx_mc_skbs;
+ unsigned char rx_mc_buf[RX_MBOX_CLIENT_BUF_MAX];
+ atomic_t remote_kick;
+};
+
+/**
+ * struct zynqmp_rpu_domain_pdata - zynqmp rpu platform data
+ * @rpus: table of RPUs
+ * @rpu_mode: RPU core configuration
+ */
+struct zynqmp_rpu_domain_pdata {
+ struct zynqmp_r5_pdata rpus[MAX_RPROCS];
+ enum rpu_oper_mode rpu_mode;
+};
+
+/*
+ * r5_set_mode - set RPU operation mode
+ * @pdata: Remote processor private data
+ *
+ * Set the RPU operation mode (split/lockstep, taken from the parent
+ * domain platform data) via the PMU firmware, and configure the TCM
+ * banks to match: combined for lockstep, split otherwise.
+ *
+ * Return: 0 for success, negative value for failure
+ */
+static int r5_set_mode(struct zynqmp_r5_pdata *pdata)
+{
+	u32 val[PAYLOAD_ARG_CNT] = {0}, expect;
+	struct zynqmp_rpu_domain_pdata *parent;
+	struct device *dev = &pdata->dev;
+	int ret;
+
+	/* The mode is sticky: program the hardware only once, until
+	 * zynqmp_r5_rproc_stop() clears the flag again.
+	 */
+	if (pdata->is_r5_mode_set)
+		return 0;
+	parent = pdata->parent;
+	expect = (u32)parent->rpu_mode;
+	ret = eemi_ops->ioctl(pdata->pnode_id, IOCTL_GET_RPU_OPER_MODE,
+			      0, 0, val);
+	if (ret < 0) {
+		dev_err(dev, "failed to get RPU oper mode.\n");
+		return ret;
+	}
+	if (val[0] == expect) {
+		dev_dbg(dev, "RPU mode matches: %x\n", val[0]);
+	} else {
+		ret = eemi_ops->ioctl(pdata->pnode_id,
+				      IOCTL_SET_RPU_OPER_MODE,
+				      expect, 0, val);
+		if (ret < 0) {
+			dev_err(dev,
+				"failed to set RPU oper mode.\n");
+			return ret;
+		}
+	}
+	/* TCM layout follows the core mode: combined in lockstep,
+	 * split in split mode.
+	 */
+	if (expect == (u32)PM_RPU_MODE_LOCKSTEP)
+		expect = (u32)PM_RPU_TCM_COMB;
+	else
+		expect = (u32)PM_RPU_TCM_SPLIT;
+	ret = eemi_ops->ioctl(pdata->pnode_id, IOCTL_TCM_COMB_CONFIG,
+			      expect, 0, val);
+	if (ret < 0) {
+		dev_err(dev, "failed to config TCM to %x.\n",
+			expect);
+		return ret;
+	}
+	pdata->is_r5_mode_set = true;
+	return 0;
+}
+
+/**
+ * r5_is_running - check if r5 is running
+ * @pdata: Remote processor private data
+ *
+ * Query the PMU firmware for the RPU power node status and report
+ * whether the processor is in the active state.
+ *
+ * Return: true if r5 is running, false otherwise
+ */
+static bool r5_is_running(struct zynqmp_r5_pdata *pdata)
+{
+	struct device *dev = &pdata->dev;
+	u32 status, requirements, usage;
+	bool active;
+
+	if (eemi_ops->get_node_status(pdata->pnode_id, &status,
+				      &requirements, &usage)) {
+		dev_err(dev, "Failed to get RPU node %d status.\n",
+			pdata->pnode_id);
+		return false;
+	}
+
+	active = (status == PM_PROC_STATE_ACTIVE);
+	if (active)
+		dev_dbg(dev, "RPU is running.\n");
+	else
+		dev_dbg(dev, "RPU is not running.\n");
+	return active;
+}
+
+/*
+ * ZynqMP R5 remoteproc memory release function
+ */
+static int zynqmp_r5_mem_release(struct rproc *rproc,
+				 struct rproc_mem_entry *mem)
+{
+	struct device *dev = &rproc->dev;
+	struct zynqmp_r5_mem *priv = mem->priv;
+	int idx;
+
+	if (!priv)
+		return 0;
+
+	/* Release every power node recorded for this region; the array
+	 * is terminated by the first zero entry.
+	 */
+	for (idx = 0; idx < MAX_MEM_PNODES && priv->pnode_id[idx]; idx++) {
+		int err;
+
+		dev_dbg(dev, "%s, pnode %d\n",
+			__func__, priv->pnode_id[idx]);
+		err = eemi_ops->release_node(priv->pnode_id[idx]);
+		if (err < 0) {
+			dev_err(dev,
+				"failed to release power node: %u\n",
+				priv->pnode_id[idx]);
+			return err;
+		}
+	}
+	return 0;
+}
+
+/*
+ * ZynqMP R5 remoteproc operations
+ */
+static int zynqmp_r5_rproc_start(struct rproc *rproc)
+{
+	struct device *dev = rproc->dev.parent;
+	struct zynqmp_r5_pdata *local = rproc->priv;
+	enum rpu_boot_mem bootmem;
+	int ret;
+
+	/* Set up R5 */
+	ret = r5_set_mode(local);
+	if (ret) {
+		dev_err(dev, "failed to set R5 operation mode.\n");
+		return ret;
+	}
+	/* A boot address in the top 256MB region selects the high
+	 * exception vector base (HIVEC), otherwise LOVEC is used.
+	 */
+	if ((rproc->bootaddr & 0xF0000000) == 0xF0000000)
+		bootmem = PM_RPU_BOOTMEM_HIVEC;
+	else
+		bootmem = PM_RPU_BOOTMEM_LOVEC;
+	/* NOTE(review): HIVEC is reported as "OCM" and LOVEC as "TCM" -
+	 * confirm this mapping against the platform memory map.
+	 */
+	dev_info(dev, "RPU boot from %s.",
+		 bootmem == PM_RPU_BOOTMEM_HIVEC ? "OCM" : "TCM");
+
+	ret = eemi_ops->request_wakeup(local->pnode_id, 1, bootmem,
+				       ZYNQMP_PM_REQUEST_ACK_NO);
+	if (ret < 0) {
+		dev_err(dev, "failed to boot R5.\n");
+		return ret;
+	}
+	return 0;
+}
+
+static int zynqmp_r5_rproc_stop(struct rproc *rproc)
+{
+	struct zynqmp_r5_pdata *local = rproc->priv;
+	int ret;
+
+	/* Ask the PMU firmware to power the RPU node down */
+	ret = eemi_ops->force_powerdown(local->pnode_id,
+					ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+	if (ret < 0) {
+		dev_err(&local->dev, "failed to shutdown R5.\n");
+		return ret;
+	}
+	/* Force the operation mode to be reprogrammed on the next start */
+	local->is_r5_mode_set = false;
+	return 0;
+}
+
+
+/**
+ * zynqmp_r5_parse_fw() - parse firmware and register memory carveouts
+ * @rproc: rproc handle
+ * @fw: firmware image
+ *
+ * Registers the DT "memory-region" entries (vdev buffers, vrings, and
+ * generic firmware-load regions) as remoteproc carveouts, requests and
+ * maps the TCM child nodes, then loads the ELF resource table.
+ *
+ * Fixes over the original: the vring branch dereferenced @mem before
+ * checking it for NULL; the result of of_address_to_resource() on the
+ * TCM children was ignored; and the of_parse_phandle()/child node
+ * references were never dropped with of_node_put().
+ *
+ * Return: 0 for success (a firmware without a resource table is
+ * accepted), negative value for failure.
+ */
+static int zynqmp_r5_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+	int num_mems, i, ret;
+	struct zynqmp_r5_pdata *pdata = rproc->priv;
+	struct device *dev = &pdata->dev;
+	struct device_node *np = dev->of_node;
+	struct rproc_mem_entry *mem;
+	struct device_node *child;
+	struct resource rsc;
+
+	num_mems = of_count_phandle_with_args(np, "memory-region", NULL);
+	if (num_mems <= 0)
+		return 0;
+
+	for (i = 0; i < num_mems; i++) {
+		struct device_node *node;
+		struct zynqmp_r5_mem *zynqmp_mem;
+		struct reserved_mem *rmem;
+
+		node = of_parse_phandle(np, "memory-region", i);
+		rmem = of_reserved_mem_lookup(node);
+		if (!rmem) {
+			dev_err(dev, "unable to acquire memory-region\n");
+			of_node_put(node);
+			return -EINVAL;
+		}
+		if (strstr(node->name, "vdev") &&
+		    strstr(node->name, "buffer")) {
+			int id;
+			char name[16];
+
+			/* Names follow "vdev<id>buffer"; the id digit sits
+			 * at a fixed offset — TODO confirm naming scheme.
+			 */
+			id = node->name[8] - '0';
+			snprintf(name, sizeof(name), "vdev%dbuffer", id);
+			/* Register DMA region */
+			mem = rproc_mem_entry_init(dev, NULL,
+						   (dma_addr_t)rmem->base,
+						   rmem->size, rmem->base,
+						   NULL, NULL,
+						   name);
+			if (!mem) {
+				dev_err(dev, "unable to initialize memory-region %s\n",
+					node->name);
+				of_node_put(node);
+				return -ENOMEM;
+			}
+			dev_dbg(dev, "parsed %s at %llx\r\n", mem->name,
+				mem->dma);
+			rproc_add_carveout(rproc, mem);
+			of_node_put(node);
+			continue;
+		} else if (strstr(node->name, "vdev") &&
+			   strstr(node->name, "vring")) {
+			int id, vring_id;
+			char name[16];
+
+			id = node->name[8] - '0';
+			vring_id = node->name[14] - '0';
+			snprintf(name, sizeof(name), "vdev%dvring%d", id,
+				 vring_id);
+			/* Register vring */
+			mem = rproc_mem_entry_init(dev, NULL,
+						   (dma_addr_t)rmem->base,
+						   rmem->size, rmem->base,
+						   NULL, NULL,
+						   name);
+			/* Check the entry before dereferencing it; the
+			 * original mapped mem->va first (NULL deref).
+			 */
+			if (!mem) {
+				dev_err(dev, "unable to initialize memory-region %s\n",
+					node->name);
+				of_node_put(node);
+				return -ENOMEM;
+			}
+			mem->va = devm_ioremap_wc(dev, rmem->base, rmem->size);
+			if (!mem->va) {
+				of_node_put(node);
+				return -ENOMEM;
+			}
+			dev_dbg(dev, "parsed %s at %llx\r\n", mem->name,
+				mem->dma);
+			rproc_add_carveout(rproc, mem);
+			of_node_put(node);
+			continue;
+		} else {
+			mem = rproc_of_resm_mem_entry_init(dev, i,
+							   rmem->size,
+							   rmem->base,
+							   node->name);
+			if (!mem) {
+				dev_err(dev, "unable to initialize memory-region %s \n",
+					node->name);
+				of_node_put(node);
+				return -ENOMEM;
+			}
+			mem->va = devm_ioremap_wc(dev, rmem->base, rmem->size);
+			if (!mem->va) {
+				of_node_put(node);
+				return -ENOMEM;
+			}
+
+			rproc_add_carveout(rproc, mem);
+		}
+
+		/*
+		 * It is non-DMA memory, used for firmware loading.
+		 * It will be added to the R5 remoteproc mappings later.
+		 */
+		zynqmp_mem = devm_kzalloc(dev, sizeof(*zynqmp_mem), GFP_KERNEL);
+		if (!zynqmp_mem) {
+			of_node_put(node);
+			return -ENOMEM;
+		}
+		ret = of_address_to_resource(node, 0, &zynqmp_mem->res);
+		if (ret) {
+			dev_err(dev, "unable to resolve memory region.\n");
+			of_node_put(node);
+			return ret;
+		}
+		list_add_tail(&zynqmp_mem->node, &pdata->mems);
+		dev_dbg(dev, "%s, non-dma mem %s\n",
+			__func__, of_node_full_name(node));
+		of_node_put(node);
+	}
+
+	/* map TCM memories */
+	for_each_available_child_of_node(np, child) {
+		struct property *prop;
+		const __be32 *cur;
+		u32 pnode_id;
+		void *va;
+		dma_addr_t dma;
+		resource_size_t size;
+
+		/* The original ignored this result and used rsc anyway */
+		ret = of_address_to_resource(child, 0, &rsc);
+		if (ret) {
+			dev_err(dev, "unable to resolve TCM address.\n");
+			of_node_put(child);
+			return ret;
+		}
+
+		i = 0;
+		of_property_for_each_u32(child, "pnode-id", prop, cur,
+					 pnode_id) {
+			ret = eemi_ops->request_node(pnode_id,
+					ZYNQMP_PM_CAPABILITY_ACCESS, 0,
+					ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+			if (ret < 0) {
+				dev_err(dev, "failed to request power node: %u\n",
+					pnode_id);
+				of_node_put(child);
+				return ret;
+			}
+			ret = r5_set_mode(pdata);
+			if (ret < 0) {
+				dev_err(dev, "failed to set R5 operation mode.\n");
+				of_node_put(child);
+				return ret;
+			}
+		}
+		size = resource_size(&rsc);
+
+		va = devm_ioremap_wc(dev, rsc.start, size);
+		if (!va) {
+			of_node_put(child);
+			return -ENOMEM;
+		}
+
+		/* zero out tcm base address */
+		if (rsc.start & 0xffe00000) {
+			rsc.start &= 0x000fffff;
+			/* handle tcm banks 1 a and b (0xffe90000 and 0xffeb0000) */
+			if (rsc.start & 0x80000)
+				rsc.start -= 0x90000;
+		}
+
+		dma = (dma_addr_t)rsc.start;
+		mem = rproc_mem_entry_init(dev, va, dma, (int)size, rsc.start,
+					   NULL, zynqmp_r5_mem_release,
+					   rsc.name);
+		if (!mem) {
+			of_node_put(child);
+			return -ENOMEM;
+		}
+
+		rproc_add_carveout(rproc, mem);
+	}
+
+	/* A firmware image without a resource table is still loadable */
+	ret = rproc_elf_load_rsc_table(rproc, fw);
+	if (ret == -EINVAL)
+		ret = 0;
+	return ret;
+}
+
+/**
+ * zynqmp_r5_rproc_kick() - kick the remote processor
+ * @rproc: rproc handle
+ * @vqid: virtqueue index to notify, or negative for a bare kick
+ *
+ * A non-negative @vqid is copied into the IPI buffer and sent over the
+ * TX mailbox; a negative @vqid raises the IPI without a payload.
+ */
+static void zynqmp_r5_rproc_kick(struct rproc *rproc, int vqid)
+{
+	struct device *dev = rproc->dev.parent;
+	struct zynqmp_r5_pdata *local = rproc->priv;
+
+	dev_dbg(dev, "KICK Firmware to start send messages vqid %d\n", vqid);
+
+	if (vqid < 0) {
+		/* If vqid is negative, do not pass the vqid to the
+		 * mailbox, as vqid is supposed to be 0 or positive.
+		 * It also gives a way to just kick instead but
+		 * not use the IPI buffer. It is better to provide
+		 * a proper way to pass the short message, which will
+		 * need to sync to upstream first; for now,
+		 * use negative vqid to assume no message will be
+		 * passed with IPI buffer, but just raise interrupt.
+		 * This will be faster as it doesn't need to copy the
+		 * message to the IPI buffer.
+		 *
+		 * The return value is ignored: failure means a kick
+		 * is already pending in the mailbox queue.
+		 */
+		(void)mbox_send_message(local->tx_chan, NULL);
+	} else {
+		struct sk_buff *skb;
+		unsigned int skb_len;
+		struct zynqmp_ipi_message *mb_msg;
+		int ret;
+
+		/* Header plus the vqid payload. The original used
+		 * sizeof(mb_msg) — the pointer size — which merely
+		 * over-allocated; size the struct itself instead.
+		 */
+		skb_len = (unsigned int)(sizeof(vqid) + sizeof(*mb_msg));
+		skb = alloc_skb(skb_len, GFP_ATOMIC);
+		if (!skb) {
+			dev_err(dev,
+				"Failed to allocate skb to kick remote.\n");
+			return;
+		}
+		mb_msg = (struct zynqmp_ipi_message *)skb_put(skb, skb_len);
+		mb_msg->len = sizeof(vqid);
+		memcpy(mb_msg->data, &vqid, sizeof(vqid));
+		/* Keep the skb alive until zynqmp_r5_mb_tx_done() */
+		skb_queue_tail(&local->tx_mc_skbs, skb);
+		ret = mbox_send_message(local->tx_chan, mb_msg);
+		if (ret < 0) {
+			dev_warn(dev, "Failed to kick remote.\n");
+			skb_dequeue_tail(&local->tx_mc_skbs);
+			kfree_skb(skb);
+		}
+	}
+}
+
+/* Report whether the remote has kicked; optionally copy the latest
+ * received IPI message into @buf/@len.
+ */
+static bool zynqmp_r5_rproc_peek_remote_kick(struct rproc *rproc,
+					     char *buf, size_t *len)
+{
+	struct zynqmp_r5_pdata *local = rproc->priv;
+	struct zynqmp_ipi_message *msg;
+
+	dev_dbg(rproc->dev.parent, "Peek if remote has kicked\n");
+
+	if (!atomic_read(&local->remote_kick))
+		return false;
+
+	if (buf && len) {
+		msg = (struct zynqmp_ipi_message *)local->rx_mc_buf;
+		memcpy(buf, msg->data, msg->len);
+		*len = (size_t)msg->len;
+	}
+	return true;
+}
+
+static void zynqmp_r5_rproc_ack_remote_kick(struct rproc *rproc)
+{
+	struct device *dev = rproc->dev.parent;
+	struct zynqmp_r5_pdata *local = rproc->priv;
+
+	dev_dbg(dev, "Ack remote\n");
+
+	/* Clear the pending flag, then release the RX mailbox so the
+	 * remote side may deliver the next message.
+	 */
+	atomic_set(&local->remote_kick, 0);
+	(void)mbox_send_message(local->rx_chan, NULL);
+}
+
+/* ZynqMP R5 remoteproc operations; ELF loading/parsing helpers come
+ * from the remoteproc core.
+ */
+static struct rproc_ops zynqmp_r5_rproc_ops = {
+	.start		= zynqmp_r5_rproc_start,
+	.stop		= zynqmp_r5_rproc_stop,
+	.load		= rproc_elf_load_segments,
+	.parse_fw	= zynqmp_r5_parse_fw,
+	.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
+	.sanity_check	= rproc_elf_sanity_check,
+	.get_boot_addr	= rproc_elf_get_boot_addr,
+	.kick		= zynqmp_r5_rproc_kick,
+	.peek_remote_kick	= zynqmp_r5_rproc_peek_remote_kick,
+	.ack_remote_kick	= zynqmp_r5_rproc_ack_remote_kick,
+};
+
+/**
+ * zynqmp_r5_mem_probe() - probes RPU TCM memory device
+ * @pdata: pointer to the RPU remoteproc private data
+ * @node: pointer to the memory node
+ *
+ * Function to retrieve memories resources for RPU TCM memory device.
+ *
+ * Return: 0 for success, negative value for failure.
+ */
+static int zynqmp_r5_mem_probe(struct zynqmp_r5_pdata *pdata,
+			       struct device_node *node)
+{
+	struct device *dev;
+	struct zynqmp_r5_mem *mem;
+	int ret;
+
+	dev = &pdata->dev;
+	mem = devm_kzalloc(dev, sizeof(*mem), GFP_KERNEL);
+	if (!mem)
+		return -ENOMEM;
+	ret = of_address_to_resource(node, 0, &mem->res);
+	if (ret < 0) {
+		dev_err(dev, "failed to get resource of memory %s",
+			of_node_full_name(node));
+		return -EINVAL;
+	}
+
+	/* Get the power domain id(s). mem->pnode_id holds at most
+	 * MAX_MEM_PNODES entries, so bound the writes — the original
+	 * loop could overflow the array on a malformed device tree.
+	 */
+	if (of_find_property(node, "pnode-id", NULL)) {
+		struct property *prop;
+		const __be32 *cur;
+		u32 val;
+		int i = 0;
+
+		of_property_for_each_u32(node, "pnode-id", prop, cur, val) {
+			if (i >= MAX_MEM_PNODES) {
+				dev_warn(dev, "too many pnode-id entries in %s\n",
+					 of_node_full_name(node));
+				break;
+			}
+			mem->pnode_id[i++] = val;
+		}
+	}
+	list_add_tail(&mem->node, &pdata->mems);
+	return 0;
+}
+
+/**
+ * zynqmp_r5_release() - ZynqMP R5 device release function
+ * @dev: pointer to the device struct of ZynqMP R5
+ *
+ * Function to release ZynqMP R5 device. Called by the driver core when
+ * the last reference to the device is dropped; undoes everything
+ * zynqmp_r5_probe() set up.
+ */
+static void zynqmp_r5_release(struct device *dev)
+{
+	struct zynqmp_r5_pdata *pdata;
+	struct rproc *rproc;
+	struct sk_buff *skb;
+
+	pdata = dev_get_drvdata(dev);
+	rproc = pdata->rproc;
+	if (rproc) {
+		rproc_del(rproc);
+		rproc_free(rproc);
+	}
+	if (pdata->tx_chan)
+		mbox_free_channel(pdata->tx_chan);
+	if (pdata->rx_chan)
+		mbox_free_channel(pdata->rx_chan);
+	/* Discard all SKBs still waiting for a tx_done callback */
+	while (!skb_queue_empty(&pdata->tx_mc_skbs)) {
+		skb = skb_dequeue(&pdata->tx_mc_skbs);
+		kfree_skb(skb);
+	}
+
+	/* Drop the reference probe took on the parent platform device */
+	put_device(dev->parent);
+}
+
+/**
+ * event_notified_idr_cb() - event notified idr callback
+ * @id: idr id
+ * @ptr: pointer to idr private data
+ * @data: data passed to idr_for_each callback
+ *
+ * Pass notification to remoteproc virtio
+ *
+ * Return: 0. having return is to satisfy the idr_for_each() function
+ * pointer input argument requirement.
+ **/
+static int event_notified_idr_cb(int id, void *ptr, void *data)
+{
+	struct rproc *rproc = data;
+
+	(void)rproc_vq_interrupt(rproc, id);
+	return 0;
+}
+
+/**
+ * handle_event_notified() - remoteproc notification work function
+ * @work: pointer to the work structure
+ *
+ * It checks each registered remoteproc notify IDs.
+ */
+static void handle_event_notified(struct work_struct *work)
+{
+	struct rproc *rproc;
+	struct zynqmp_r5_pdata *local;
+
+	local = container_of(work, struct zynqmp_r5_pdata, workqueue);
+
+	/* Release the RX mailbox so the next message can arrive */
+	(void)mbox_send_message(local->rx_chan, NULL);
+	rproc = local->rproc;
+	/* In sysfs-kick mode, userspace handles the event instead */
+	if (rproc->sysfs_kick) {
+		sysfs_notify(&rproc->dev.kobj, NULL, "remote_kick");
+		return;
+	}
+	/*
+	 * We only use IPI for interrupt. The firmware side may or may
+	 * not write the notifyid when it trigger IPI.
+	 * And thus, we scan through all the registered notifyids.
+	 */
+	idr_for_each(&rproc->notifyids, event_notified_idr_cb, rproc);
+}
+
+/**
+ * zynqmp_r5_mb_rx_cb() - Receive channel mailbox callback
+ * @cl: mailbox client
+ * @mssg: message pointer
+ *
+ * It will schedule the R5 notification work.
+ */
+static void zynqmp_r5_mb_rx_cb(struct mbox_client *cl, void *mssg)
+{
+	struct zynqmp_r5_pdata *local;
+
+	local = container_of(cl, struct zynqmp_r5_pdata, rx_mc);
+	if (mssg) {
+		struct zynqmp_ipi_message *ipi_msg, *buf_msg;
+		size_t len;
+
+		/* Snapshot the payload into rx_mc_buf, truncating to the
+		 * IPI buffer capacity, so peek_remote_kick() can read it
+		 * after this callback returns.
+		 */
+		ipi_msg = (struct zynqmp_ipi_message *)mssg;
+		buf_msg = (struct zynqmp_ipi_message *)local->rx_mc_buf;
+		len = (ipi_msg->len >= IPI_BUF_LEN_MAX) ?
+		      IPI_BUF_LEN_MAX : ipi_msg->len;
+		buf_msg->len = len;
+		memcpy(buf_msg->data, ipi_msg->data, len);
+	}
+	atomic_set(&local->remote_kick, 1);
+	schedule_work(&local->workqueue);
+}
+
+/**
+ * zynqmp_r5_mb_tx_done() - Request has been sent to the remote
+ * @cl: mailbox client
+ * @mssg: pointer to the message which has been sent
+ * @r: status of last TX - OK or error
+ *
+ * It will be called by the mailbox framework when the last TX has done.
+ */
+static void zynqmp_r5_mb_tx_done(struct mbox_client *cl, void *mssg, int r)
+{
+	struct zynqmp_r5_pdata *local;
+
+	/* A NULL message was a bare kick with no queued skb */
+	if (!mssg)
+		return;
+
+	local = container_of(cl, struct zynqmp_r5_pdata, tx_mc);
+	kfree_skb(skb_dequeue(&local->tx_mc_skbs));
+}
+
+/**
+ * zynqmp_r5_setup_mbox() - Setup mailboxes
+ *
+ * @pdata: pointer to the ZynqMP R5 processor platform data
+ * @node: pointer of the device node
+ *
+ * Function to setup mailboxes to talk to RPU.
+ *
+ * Return: 0 for success, negative value for failure.
+ */
+static int zynqmp_r5_setup_mbox(struct zynqmp_r5_pdata *pdata,
+				struct device_node *node)
+{
+	struct device *dev = &pdata->dev;
+	struct mbox_client *mclient;
+
+	/* Setup TX mailbox channel client */
+	mclient = &pdata->tx_mc;
+	mclient->dev = dev;
+	mclient->rx_callback = NULL;
+	mclient->tx_block = false;
+	mclient->knows_txdone = false;
+	mclient->tx_done = zynqmp_r5_mb_tx_done;
+
+	/* Setup RX mailbox channel client */
+	mclient = &pdata->rx_mc;
+	mclient->dev = dev;
+	mclient->rx_callback = zynqmp_r5_mb_rx_cb;
+	mclient->tx_block = false;
+	mclient->knows_txdone = false;
+
+	INIT_WORK(&pdata->workqueue, handle_event_notified);
+
+	atomic_set(&pdata->remote_kick, 0);
+	/* Request TX and RX channels; on failure the channels are left
+	 * NULL so zynqmp_r5_release() skips freeing them.
+	 */
+	pdata->tx_chan = mbox_request_channel_byname(&pdata->tx_mc, "tx");
+	if (IS_ERR(pdata->tx_chan)) {
+		dev_err(dev, "failed to request mbox tx channel.\n");
+		pdata->tx_chan = NULL;
+		return -EINVAL;
+	}
+	pdata->rx_chan = mbox_request_channel_byname(&pdata->rx_mc, "rx");
+	if (IS_ERR(pdata->rx_chan)) {
+		dev_err(dev, "failed to request mbox rx channel.\n");
+		pdata->rx_chan = NULL;
+		return -EINVAL;
+	}
+	skb_queue_head_init(&pdata->tx_mc_skbs);
+	return 0;
+}
+
+/**
+ * zynqmp_r5_probe() - Probes ZynqMP R5 processor device node
+ * @pdata: pointer to the ZynqMP R5 processor platform data
+ * @pdev: parent RPU domain platform device
+ * @node: pointer of the device node
+ *
+ * Function to retrieve the information of the ZynqMP R5 device node.
+ *
+ * Fix over the original: the child-node reference is now dropped with
+ * of_node_put() when breaking out of the memory-probe loop on error.
+ *
+ * Return: 0 for success, negative value for failure.
+ */
+static int zynqmp_r5_probe(struct zynqmp_r5_pdata *pdata,
+			   struct platform_device *pdev,
+			   struct device_node *node)
+{
+	struct device *dev = &pdata->dev;
+	struct rproc *rproc;
+	struct device_node *nc;
+	int ret;
+
+	/* Create device for ZynqMP R5 device */
+	dev->parent = &pdev->dev;
+	dev->release = zynqmp_r5_release;
+	dev->of_node = node;
+	dev_set_name(dev, "%s", of_node_full_name(node));
+	dev_set_drvdata(dev, pdata);
+	ret = device_register(dev);
+	if (ret) {
+		dev_err(dev, "failed to register device.\n");
+		return ret;
+	}
+	/* Hold the parent while this child device exists; dropped in
+	 * zynqmp_r5_release().
+	 */
+	get_device(&pdev->dev);
+
+	/* Allocate remoteproc instance */
+	rproc = rproc_alloc(dev, dev_name(dev), &zynqmp_r5_rproc_ops, NULL, 0);
+	if (!rproc) {
+		dev_err(dev, "rproc allocation failed.\n");
+		ret = -ENOMEM;
+		goto error;
+	}
+	rproc->auto_boot = autoboot;
+	pdata->rproc = rproc;
+	rproc->priv = pdata;
+
+	/*
+	 * The device has not been spawned from a device tree, so
+	 * arch_setup_dma_ops has not been not called, thus leaving
+	 * the device with dummy DMA ops.
+	 * Fix this by inheriting the parent's DMA ops and mask.
+	 */
+	rproc->dev.dma_mask = pdev->dev.dma_mask;
+	set_dma_ops(&rproc->dev, get_dma_ops(&pdev->dev));
+
+	/* Probe R5 memory devices */
+	INIT_LIST_HEAD(&pdata->mems);
+	for_each_available_child_of_node(node, nc) {
+		ret = zynqmp_r5_mem_probe(pdata, nc);
+		if (ret) {
+			dev_err(dev, "failed to probe memory %s.\n",
+				of_node_full_name(nc));
+			/* Drop the iterator's reference on early exit */
+			of_node_put(nc);
+			goto error;
+		}
+	}
+
+	/* Set up DMA mask */
+	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+	if (ret) {
+		dev_warn(dev, "dma_set_coherent_mask failed: %d\n", ret);
+		/* If DMA is not configured yet, try to configure it. */
+		ret = of_dma_configure(dev, node, true);
+		if (ret) {
+			dev_err(dev, "failed to configure DMA.\n");
+			goto error;
+		}
+	}
+
+	/* Get R5 power domain node */
+	ret = of_property_read_u32(node, "pnode-id", &pdata->pnode_id);
+	if (ret) {
+		dev_err(dev, "failed to get power node id.\n");
+		goto error;
+	}
+
+	/* Check if R5 is already running (e.g. started by the bootloader)
+	 * and reflect that in the rproc state.
+	 */
+	if (r5_is_running(pdata)) {
+		atomic_inc(&rproc->power);
+		rproc->state = RPROC_RUNNING;
+	}
+
+	if (!of_get_property(dev->of_node, "mboxes", NULL)) {
+		dev_info(dev, "no mailboxes.\n");
+	} else {
+		ret = zynqmp_r5_setup_mbox(pdata, node);
+		if (ret < 0)
+			goto error;
+	}
+
+	/* Add R5 remoteproc */
+	ret = rproc_add(rproc);
+	if (ret) {
+		dev_err(dev, "rproc registration failed\n");
+		goto error;
+	}
+
+	if (allow_sysfs_kick) {
+		dev_info(dev, "Trying to create remote sysfs entry.\n");
+		rproc->sysfs_kick = 1;
+		(void)rproc_create_kick_sysfs(rproc);
+	}
+
+	return 0;
+error:
+	/* Free the rproc here and clear it so zynqmp_r5_release() does
+	 * not free it a second time when device_unregister() runs.
+	 */
+	if (pdata->rproc)
+		rproc_free(pdata->rproc);
+	pdata->rproc = NULL;
+	device_unregister(dev);
+	put_device(&pdev->dev);
+	return ret;
+}
+
+/**
+ * zynqmp_r5_remoteproc_probe() - probe the RPU domain platform device
+ * @pdev: RPU domain platform device
+ *
+ * Reads the "core_conf" mode, then probes each child RPU node.
+ *
+ * Fixes over the original: the child index is bounded by MAX_RPROCS
+ * (extra DT children previously overflowed local->rpus[]), the child
+ * node reference is dropped on early exit, and @prop is a plain
+ * const char * so the strcmp() calls are well-typed.
+ *
+ * Return: 0 for success, negative value for failure.
+ */
+static int zynqmp_r5_remoteproc_probe(struct platform_device *pdev)
+{
+	const char *prop;
+	int ret = 0, i;
+	struct zynqmp_rpu_domain_pdata *local;
+	struct device *dev = &pdev->dev;
+	struct device_node *nc;
+
+	eemi_ops = zynqmp_pm_get_eemi_ops();
+	if (IS_ERR(eemi_ops))
+		return PTR_ERR(eemi_ops);
+
+	local = devm_kzalloc(dev, sizeof(*local), GFP_KERNEL);
+	if (!local)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, local);
+
+	prop = of_get_property(dev->of_node, "core_conf", NULL);
+	if (!prop) {
+		dev_err(&pdev->dev, "core_conf is not used.\n");
+		return -EINVAL;
+	}
+
+	dev_info(dev, "RPU core_conf: %s\n", prop);
+	if (!strcmp(prop, "split")) {
+		local->rpu_mode = PM_RPU_MODE_SPLIT;
+	} else if (!strcmp(prop, "lockstep")) {
+		local->rpu_mode = PM_RPU_MODE_LOCKSTEP;
+	} else {
+		dev_err(dev,
+			"Invalid core_conf mode provided - %s , %d\n",
+			prop, (int)local->rpu_mode);
+		return -EINVAL;
+	}
+
+	i = 0;
+	for_each_available_child_of_node(dev->of_node, nc) {
+		if (i >= MAX_RPROCS) {
+			dev_warn(dev, "only %d RPUs are supported, ignoring %s\n",
+				 MAX_RPROCS, of_node_full_name(nc));
+			of_node_put(nc);
+			break;
+		}
+		local->rpus[i].parent = local;
+		ret = zynqmp_r5_probe(&local->rpus[i], pdev, nc);
+		if (ret) {
+			dev_err(dev, "failed to probe rpu %s.\n",
+				of_node_full_name(nc));
+			of_node_put(nc);
+			return ret;
+		}
+		i++;
+	}
+
+	return 0;
+}
+
+static int zynqmp_r5_remoteproc_remove(struct platform_device *pdev)
+{
+	struct zynqmp_rpu_domain_pdata *local = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < MAX_RPROCS; i++) {
+		struct zynqmp_r5_pdata *rpu = &local->rpus[i];
+		struct rproc *rproc;
+
+		/* Tear down each RPU slot; pointers are cleared so
+		 * zynqmp_r5_release() (run by device_unregister below)
+		 * does not free them a second time.
+		 */
+		rproc = rpu->rproc;
+		if (rproc) {
+			rproc_del(rproc);
+			rproc_free(rproc);
+			rpu->rproc = NULL;
+		}
+		if (rpu->tx_chan) {
+			mbox_free_channel(rpu->tx_chan);
+			rpu->tx_chan = NULL;
+		}
+		if (rpu->rx_chan) {
+			mbox_free_channel(rpu->rx_chan);
+			rpu->rx_chan = NULL;
+		}
+
+		device_unregister(&rpu->dev);
+	}
+
+	return 0;
+}
+
+/* Match table for OF platform binding */
+static const struct of_device_id zynqmp_r5_remoteproc_match[] = {
+ { .compatible = "xlnx,zynqmp-r5-remoteproc-1.0", },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, zynqmp_r5_remoteproc_match);
+
+static struct platform_driver zynqmp_r5_remoteproc_driver = {
+ .probe = zynqmp_r5_remoteproc_probe,
+ .remove = zynqmp_r5_remoteproc_remove,
+ .driver = {
+ .name = "zynqmp_r5_remoteproc",
+ .of_match_table = zynqmp_r5_remoteproc_match,
+ },
+};
+module_platform_driver(zynqmp_r5_remoteproc_driver);
+
+module_param_named(autoboot, autoboot, bool, 0444);
+MODULE_PARM_DESC(autoboot,
+ "enable | disable autoboot. (default: true)");
+module_param_named(allow_sysfs_kick, allow_sysfs_kick, bool, 0444);
+MODULE_PARM_DESC(allow_sysfs_kick,
+ "enable | disable allow kick from sysfs. (default: false)");
+
+MODULE_AUTHOR("Jason Wu <j.wu@xilinx.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ZynqMP R5 remote processor control driver");
diff --git a/drivers/rtc/rtc-zynqmp.c b/drivers/rtc/rtc-zynqmp.c
index 4b1077e2f826..0f0edbeec7cf 100644
--- a/drivers/rtc/rtc-zynqmp.c
+++ b/drivers/rtc/rtc-zynqmp.c
@@ -40,6 +40,12 @@
#define RTC_CALIB_MASK 0x1FFFFF
#define RTC_ALRM_MASK BIT(1)
#define RTC_MSEC 1000
+#define RTC_FR_MASK 0xF0000
+#define RTC_SEC_MAX_VAL 0xFFFFFFFF
+#define RTC_FR_MAX_TICKS 16
+#define RTC_OFFSET_MAX 150000
+#define RTC_OFFSET_MIN -150000
+#define RTC_PPB 1000000000LL
struct xlnx_rtc_dev {
struct rtc_device *rtc;
@@ -184,12 +190,84 @@ static void xlnx_init_rtc(struct xlnx_rtc_dev *xrtcdev)
writel(xrtcdev->calibval, (xrtcdev->reg_base + RTC_CALIB_WR));
}
+/* Read the current clock offset (in parts per billion) back from the
+ * calibration register, combining the seconds-tick and fractional-tick
+ * fields.
+ */
+static int xlnx_rtc_read_offset(struct device *dev, long *offset)
+{
+	struct xlnx_rtc_dev *xrtcdev = dev_get_drvdata(dev);
+	long offset_val;
+	unsigned int reg;
+	/* PPB per tick. NOTE(review): divides by calibval with no zero
+	 * guard — confirm calibval is always non-zero after probe.
+	 */
+	unsigned int tick_mult = RTC_PPB / xrtcdev->calibval;
+
+	reg = readl(xrtcdev->reg_base + RTC_CALIB_RD);
+
+	/* Offset with seconds ticks */
+	offset_val = reg & RTC_TICK_MASK;
+	offset_val = offset_val - xrtcdev->calibval;
+	offset_val = offset_val * tick_mult;
+
+	/* Offset with fractional ticks (1/16th of a tick each) */
+	if (reg & RTC_FR_EN)
+		offset_val += ((reg & RTC_FR_MASK) >> RTC_FR_DATSHIFT)
+			* (tick_mult / RTC_FR_MAX_TICKS);
+	*offset = offset_val;
+
+	return 0;
+}
+
+/* Program a clock offset (in parts per billion) into the calibration
+ * register, split into whole seconds ticks and 1/16th fractional ticks.
+ */
+static int xlnx_rtc_set_offset(struct device *dev, long offset)
+{
+	struct xlnx_rtc_dev *xrtcdev = dev_get_drvdata(dev);
+	short int max_tick;
+	unsigned char fract_tick = 0;
+	unsigned int calibval;
+	int fract_offset;
+	/* PPB per tick. NOTE(review): divides by calibval with no zero
+	 * guard — confirm calibval is always non-zero after probe.
+	 */
+	unsigned int tick_mult = RTC_PPB / xrtcdev->calibval;
+
+	/* Make sure offset value is within supported range */
+	if (offset < RTC_OFFSET_MIN || offset > RTC_OFFSET_MAX)
+		return -ERANGE;
+
+	/* Number ticks for given offset */
+	max_tick = div_s64_rem(offset, tick_mult, &fract_offset);
+
+	/* Number fractional ticks for given offset */
+	if (fract_offset) {
+		if (fract_offset < 0) {
+			/* Normalize a negative remainder into [0, tick_mult) */
+			fract_offset = fract_offset + tick_mult;
+			max_tick--;
+		}
+		if (fract_offset > (tick_mult / RTC_FR_MAX_TICKS)) {
+			/* Round up to the smallest covering 1/16th step */
+			for (fract_tick = 1; fract_tick < 16; fract_tick++) {
+				if (fract_offset <=
+				    (fract_tick *
+				     (tick_mult / RTC_FR_MAX_TICKS)))
+					break;
+			}
+		}
+	}
+
+	/* Zynqmp RTC uses second and fractional tick
+	 * counters for compensation
+	 */
+	calibval = max_tick + xrtcdev->calibval;
+
+	if (fract_tick)
+		calibval |= RTC_FR_EN;
+
+	calibval |= (fract_tick << RTC_FR_DATSHIFT);
+
+	writel(calibval, (xrtcdev->reg_base + RTC_CALIB_WR));
+
+	return 0;
+}
+
static const struct rtc_class_ops xlnx_rtc_ops = {
.set_time = xlnx_rtc_set_time,
.read_time = xlnx_rtc_read_time,
.read_alarm = xlnx_rtc_read_alarm,
.set_alarm = xlnx_rtc_set_alarm,
.alarm_irq_enable = xlnx_rtc_alarm_irq_enable,
+ .read_offset = xlnx_rtc_read_offset,
+ .set_offset = xlnx_rtc_set_offset,
};
static irqreturn_t xlnx_rtc_interrupt(int irq, void *id)
diff --git a/drivers/soc/xilinx/Kconfig b/drivers/soc/xilinx/Kconfig
index 646512d7276f..9767db279ba4 100644
--- a/drivers/soc/xilinx/Kconfig
+++ b/drivers/soc/xilinx/Kconfig
@@ -3,7 +3,8 @@ menu "Xilinx SoC drivers"
config XILINX_VCU
tristate "Xilinx VCU logicoreIP Init"
- depends on HAS_IOMEM
+ select MFD_CORE
+ depends on HAS_IOMEM && COMMON_CLK
help
Provides the driver to enable and disable the isolation between the
processing system and programmable logic part by using the logicoreIP
diff --git a/drivers/soc/xilinx/Makefile b/drivers/soc/xilinx/Makefile
index f66bfea5de17..24b1caad3217 100644
--- a/drivers/soc/xilinx/Makefile
+++ b/drivers/soc/xilinx/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_XILINX_VCU) += xlnx_vcu.o
+obj-$(CONFIG_XILINX_VCU) += xlnx_vcu_core.o xlnx_vcu_clk.o xlnx_vcu.o
obj-$(CONFIG_ZYNQMP_POWER) += zynqmp_power.o
obj-$(CONFIG_ZYNQMP_PM_DOMAINS) += zynqmp_pm_domains.o
diff --git a/drivers/soc/xilinx/xlnx_vcu.c b/drivers/soc/xilinx/xlnx_vcu.c
index a3aa40996f13..d89fcbaf9446 100644
--- a/drivers/soc/xilinx/xlnx_vcu.c
+++ b/drivers/soc/xilinx/xlnx_vcu.c
@@ -14,6 +14,8 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <soc/xilinx/xlnx_vcu.h>
+
/* Address map for different registers implemented in the VCU LogiCORE IP. */
#define VCU_ECODER_ENABLE 0x00
#define VCU_DECODER_ENABLE 0x04
@@ -26,14 +28,9 @@
#define VCU_ENC_FPS 0x20
#define VCU_MCU_CLK 0x24
#define VCU_CORE_CLK 0x28
-#define VCU_PLL_BYPASS 0x2c
-#define VCU_ENC_CLK 0x30
#define VCU_PLL_CLK 0x34
#define VCU_ENC_VIDEO_STANDARD 0x38
#define VCU_STATUS 0x3c
-#define VCU_AXI_ENC_CLK 0x40
-#define VCU_AXI_DEC_CLK 0x44
-#define VCU_AXI_MCU_CLK 0x48
#define VCU_DEC_VIDEO_STANDARD 0x4c
#define VCU_DEC_FRAME_SIZE_X 0x50
#define VCU_DEC_FRAME_SIZE_Y 0x54
@@ -41,196 +38,33 @@
#define VCU_BUFFER_B_FRAME 0x5c
#define VCU_WPP_EN 0x60
#define VCU_PLL_CLK_DEC 0x64
+#define VCU_NUM_CORE 0x6c
#define VCU_GASKET_INIT 0x74
#define VCU_GASKET_VALUE 0x03
-/* vcu slcr registers, bitmask and shift */
-#define VCU_PLL_CTRL 0x24
-#define VCU_PLL_CTRL_RESET_MASK 0x01
-#define VCU_PLL_CTRL_RESET_SHIFT 0
-#define VCU_PLL_CTRL_BYPASS_MASK 0x01
-#define VCU_PLL_CTRL_BYPASS_SHIFT 3
-#define VCU_PLL_CTRL_FBDIV_MASK 0x7f
-#define VCU_PLL_CTRL_FBDIV_SHIFT 8
-#define VCU_PLL_CTRL_POR_IN_MASK 0x01
-#define VCU_PLL_CTRL_POR_IN_SHIFT 1
-#define VCU_PLL_CTRL_PWR_POR_MASK 0x01
-#define VCU_PLL_CTRL_PWR_POR_SHIFT 2
-#define VCU_PLL_CTRL_CLKOUTDIV_MASK 0x03
-#define VCU_PLL_CTRL_CLKOUTDIV_SHIFT 16
-#define VCU_PLL_CTRL_DEFAULT 0
-#define VCU_PLL_DIV2 2
-
-#define VCU_PLL_CFG 0x28
-#define VCU_PLL_CFG_RES_MASK 0x0f
-#define VCU_PLL_CFG_RES_SHIFT 0
-#define VCU_PLL_CFG_CP_MASK 0x0f
-#define VCU_PLL_CFG_CP_SHIFT 5
-#define VCU_PLL_CFG_LFHF_MASK 0x03
-#define VCU_PLL_CFG_LFHF_SHIFT 10
-#define VCU_PLL_CFG_LOCK_CNT_MASK 0x03ff
-#define VCU_PLL_CFG_LOCK_CNT_SHIFT 13
-#define VCU_PLL_CFG_LOCK_DLY_MASK 0x7f
-#define VCU_PLL_CFG_LOCK_DLY_SHIFT 25
-#define VCU_ENC_CORE_CTRL 0x30
-#define VCU_ENC_MCU_CTRL 0x34
-#define VCU_DEC_CORE_CTRL 0x38
-#define VCU_DEC_MCU_CTRL 0x3c
-#define VCU_PLL_DIVISOR_MASK 0x3f
-#define VCU_PLL_DIVISOR_SHIFT 4
-#define VCU_SRCSEL_MASK 0x01
-#define VCU_SRCSEL_SHIFT 0
-#define VCU_SRCSEL_PLL 1
-
-#define VCU_PLL_STATUS 0x60
-#define VCU_PLL_STATUS_LOCK_STATUS_MASK 0x01
-
#define MHZ 1000000
-#define FVCO_MIN (1500U * MHZ)
-#define FVCO_MAX (3000U * MHZ)
-#define DIVISOR_MIN 0
-#define DIVISOR_MAX 63
#define FRAC 100
-#define LIMIT (10 * MHZ)
/**
- * struct xvcu_device - Xilinx VCU init device structure
+ * struct xvcu_priv - Xilinx VCU private data
* @dev: Platform device
- * @pll_ref: pll ref clock source
- * @aclk: axi clock source
+ * @pll_ref: PLL ref clock source
+ * @core_enc: Core encoder clock
+ * @core_dec: Core decoder clock
+ * @mcu_enc: MCU encoder clock
+ * @mcu_dec: MCU decoder clock
* @logicore_reg_ba: logicore reg base address
* @vcu_slcr_ba: vcu_slcr Register base address
- * @coreclk: core clock frequency
*/
-struct xvcu_device {
+struct xvcu_priv {
struct device *dev;
struct clk *pll_ref;
- struct clk *aclk;
+ struct clk *core_enc;
+ struct clk *core_dec;
+ struct clk *mcu_enc;
+ struct clk *mcu_dec;
void __iomem *logicore_reg_ba;
void __iomem *vcu_slcr_ba;
- u32 coreclk;
-};
-
-/**
- * struct xvcu_pll_cfg - Helper data
- * @fbdiv: The integer portion of the feedback divider to the PLL
- * @cp: PLL charge pump control
- * @res: PLL loop filter resistor control
- * @lfhf: PLL loop filter high frequency capacitor control
- * @lock_dly: Lock circuit configuration settings for lock windowsize
- * @lock_cnt: Lock circuit counter setting
- */
-struct xvcu_pll_cfg {
- u32 fbdiv;
- u32 cp;
- u32 res;
- u32 lfhf;
- u32 lock_dly;
- u32 lock_cnt;
-};
-
-static const struct xvcu_pll_cfg xvcu_pll_cfg[] = {
- { 25, 3, 10, 3, 63, 1000 },
- { 26, 3, 10, 3, 63, 1000 },
- { 27, 4, 6, 3, 63, 1000 },
- { 28, 4, 6, 3, 63, 1000 },
- { 29, 4, 6, 3, 63, 1000 },
- { 30, 4, 6, 3, 63, 1000 },
- { 31, 6, 1, 3, 63, 1000 },
- { 32, 6, 1, 3, 63, 1000 },
- { 33, 4, 10, 3, 63, 1000 },
- { 34, 5, 6, 3, 63, 1000 },
- { 35, 5, 6, 3, 63, 1000 },
- { 36, 5, 6, 3, 63, 1000 },
- { 37, 5, 6, 3, 63, 1000 },
- { 38, 5, 6, 3, 63, 975 },
- { 39, 3, 12, 3, 63, 950 },
- { 40, 3, 12, 3, 63, 925 },
- { 41, 3, 12, 3, 63, 900 },
- { 42, 3, 12, 3, 63, 875 },
- { 43, 3, 12, 3, 63, 850 },
- { 44, 3, 12, 3, 63, 850 },
- { 45, 3, 12, 3, 63, 825 },
- { 46, 3, 12, 3, 63, 800 },
- { 47, 3, 12, 3, 63, 775 },
- { 48, 3, 12, 3, 63, 775 },
- { 49, 3, 12, 3, 63, 750 },
- { 50, 3, 12, 3, 63, 750 },
- { 51, 3, 2, 3, 63, 725 },
- { 52, 3, 2, 3, 63, 700 },
- { 53, 3, 2, 3, 63, 700 },
- { 54, 3, 2, 3, 63, 675 },
- { 55, 3, 2, 3, 63, 675 },
- { 56, 3, 2, 3, 63, 650 },
- { 57, 3, 2, 3, 63, 650 },
- { 58, 3, 2, 3, 63, 625 },
- { 59, 3, 2, 3, 63, 625 },
- { 60, 3, 2, 3, 63, 625 },
- { 61, 3, 2, 3, 63, 600 },
- { 62, 3, 2, 3, 63, 600 },
- { 63, 3, 2, 3, 63, 600 },
- { 64, 3, 2, 3, 63, 600 },
- { 65, 3, 2, 3, 63, 600 },
- { 66, 3, 2, 3, 63, 600 },
- { 67, 3, 2, 3, 63, 600 },
- { 68, 3, 2, 3, 63, 600 },
- { 69, 3, 2, 3, 63, 600 },
- { 70, 3, 2, 3, 63, 600 },
- { 71, 3, 2, 3, 63, 600 },
- { 72, 3, 2, 3, 63, 600 },
- { 73, 3, 2, 3, 63, 600 },
- { 74, 3, 2, 3, 63, 600 },
- { 75, 3, 2, 3, 63, 600 },
- { 76, 3, 2, 3, 63, 600 },
- { 77, 3, 2, 3, 63, 600 },
- { 78, 3, 2, 3, 63, 600 },
- { 79, 3, 2, 3, 63, 600 },
- { 80, 3, 2, 3, 63, 600 },
- { 81, 3, 2, 3, 63, 600 },
- { 82, 3, 2, 3, 63, 600 },
- { 83, 4, 2, 3, 63, 600 },
- { 84, 4, 2, 3, 63, 600 },
- { 85, 4, 2, 3, 63, 600 },
- { 86, 4, 2, 3, 63, 600 },
- { 87, 4, 2, 3, 63, 600 },
- { 88, 4, 2, 3, 63, 600 },
- { 89, 4, 2, 3, 63, 600 },
- { 90, 4, 2, 3, 63, 600 },
- { 91, 4, 2, 3, 63, 600 },
- { 92, 4, 2, 3, 63, 600 },
- { 93, 4, 2, 3, 63, 600 },
- { 94, 4, 2, 3, 63, 600 },
- { 95, 4, 2, 3, 63, 600 },
- { 96, 4, 2, 3, 63, 600 },
- { 97, 4, 2, 3, 63, 600 },
- { 98, 4, 2, 3, 63, 600 },
- { 99, 4, 2, 3, 63, 600 },
- { 100, 4, 2, 3, 63, 600 },
- { 101, 4, 2, 3, 63, 600 },
- { 102, 4, 2, 3, 63, 600 },
- { 103, 5, 2, 3, 63, 600 },
- { 104, 5, 2, 3, 63, 600 },
- { 105, 5, 2, 3, 63, 600 },
- { 106, 5, 2, 3, 63, 600 },
- { 107, 3, 4, 3, 63, 600 },
- { 108, 3, 4, 3, 63, 600 },
- { 109, 3, 4, 3, 63, 600 },
- { 110, 3, 4, 3, 63, 600 },
- { 111, 3, 4, 3, 63, 600 },
- { 112, 3, 4, 3, 63, 600 },
- { 113, 3, 4, 3, 63, 600 },
- { 114, 3, 4, 3, 63, 600 },
- { 115, 3, 4, 3, 63, 600 },
- { 116, 3, 4, 3, 63, 600 },
- { 117, 3, 4, 3, 63, 600 },
- { 118, 3, 4, 3, 63, 600 },
- { 119, 3, 4, 3, 63, 600 },
- { 120, 3, 4, 3, 63, 600 },
- { 121, 3, 4, 3, 63, 600 },
- { 122, 3, 4, 3, 63, 600 },
- { 123, 3, 4, 3, 63, 600 },
- { 124, 3, 4, 3, 63, 600 },
- { 125, 3, 4, 3, 63, 600 },
};
/**
@@ -258,47 +92,71 @@ static inline void xvcu_write(void __iomem *iomem, u32 offset, u32 value)
}
/**
- * xvcu_write_field_reg - Write to the vcu reg field
- * @iomem: vcu reg space base address
- * @offset: vcu reg offset from base
- * @field: vcu reg field to write to
- * @mask: vcu reg mask
- * @shift: vcu reg number of bits to shift the bitfield
+ * xvcu_get_color_depth - read the color depth register
+ * @xvcu: Pointer to the xvcu_device structure
+ *
+ * Return: the 32-bit color depth value read from the logicore register
+ *
+ */
+u32 xvcu_get_color_depth(struct xvcu_device *xvcu)
+{
+ return xvcu_read(xvcu->logicore_reg_ba, VCU_ENC_COLOR_DEPTH);
+}
+EXPORT_SYMBOL_GPL(xvcu_get_color_depth);
+
+/**
+ * xvcu_get_memory_depth - read the memory depth register
+ * @xvcu: Pointer to the xvcu_device structure
+ *
+ * Return: the 32-bit memory depth value read from the logicore register
+ *
*/
-static void xvcu_write_field_reg(void __iomem *iomem, int offset,
- u32 field, u32 mask, int shift)
+u32 xvcu_get_memory_depth(struct xvcu_device *xvcu)
{
- u32 val = xvcu_read(iomem, offset);
+ return xvcu_read(xvcu->logicore_reg_ba, VCU_MEMORY_DEPTH);
+}
+EXPORT_SYMBOL_GPL(xvcu_get_memory_depth);
- val &= ~(mask << shift);
- val |= (field & mask) << shift;
+/**
+ * xvcu_get_clock_frequency - provide the core clock frequency
+ * @xvcu: Pointer to the xvcu_device structure
+ *
+ * Return: the core clock frequency in Hz (register value is in MHz)
+ *
+ */
+u32 xvcu_get_clock_frequency(struct xvcu_device *xvcu)
+{
+ return xvcu_read(xvcu->logicore_reg_ba, VCU_CORE_CLK) * MHZ;
+}
+EXPORT_SYMBOL_GPL(xvcu_get_clock_frequency);
- xvcu_write(iomem, offset, val);
+/**
+ * xvcu_get_num_cores - read the number of core register
+ * @xvcu: Pointer to the xvcu_device structure
+ *
+ * Return: the number of available VCU cores read from the logicore register
+ *
+ */
+u32 xvcu_get_num_cores(struct xvcu_device *xvcu)
+{
+ return xvcu_read(xvcu->logicore_reg_ba, VCU_NUM_CORE);
}
+EXPORT_SYMBOL_GPL(xvcu_get_num_cores);
/**
- * xvcu_set_vcu_pll_info - Set the VCU PLL info
+ * xvcu_set_vcu_pll - Set the VCU PLL
* @xvcu: Pointer to the xvcu_device structure
*
* Programming the VCU PLL based on the user configuration
* (ref clock freq, core clock freq, mcu clock freq).
* Core clock frequency has higher priority than mcu clock frequency
- * Errors in following cases
- * - When mcu or clock clock get from logicoreIP is 0
- * - When VCU PLL DIV related bits value other than 1
- * - When proper data not found for given data
- * - When sis570_1 clocksource related operation failed
*
* Return: Returns status, either success or error+reason
*/
-static int xvcu_set_vcu_pll_info(struct xvcu_device *xvcu)
+static int xvcu_set_vcu_pll(struct xvcu_priv *xvcu)
{
u32 refclk, coreclk, mcuclk, inte, deci;
- u32 divisor_mcu, divisor_core, fvco;
- u32 clkoutdiv, vcu_pll_ctrl, pll_clk;
- u32 cfg_val, mod, ctrl;
- int ret, i;
- const struct xvcu_pll_cfg *found = NULL;
+ int ret;
inte = xvcu_read(xvcu->logicore_reg_ba, VCU_PLL_CLK);
deci = xvcu_read(xvcu->logicore_reg_ba, VCU_PLL_CLK_DEC);
@@ -314,175 +172,74 @@ static int xvcu_set_vcu_pll_info(struct xvcu_device *xvcu)
dev_dbg(xvcu->dev, "Core clock from logicoreIP is %uHz\n", coreclk);
dev_dbg(xvcu->dev, "Mcu clock from logicoreIP is %uHz\n", mcuclk);
- clk_disable_unprepare(xvcu->pll_ref);
ret = clk_set_rate(xvcu->pll_ref, refclk);
if (ret)
- dev_warn(xvcu->dev, "failed to set logicoreIP refclk rate\n");
+ dev_warn(xvcu->dev, "failed to set logicoreIP refclk rate %d\n"
+ , ret);
ret = clk_prepare_enable(xvcu->pll_ref);
if (ret) {
- dev_err(xvcu->dev, "failed to enable pll_ref clock source\n");
+ dev_err(xvcu->dev, "failed to enable pll_ref clock source %d\n",
+ ret);
return ret;
}
- refclk = clk_get_rate(xvcu->pll_ref);
-
- /*
- * The divide-by-2 should be always enabled (==1)
- * to meet the timing in the design.
- * Otherwise, it's an error
- */
- vcu_pll_ctrl = xvcu_read(xvcu->vcu_slcr_ba, VCU_PLL_CTRL);
- clkoutdiv = vcu_pll_ctrl >> VCU_PLL_CTRL_CLKOUTDIV_SHIFT;
- clkoutdiv = clkoutdiv & VCU_PLL_CTRL_CLKOUTDIV_MASK;
- if (clkoutdiv != 1) {
- dev_err(xvcu->dev, "clkoutdiv value is invalid\n");
- return -EINVAL;
- }
+ ret = clk_set_rate(xvcu->mcu_enc, mcuclk);
+ if (ret)
+ dev_warn(xvcu->dev, "failed to set logicoreIP mcu clk rate %d\n",
+ ret);
- for (i = ARRAY_SIZE(xvcu_pll_cfg) - 1; i >= 0; i--) {
- const struct xvcu_pll_cfg *cfg = &xvcu_pll_cfg[i];
-
- fvco = cfg->fbdiv * refclk;
- if (fvco >= FVCO_MIN && fvco <= FVCO_MAX) {
- pll_clk = fvco / VCU_PLL_DIV2;
- if (fvco % VCU_PLL_DIV2 != 0)
- pll_clk++;
- mod = pll_clk % coreclk;
- if (mod < LIMIT) {
- divisor_core = pll_clk / coreclk;
- } else if (coreclk - mod < LIMIT) {
- divisor_core = pll_clk / coreclk;
- divisor_core++;
- } else {
- continue;
- }
- if (divisor_core >= DIVISOR_MIN &&
- divisor_core <= DIVISOR_MAX) {
- found = cfg;
- divisor_mcu = pll_clk / mcuclk;
- mod = pll_clk % mcuclk;
- if (mcuclk - mod < LIMIT)
- divisor_mcu++;
- break;
- }
- }
+ ret = clk_prepare_enable(xvcu->mcu_enc);
+ if (ret) {
+ dev_err(xvcu->dev, "failed to enable mcu_enc %d\n", ret);
+ goto error_mcu_enc;
}
- if (!found) {
- dev_err(xvcu->dev, "Invalid clock combination.\n");
- return -EINVAL;
+ ret = clk_set_rate(xvcu->mcu_dec, mcuclk);
+ if (ret)
+ dev_warn(xvcu->dev, "failed to set logicoreIP mcu clk rate %d\n",
+ ret);
+
+ ret = clk_prepare_enable(xvcu->mcu_dec);
+ if (ret) {
+ dev_err(xvcu->dev, "failed to enable mcu_dec %d\n", ret);
+ goto error_mcu_dec;
}
- xvcu->coreclk = pll_clk / divisor_core;
- mcuclk = pll_clk / divisor_mcu;
- dev_dbg(xvcu->dev, "Actual Ref clock freq is %uHz\n", refclk);
- dev_dbg(xvcu->dev, "Actual Core clock freq is %uHz\n", xvcu->coreclk);
- dev_dbg(xvcu->dev, "Actual Mcu clock freq is %uHz\n", mcuclk);
-
- vcu_pll_ctrl &= ~(VCU_PLL_CTRL_FBDIV_MASK << VCU_PLL_CTRL_FBDIV_SHIFT);
- vcu_pll_ctrl |= (found->fbdiv & VCU_PLL_CTRL_FBDIV_MASK) <<
- VCU_PLL_CTRL_FBDIV_SHIFT;
- vcu_pll_ctrl &= ~(VCU_PLL_CTRL_POR_IN_MASK <<
- VCU_PLL_CTRL_POR_IN_SHIFT);
- vcu_pll_ctrl |= (VCU_PLL_CTRL_DEFAULT & VCU_PLL_CTRL_POR_IN_MASK) <<
- VCU_PLL_CTRL_POR_IN_SHIFT;
- vcu_pll_ctrl &= ~(VCU_PLL_CTRL_PWR_POR_MASK <<
- VCU_PLL_CTRL_PWR_POR_SHIFT);
- vcu_pll_ctrl |= (VCU_PLL_CTRL_DEFAULT & VCU_PLL_CTRL_PWR_POR_MASK) <<
- VCU_PLL_CTRL_PWR_POR_SHIFT;
- xvcu_write(xvcu->vcu_slcr_ba, VCU_PLL_CTRL, vcu_pll_ctrl);
-
- /* Set divisor for the core and mcu clock */
- ctrl = xvcu_read(xvcu->vcu_slcr_ba, VCU_ENC_CORE_CTRL);
- ctrl &= ~(VCU_PLL_DIVISOR_MASK << VCU_PLL_DIVISOR_SHIFT);
- ctrl |= (divisor_core & VCU_PLL_DIVISOR_MASK) <<
- VCU_PLL_DIVISOR_SHIFT;
- ctrl &= ~(VCU_SRCSEL_MASK << VCU_SRCSEL_SHIFT);
- ctrl |= (VCU_SRCSEL_PLL & VCU_SRCSEL_MASK) << VCU_SRCSEL_SHIFT;
- xvcu_write(xvcu->vcu_slcr_ba, VCU_ENC_CORE_CTRL, ctrl);
-
- ctrl = xvcu_read(xvcu->vcu_slcr_ba, VCU_DEC_CORE_CTRL);
- ctrl &= ~(VCU_PLL_DIVISOR_MASK << VCU_PLL_DIVISOR_SHIFT);
- ctrl |= (divisor_core & VCU_PLL_DIVISOR_MASK) <<
- VCU_PLL_DIVISOR_SHIFT;
- ctrl &= ~(VCU_SRCSEL_MASK << VCU_SRCSEL_SHIFT);
- ctrl |= (VCU_SRCSEL_PLL & VCU_SRCSEL_MASK) << VCU_SRCSEL_SHIFT;
- xvcu_write(xvcu->vcu_slcr_ba, VCU_DEC_CORE_CTRL, ctrl);
-
- ctrl = xvcu_read(xvcu->vcu_slcr_ba, VCU_ENC_MCU_CTRL);
- ctrl &= ~(VCU_PLL_DIVISOR_MASK << VCU_PLL_DIVISOR_SHIFT);
- ctrl |= (divisor_mcu & VCU_PLL_DIVISOR_MASK) << VCU_PLL_DIVISOR_SHIFT;
- ctrl &= ~(VCU_SRCSEL_MASK << VCU_SRCSEL_SHIFT);
- ctrl |= (VCU_SRCSEL_PLL & VCU_SRCSEL_MASK) << VCU_SRCSEL_SHIFT;
- xvcu_write(xvcu->vcu_slcr_ba, VCU_ENC_MCU_CTRL, ctrl);
-
- ctrl = xvcu_read(xvcu->vcu_slcr_ba, VCU_DEC_MCU_CTRL);
- ctrl &= ~(VCU_PLL_DIVISOR_MASK << VCU_PLL_DIVISOR_SHIFT);
- ctrl |= (divisor_mcu & VCU_PLL_DIVISOR_MASK) << VCU_PLL_DIVISOR_SHIFT;
- ctrl &= ~(VCU_SRCSEL_MASK << VCU_SRCSEL_SHIFT);
- ctrl |= (VCU_SRCSEL_PLL & VCU_SRCSEL_MASK) << VCU_SRCSEL_SHIFT;
- xvcu_write(xvcu->vcu_slcr_ba, VCU_DEC_MCU_CTRL, ctrl);
-
- /* Set RES, CP, LFHF, LOCK_CNT and LOCK_DLY cfg values */
- cfg_val = (found->res << VCU_PLL_CFG_RES_SHIFT) |
- (found->cp << VCU_PLL_CFG_CP_SHIFT) |
- (found->lfhf << VCU_PLL_CFG_LFHF_SHIFT) |
- (found->lock_cnt << VCU_PLL_CFG_LOCK_CNT_SHIFT) |
- (found->lock_dly << VCU_PLL_CFG_LOCK_DLY_SHIFT);
- xvcu_write(xvcu->vcu_slcr_ba, VCU_PLL_CFG, cfg_val);
+ ret = clk_set_rate(xvcu->core_enc, coreclk);
+ if (ret)
+ dev_warn(xvcu->dev, "failed to set logicoreIP core clk rate %d\n",
+ ret);
- return 0;
-}
+ ret = clk_prepare_enable(xvcu->core_enc);
+ if (ret) {
+ dev_err(xvcu->dev, "failed to enable core_enc %d\n", ret);
+ goto error_core_enc;
+ }
-/**
- * xvcu_set_pll - PLL init sequence
- * @xvcu: Pointer to the xvcu_device structure
- *
- * Call the api to set the PLL info and once that is done then
- * init the PLL sequence to make the PLL stable.
- *
- * Return: Returns status, either success or error+reason
- */
-static int xvcu_set_pll(struct xvcu_device *xvcu)
-{
- u32 lock_status;
- unsigned long timeout;
- int ret;
+ ret = clk_set_rate(xvcu->core_dec, coreclk);
+ if (ret)
+ dev_warn(xvcu->dev, "failed to set logicoreIP core clk rate %d\n",
+ ret);
- ret = xvcu_set_vcu_pll_info(xvcu);
+ ret = clk_prepare_enable(xvcu->core_dec);
if (ret) {
- dev_err(xvcu->dev, "failed to set pll info\n");
- return ret;
+ dev_err(xvcu->dev, "failed to enable core_dec %d\n", ret);
+ goto error_core_dec;
}
- xvcu_write_field_reg(xvcu->vcu_slcr_ba, VCU_PLL_CTRL,
- 1, VCU_PLL_CTRL_BYPASS_MASK,
- VCU_PLL_CTRL_BYPASS_SHIFT);
- xvcu_write_field_reg(xvcu->vcu_slcr_ba, VCU_PLL_CTRL,
- 1, VCU_PLL_CTRL_RESET_MASK,
- VCU_PLL_CTRL_RESET_SHIFT);
- xvcu_write_field_reg(xvcu->vcu_slcr_ba, VCU_PLL_CTRL,
- 0, VCU_PLL_CTRL_RESET_MASK,
- VCU_PLL_CTRL_RESET_SHIFT);
- /*
- * Defined the timeout for the max time to wait the
- * PLL_STATUS to be locked.
- */
- timeout = jiffies + msecs_to_jiffies(2000);
- do {
- lock_status = xvcu_read(xvcu->vcu_slcr_ba, VCU_PLL_STATUS);
- if (lock_status & VCU_PLL_STATUS_LOCK_STATUS_MASK) {
- xvcu_write_field_reg(xvcu->vcu_slcr_ba, VCU_PLL_CTRL,
- 0, VCU_PLL_CTRL_BYPASS_MASK,
- VCU_PLL_CTRL_BYPASS_SHIFT);
- return 0;
- }
- } while (!time_after(jiffies, timeout));
-
- /* PLL is not locked even after the timeout of the 2sec */
- dev_err(xvcu->dev, "PLL is not locked\n");
- return -ETIMEDOUT;
+ return 0;
+
+error_core_dec:
+ clk_disable_unprepare(xvcu->core_enc);
+error_core_enc:
+ clk_disable_unprepare(xvcu->mcu_dec);
+error_mcu_dec:
+ clk_disable_unprepare(xvcu->mcu_enc);
+error_mcu_enc:
+ clk_disable_unprepare(xvcu->pll_ref);
+
+ return ret;
}
/**
@@ -496,8 +253,8 @@ static int xvcu_set_pll(struct xvcu_device *xvcu)
*/
static int xvcu_probe(struct platform_device *pdev)
{
- struct resource *res;
- struct xvcu_device *xvcu;
+ struct xvcu_priv *xvcu;
+ struct xvcu_device *xvcu_core = dev_get_drvdata(pdev->dev.parent);
int ret;
xvcu = devm_kzalloc(&pdev->dev, sizeof(*xvcu), GFP_KERNEL);
@@ -505,85 +262,61 @@ static int xvcu_probe(struct platform_device *pdev)
return -ENOMEM;
xvcu->dev = &pdev->dev;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vcu_slcr");
- if (!res) {
- dev_err(&pdev->dev, "get vcu_slcr memory resource failed.\n");
- return -ENODEV;
- }
+ xvcu->vcu_slcr_ba = xvcu_core->vcu_slcr_ba;
+ xvcu->logicore_reg_ba = xvcu_core->logicore_reg_ba;
- xvcu->vcu_slcr_ba = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!xvcu->vcu_slcr_ba) {
- dev_err(&pdev->dev, "vcu_slcr register mapping failed.\n");
- return -ENOMEM;
+ xvcu->pll_ref = devm_clk_get(pdev->dev.parent, "pll_ref");
+ if (IS_ERR(xvcu->pll_ref)) {
+ dev_err(&pdev->dev, "Could not get pll_ref clock\n");
+ return PTR_ERR(xvcu->pll_ref);
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "logicore");
- if (!res) {
- dev_err(&pdev->dev, "get logicore memory resource failed.\n");
- return -ENODEV;
+ xvcu->core_enc = devm_clk_get(pdev->dev.parent, "vcu_core_enc");
+ if (IS_ERR(xvcu->core_enc)) {
+ dev_err(&pdev->dev, "Could not get core_enc clock\n");
+ return PTR_ERR(xvcu->core_enc);
}
- xvcu->logicore_reg_ba = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!xvcu->logicore_reg_ba) {
- dev_err(&pdev->dev, "logicore register mapping failed.\n");
- return -ENOMEM;
+ xvcu->core_dec = devm_clk_get(pdev->dev.parent, "vcu_core_dec");
+ if (IS_ERR(xvcu->core_dec)) {
+ dev_err(&pdev->dev, "Could not get vcu_core_dec clock\n");
+ return PTR_ERR(xvcu->core_dec);
}
- xvcu->aclk = devm_clk_get(&pdev->dev, "aclk");
- if (IS_ERR(xvcu->aclk)) {
- dev_err(&pdev->dev, "Could not get aclk clock\n");
- return PTR_ERR(xvcu->aclk);
+ xvcu->mcu_enc = devm_clk_get(pdev->dev.parent, "vcu_mcu_enc");
+ if (IS_ERR(xvcu->mcu_enc)) {
+ dev_err(&pdev->dev, "Could not get mcu_enc clock\n");
+ return PTR_ERR(xvcu->mcu_enc);
}
- xvcu->pll_ref = devm_clk_get(&pdev->dev, "pll_ref");
- if (IS_ERR(xvcu->pll_ref)) {
- dev_err(&pdev->dev, "Could not get pll_ref clock\n");
- return PTR_ERR(xvcu->pll_ref);
+ xvcu->mcu_dec = devm_clk_get(pdev->dev.parent, "vcu_mcu_dec");
+ if (IS_ERR(xvcu->mcu_dec)) {
+ dev_err(&pdev->dev, "Could not get mcu_dec clock\n");
+ return PTR_ERR(xvcu->mcu_dec);
}
- ret = clk_prepare_enable(xvcu->aclk);
+ /* Do the PLL Settings based on the ref clk,core and mcu clk freq */
+ ret = xvcu_set_vcu_pll(xvcu);
if (ret) {
- dev_err(&pdev->dev, "aclk clock enable failed\n");
+ dev_err(&pdev->dev, "Failed to set the pll\n");
return ret;
}
- ret = clk_prepare_enable(xvcu->pll_ref);
- if (ret) {
- dev_err(&pdev->dev, "pll_ref clock enable failed\n");
- goto error_aclk;
- }
-
- /*
- * Do the Gasket isolation and put the VCU out of reset
- * Bit 0 : Gasket isolation
- * Bit 1 : put VCU out of reset
- */
- xvcu_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, VCU_GASKET_VALUE);
+ dev_set_drvdata(&pdev->dev, xvcu);
- /* Do the PLL Settings based on the ref clk,core and mcu clk freq */
- ret = xvcu_set_pll(xvcu);
+ ret = devm_of_platform_populate(pdev->dev.parent);
if (ret) {
- dev_err(&pdev->dev, "Failed to set the pll\n");
- goto error_pll_ref;
+ dev_err(&pdev->dev, "Failed to register allegro codecs\n");
+ return ret;
}
- dev_set_drvdata(&pdev->dev, xvcu);
-
dev_info(&pdev->dev, "%s: Probed successfully\n", __func__);
- return 0;
-
-error_pll_ref:
- clk_disable_unprepare(xvcu->pll_ref);
-error_aclk:
- clk_disable_unprepare(xvcu->aclk);
return ret;
}
/**
- * xvcu_remove - Insert gasket isolation
+ * xvcu_remove - Depopulate the child nodes, insert gasket isolation
* and disable the clock
* @pdev: Pointer to the platform_device structure
*
@@ -592,32 +325,33 @@ error_aclk:
*/
static int xvcu_remove(struct platform_device *pdev)
{
- struct xvcu_device *xvcu;
+ struct xvcu_priv *xvcu;
xvcu = platform_get_drvdata(pdev);
if (!xvcu)
return -ENODEV;
- /* Add the the Gasket isolation and put the VCU in reset. */
- xvcu_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, 0);
+ clk_disable_unprepare(xvcu->core_enc);
+ devm_clk_put(pdev->dev.parent, xvcu->core_enc);
+
+ clk_disable_unprepare(xvcu->core_dec);
+ devm_clk_put(pdev->dev.parent, xvcu->core_dec);
+
+ clk_disable_unprepare(xvcu->mcu_enc);
+ devm_clk_put(pdev->dev.parent, xvcu->mcu_enc);
+
+ clk_disable_unprepare(xvcu->mcu_dec);
+ devm_clk_put(pdev->dev.parent, xvcu->mcu_dec);
clk_disable_unprepare(xvcu->pll_ref);
- clk_disable_unprepare(xvcu->aclk);
+ devm_clk_put(pdev->dev.parent, xvcu->pll_ref);
return 0;
}
-static const struct of_device_id xvcu_of_id_table[] = {
- { .compatible = "xlnx,vcu" },
- { .compatible = "xlnx,vcu-logicoreip-1.0" },
- { }
-};
-MODULE_DEVICE_TABLE(of, xvcu_of_id_table);
-
static struct platform_driver xvcu_driver = {
.driver = {
.name = "xilinx-vcu",
- .of_match_table = xvcu_of_id_table,
},
.probe = xvcu_probe,
.remove = xvcu_remove,
@@ -628,3 +362,4 @@ module_platform_driver(xvcu_driver);
MODULE_AUTHOR("Dhaval Shah <dshah@xilinx.com>");
MODULE_DESCRIPTION("Xilinx VCU init Driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:xilinx-vcu");
diff --git a/drivers/soc/xilinx/xlnx_vcu_clk.c b/drivers/soc/xilinx/xlnx_vcu_clk.c
new file mode 100644
index 000000000000..fdf89967f8e8
--- /dev/null
+++ b/drivers/soc/xilinx/xlnx_vcu_clk.c
@@ -0,0 +1,916 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx VCU clock driver
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Rajan Vaja <rajan.vaja@xilinx.com>
+ * Tejas Patel <tejas.patel@xilinx.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <soc/xilinx/xlnx_vcu.h>
+
+/* vcu slcr registers, bitmask and shift */
+#define VCU_PLL_CTRL 0x24
+#define VCU_PLL_CTRL_RESET_MASK BIT(0)
+#define VCU_PLL_CTRL_RESET_SHIFT 0
+#define VCU_PLL_CTRL_BYPASS_MASK BIT(3)
+#define VCU_PLL_CTRL_BYPASS_SHIFT 3
+#define VCU_PLL_CTRL_FBDIV_MASK 0x7f
+#define VCU_PLL_CTRL_FBDIV_SHIFT 8
+#define VCU_PLL_CTRL_POR_IN_MASK BIT(1)
+#define VCU_PLL_CTRL_POR_IN_SHIFT 1
+#define VCU_PLL_CTRL_PWR_POR_MASK BIT(2)
+#define VCU_PLL_CTRL_PWR_POR_SHIFT 2
+#define VCU_PLL_CTRL_CLKOUTDIV_MASK 0x03
+#define VCU_PLL_CTRL_CLKOUTDIV_SHIFT 16
+#define VCU_PLL_CTRL_DEFAULT 0
+
+#define VCU_PLL_CFG 0x28
+#define VCU_PLL_CFG_RES_MASK 0x0f
+#define VCU_PLL_CFG_RES_SHIFT 0
+#define VCU_PLL_CFG_CP_MASK 0x0f
+#define VCU_PLL_CFG_CP_SHIFT 5
+#define VCU_PLL_CFG_LFHF_MASK 0x03
+#define VCU_PLL_CFG_LFHF_SHIFT 10
+#define VCU_PLL_CFG_LOCK_CNT_MASK 0x03ff
+#define VCU_PLL_CFG_LOCK_CNT_SHIFT 13
+#define VCU_PLL_CFG_LOCK_DLY_MASK 0x7f
+#define VCU_PLL_CFG_LOCK_DLY_SHIFT 25
+#define VCU_ENC_CORE_CTRL 0x30
+#define VCU_ENC_MCU_CTRL 0x34
+#define VCU_ENC_MCU_CTRL_GATE_BIT BIT(12)
+#define VCU_DEC_CORE_CTRL 0x38
+#define VCU_DEC_MCU_CTRL 0x3c
+#define VCU_PLL_DIVISOR_MASK 0x3f
+#define VCU_PLL_DIVISOR_SHIFT 4
+#define VCU_SRCSEL_MASK 0x01
+#define VCU_SRCSEL_SHIFT 0
+#define VCU_SRCSEL_PLL 1
+
+#define VCU_PLL_STATUS 0x60
+#define VCU_PLL_STATUS_LOCK_STATUS_MASK 0x01
+#define VCU_PLL_LOCK_TIMEOUT 2000000
+
+#define PLL_FBDIV_MIN 25
+#define PLL_FBDIV_MAX 125
+
+#define MHZ 1000000
+#define FVCO_MIN (1500U * MHZ)
+#define FVCO_MAX (3000U * MHZ)
+#define DIVISOR_MIN 0
+#define DIVISOR_MAX 63
+#define FRAC 100
+#define LIMIT (10 * MHZ)
+
+#define FRAC_OFFSET 0x8
+#define PLLFCFG_FRAC_EN BIT(31)
+#define FRAC_DIV 0x10000 /* 2^16 */
+
+#define to_vcu_pll(_hw) container_of(_hw, struct vcu_pll, hw)
+#define div_mask(width) ((1 << (width)) - 1)
+
+enum pll_mode {
+ PLL_MODE_INT,
+ PLL_MODE_FRAC,
+};
+
+enum vcu_clks {
+ vcu_pll_half, vcu_core_enc, vcu_core_dec,
+ mcu_core_enc, mcu_core_dec, clk_max
+};
+
+/**
+ * struct xvcu_pll_cfg - Helper data
+ * @fbdiv: The integer portion of the feedback divider to the PLL
+ * @cp: PLL charge pump control
+ * @res: PLL loop filter resistor control
+ * @lfhf: PLL loop filter high frequency capacitor control
+ * @lock_dly: Lock circuit configuration settings for lock windowsize
+ * @lock_cnt: Lock circuit counter setting
+ */
+struct xvcu_pll_cfg {
+ u32 fbdiv;
+ u32 cp;
+ u32 res;
+ u32 lfhf;
+ u32 lock_dly;
+ u32 lock_cnt;
+};
+
+/**
+ * struct vcu_pll - VCU PLL control/status data
+ * @hw: Clock hardware
+ * @pll_ctrl: PLL control register address
+ * @pll_status: PLL status register address
+ * @pll_cfg: PLL config register address
+ * @lockbit: PLL lock status bit
+ */
+struct vcu_pll {
+ struct clk_hw hw;
+ void __iomem *pll_ctrl;
+ void __iomem *pll_status;
+ void __iomem *pll_cfg;
+ u8 lockbit;
+};
+
+static struct clk_hw_onecell_data *vcu_clk_data;
+static const char * const vcu_mux_parents[] = {
+ "dummy_name",
+ "vcu_pll_half"
+};
+
+static DEFINE_SPINLOCK(mcu_enc_lock);
+static DEFINE_SPINLOCK(mcu_dec_lock);
+static DEFINE_SPINLOCK(core_enc_lock);
+static DEFINE_SPINLOCK(core_dec_lock);
+
+static const struct xvcu_pll_cfg xvcu_pll_cfg[] = {
+ { 25, 3, 10, 3, 63, 1000 },
+ { 26, 3, 10, 3, 63, 1000 },
+ { 27, 4, 6, 3, 63, 1000 },
+ { 28, 4, 6, 3, 63, 1000 },
+ { 29, 4, 6, 3, 63, 1000 },
+ { 30, 4, 6, 3, 63, 1000 },
+ { 31, 6, 1, 3, 63, 1000 },
+ { 32, 6, 1, 3, 63, 1000 },
+ { 33, 4, 10, 3, 63, 1000 },
+ { 34, 5, 6, 3, 63, 1000 },
+ { 35, 5, 6, 3, 63, 1000 },
+ { 36, 5, 6, 3, 63, 1000 },
+ { 37, 5, 6, 3, 63, 1000 },
+ { 38, 5, 6, 3, 63, 975 },
+ { 39, 3, 12, 3, 63, 950 },
+ { 40, 3, 12, 3, 63, 925 },
+ { 41, 3, 12, 3, 63, 900 },
+ { 42, 3, 12, 3, 63, 875 },
+ { 43, 3, 12, 3, 63, 850 },
+ { 44, 3, 12, 3, 63, 850 },
+ { 45, 3, 12, 3, 63, 825 },
+ { 46, 3, 12, 3, 63, 800 },
+ { 47, 3, 12, 3, 63, 775 },
+ { 48, 3, 12, 3, 63, 775 },
+ { 49, 3, 12, 3, 63, 750 },
+ { 50, 3, 12, 3, 63, 750 },
+ { 51, 3, 2, 3, 63, 725 },
+ { 52, 3, 2, 3, 63, 700 },
+ { 53, 3, 2, 3, 63, 700 },
+ { 54, 3, 2, 3, 63, 675 },
+ { 55, 3, 2, 3, 63, 675 },
+ { 56, 3, 2, 3, 63, 650 },
+ { 57, 3, 2, 3, 63, 650 },
+ { 58, 3, 2, 3, 63, 625 },
+ { 59, 3, 2, 3, 63, 625 },
+ { 60, 3, 2, 3, 63, 625 },
+ { 61, 3, 2, 3, 63, 600 },
+ { 62, 3, 2, 3, 63, 600 },
+ { 63, 3, 2, 3, 63, 600 },
+ { 64, 3, 2, 3, 63, 600 },
+ { 65, 3, 2, 3, 63, 600 },
+ { 66, 3, 2, 3, 63, 600 },
+ { 67, 3, 2, 3, 63, 600 },
+ { 68, 3, 2, 3, 63, 600 },
+ { 69, 3, 2, 3, 63, 600 },
+ { 70, 3, 2, 3, 63, 600 },
+ { 71, 3, 2, 3, 63, 600 },
+ { 72, 3, 2, 3, 63, 600 },
+ { 73, 3, 2, 3, 63, 600 },
+ { 74, 3, 2, 3, 63, 600 },
+ { 75, 3, 2, 3, 63, 600 },
+ { 76, 3, 2, 3, 63, 600 },
+ { 77, 3, 2, 3, 63, 600 },
+ { 78, 3, 2, 3, 63, 600 },
+ { 79, 3, 2, 3, 63, 600 },
+ { 80, 3, 2, 3, 63, 600 },
+ { 81, 3, 2, 3, 63, 600 },
+ { 82, 3, 2, 3, 63, 600 },
+ { 83, 4, 2, 3, 63, 600 },
+ { 84, 4, 2, 3, 63, 600 },
+ { 85, 4, 2, 3, 63, 600 },
+ { 86, 4, 2, 3, 63, 600 },
+ { 87, 4, 2, 3, 63, 600 },
+ { 88, 4, 2, 3, 63, 600 },
+ { 89, 4, 2, 3, 63, 600 },
+ { 90, 4, 2, 3, 63, 600 },
+ { 91, 4, 2, 3, 63, 600 },
+ { 92, 4, 2, 3, 63, 600 },
+ { 93, 4, 2, 3, 63, 600 },
+ { 94, 4, 2, 3, 63, 600 },
+ { 95, 4, 2, 3, 63, 600 },
+ { 96, 4, 2, 3, 63, 600 },
+ { 97, 4, 2, 3, 63, 600 },
+ { 98, 4, 2, 3, 63, 600 },
+ { 99, 4, 2, 3, 63, 600 },
+ { 100, 4, 2, 3, 63, 600 },
+ { 101, 4, 2, 3, 63, 600 },
+ { 102, 4, 2, 3, 63, 600 },
+ { 103, 5, 2, 3, 63, 600 },
+ { 104, 5, 2, 3, 63, 600 },
+ { 105, 5, 2, 3, 63, 600 },
+ { 106, 5, 2, 3, 63, 600 },
+ { 107, 3, 4, 3, 63, 600 },
+ { 108, 3, 4, 3, 63, 600 },
+ { 109, 3, 4, 3, 63, 600 },
+ { 110, 3, 4, 3, 63, 600 },
+ { 111, 3, 4, 3, 63, 600 },
+ { 112, 3, 4, 3, 63, 600 },
+ { 113, 3, 4, 3, 63, 600 },
+ { 114, 3, 4, 3, 63, 600 },
+ { 115, 3, 4, 3, 63, 600 },
+ { 116, 3, 4, 3, 63, 600 },
+ { 117, 3, 4, 3, 63, 600 },
+ { 118, 3, 4, 3, 63, 600 },
+ { 119, 3, 4, 3, 63, 600 },
+ { 120, 3, 4, 3, 63, 600 },
+ { 121, 3, 4, 3, 63, 600 },
+ { 122, 3, 4, 3, 63, 600 },
+ { 123, 3, 4, 3, 63, 600 },
+ { 124, 3, 4, 3, 63, 600 },
+ { 125, 3, 4, 3, 63, 600 },
+};
+
+static int xvcu_divider_get_val(unsigned long rate, unsigned long parent_rate,
+ const struct clk_div_table *table, u8 width,
+ unsigned long flags)
+{
+ unsigned int div;
+
+ if (flags & CLK_DIVIDER_ROUND_CLOSEST)
+ div = DIV_ROUND_CLOSEST_ULL((u64)parent_rate, rate);
+ else
+ div = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
+
+ return min_t(unsigned int, div, div_mask(width));
+}
+
+static unsigned long xvcu_divider_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ unsigned int val;
+
+ val = readl(divider->reg) >> divider->shift;
+ val &= div_mask(divider->width);
+
+ return divider_recalc_rate(hw, parent_rate, val, divider->table,
+ divider->flags, divider->width);
+}
+
+static long xvcu_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ int bestdiv;
+
+ bestdiv = xvcu_divider_get_val(rate, *prate, divider->table,
+ divider->width, divider->flags);
+
+ *prate = rate * bestdiv;
+
+ return rate;
+}
+
+static int xvcu_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ int value;
+ u32 val;
+
+ value = xvcu_divider_get_val(rate, parent_rate, divider->table,
+ divider->width, divider->flags);
+ if (value < 0)
+ return value;
+
+ val = readl(divider->reg);
+ val &= ~(div_mask(divider->width) << divider->shift);
+ val |= (u32)value << divider->shift;
+ writel(val, divider->reg);
+
+ return 0;
+}
+
+static const struct clk_ops xvcu_divider_ops = {
+ .recalc_rate = xvcu_divider_recalc_rate,
+ .round_rate = xvcu_divider_round_rate,
+ .set_rate = xvcu_divider_set_rate,
+};
+
+/**
+ * xvcu_register_divider - Register custom divider hardware
+ * @dev: VCU clock device
+ * @name: Divider name
+ * @parent_name: Divider parent name
+ * @flags: Clock flags
+ * @reg: Divider register base address
+ * @shift: Divider bits shift
+ * @width: Divider bits width
+ * @clk_divider_flags: Divider specific flags
+ * @lock: Shared register lock
+ *
+ * Register custom divider hardware to CCF.
+ *
+ * Return: Clock hardware for generated clock
+ */
+static struct clk_hw *xvcu_register_divider(struct device *dev,
+ const char *name,
+ const char *parent_name,
+ unsigned long flags,
+ void __iomem *reg, u8 shift,
+ u8 width, u8 clk_divider_flags,
+ spinlock_t *lock)
+{
+ struct clk_divider *div;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ int ret;
+
+ /* allocate the divider */
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &xvcu_divider_ops;
+ init.flags = flags;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ /* struct clk_divider assignments */
+ div->reg = reg;
+ div->shift = shift;
+ div->width = width;
+ div->flags = clk_divider_flags;
+ div->lock = lock;
+ div->hw.init = &init;
+
+ /* register the clock */
+ hw = &div->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
+ kfree(div);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+}
+
+/**
+ * xvcu_pll_bypass_ctrl - Enable/Disable PLL bypass mode
+ * @pll: PLL data
+ * @enable: Enable/Disable flag
+ *
+ * Enable/Disable PLL bypass mode:
+ * 0 - Disable
+ * 1 - Enable
+ */
+static void xvcu_pll_bypass_ctrl(struct vcu_pll *pll, bool enable)
+{
+ u32 reg;
+
+ reg = readl(pll->pll_ctrl);
+ if (enable)
+ reg |= VCU_PLL_CTRL_BYPASS_MASK;
+ else
+ reg &= ~VCU_PLL_CTRL_BYPASS_MASK;
+ writel(reg, pll->pll_ctrl);
+}
+
+/**
+ * xvcu_pll_config - Configure PLL based on FBDIV value
+ * @pll: PLL data
+ *
+ * PLL needs to be configured before taking out of reset. Configuration
+ * data depends on the value of FBDIV for proper PLL locking.
+ */
+static void xvcu_pll_config(struct vcu_pll *pll)
+{
+ unsigned int fbdiv, reg;
+ int i;
+
+ reg = readl(pll->pll_ctrl);
+ fbdiv = (reg >> VCU_PLL_CTRL_FBDIV_SHIFT) & VCU_PLL_CTRL_FBDIV_MASK;
+
+ for (i = ARRAY_SIZE(xvcu_pll_cfg) - 1; i >= 0; i--) {
+ if (fbdiv != xvcu_pll_cfg[i].fbdiv)
+ continue;
+
+ /* Set RES, CP, LFHF, LOCK_CNT and LOCK_DLY cfg values */
+ reg = (xvcu_pll_cfg[i].res << VCU_PLL_CFG_RES_SHIFT) |
+ (xvcu_pll_cfg[i].cp << VCU_PLL_CFG_CP_SHIFT) |
+ (xvcu_pll_cfg[i].lfhf << VCU_PLL_CFG_LFHF_SHIFT) |
+ (xvcu_pll_cfg[i].lock_cnt << VCU_PLL_CFG_LOCK_CNT_SHIFT) |
+ (xvcu_pll_cfg[i].lock_dly << VCU_PLL_CFG_LOCK_DLY_SHIFT);
+ writel(reg, pll->pll_cfg);
+ }
+}
+
+/**
+ * xvcu_pll_enable_disable - Enable/Disable PLL
+ * @pll: PLL data
+ * @enable: Enable/Disable flag
+ *
+ * Enable/Disable PLL based on request:
+ * 0 - Disable
+ * 1 - Enable
+ */
+static void xvcu_pll_enable_disable(struct vcu_pll *pll, bool enable)
+{
+ u32 reg;
+
+ reg = readl(pll->pll_ctrl);
+ if (enable)
+ reg &= ~(VCU_PLL_CTRL_RESET_MASK | VCU_PLL_CTRL_POR_IN_MASK |
+ VCU_PLL_CTRL_PWR_POR_MASK);
+ else
+ reg |= (VCU_PLL_CTRL_RESET_MASK | VCU_PLL_CTRL_POR_IN_MASK |
+ VCU_PLL_CTRL_PWR_POR_MASK);
+ writel(reg, pll->pll_ctrl);
+}
+
+/**
+ * xvcu_pll_is_enabled - Check if PLL is enabled or not
+ * @hw: Clock hardware
+ *
+ * Check if PLL is enabled or not. PLL enabled means PLL is not in
+ * reset state.
+ *
+ * Return: PLL status (0 - Disabled, 1 - Enabled)
+ */
+static int xvcu_pll_is_enabled(struct clk_hw *hw)
+{
+ struct vcu_pll *pll = to_vcu_pll(hw);
+ u32 reg;
+
+ reg = readl(pll->pll_ctrl);
+
+ return !(reg & (VCU_PLL_CTRL_RESET_MASK | VCU_PLL_CTRL_POR_IN_MASK |
+ VCU_PLL_CTRL_PWR_POR_MASK));
+}
+
/**
 * xvcu_pll_enable - Enable PLL
 * @hw: Clock hardware
 *
 * Enable PLL if it is not enabled. Configure PLL, enable and wait for
 * the PLL lock. Put PLL into bypass state during PLL configuration.
 *
 * Return: 0 on success else error code
 */
static int xvcu_pll_enable(struct clk_hw *hw)
{
	struct vcu_pll *pll = to_vcu_pll(hw);
	u32 reg;
	int ret;

	/* Nothing to do when the PLL is already out of reset. */
	if (xvcu_pll_is_enabled(hw))
		return 0;

	pr_info("VCU PLL: enable\n");

	/* Bypass the PLL output while it is being (re)configured. */
	xvcu_pll_bypass_ctrl(pll, 1);

	xvcu_pll_config(pll);

	xvcu_pll_enable_disable(pll, 1);

	/* Atomic busy-poll on the lock bit in the status register. */
	ret = readl_poll_timeout_atomic(pll->pll_status, reg,
					reg & VCU_PLL_STATUS_LOCK_STATUS_MASK,
					1, VCU_PLL_LOCK_TIMEOUT);
	if (ret) {
		/*
		 * NOTE(review): on lock timeout the PLL is left enabled and
		 * in bypass mode — confirm whether it should be disabled
		 * again before returning the error.
		 */
		pr_err("VCU PLL is not locked\n");
		return ret;
	}

	/* PLL is locked: take it out of bypass so the real output is used. */
	xvcu_pll_bypass_ctrl(pll, 0);

	return ret;
}
+
/**
 * xvcu_pll_disable - Disable PLL
 * @hw: Clock hardware
 *
 * Disable PLL if it is enabled, by asserting its reset/power-down bits.
 */
static void xvcu_pll_disable(struct clk_hw *hw)
{
	struct vcu_pll *pll = to_vcu_pll(hw);

	/* Nothing to do when the PLL is already held in reset. */
	if (!xvcu_pll_is_enabled(hw))
		return;

	pr_info("PLL: shutdown\n");
	xvcu_pll_enable_disable(pll, 0);
}
+
+/**
+ * xvcu_pll_frac_get_mode - Get PLL fraction mode
+ * @hw: Clock hardware
+ *
+ * Check if PLL is configured for integer mode or fraction mode.
+ *
+ * Return: PLL mode:
+ * PLL_MODE_FRAC - Fraction mode
+ * PLL_MODE_INT - Integer mode
+ */
+static inline enum pll_mode xvcu_pll_frac_get_mode(struct clk_hw *hw)
+{
+ struct vcu_pll *clk = to_vcu_pll(hw);
+ u32 reg;
+
+ reg = readl(clk->pll_ctrl + FRAC_OFFSET);
+
+ reg = reg & PLLFCFG_FRAC_EN;
+ return reg ? PLL_MODE_FRAC : PLL_MODE_INT;
+}
+
+/**
+ * xvcu_pll_frac_set_mode - Set PLL fraction mode
+ * @hw: Clock hardware
+ * @on: Enable/Disable flag
+ *
+ * Configure PLL for integer mode or fraction mode.
+ * 1 - Fraction mode
+ * 0 - Integer mode
+ */
+static inline void xvcu_pll_frac_set_mode(struct clk_hw *hw, bool on)
+{
+ struct vcu_pll *clk = to_vcu_pll(hw);
+ u32 reg = 0;
+
+ if (on)
+ reg = PLLFCFG_FRAC_EN;
+
+ reg = readl(clk->pll_ctrl + FRAC_OFFSET);
+ reg |= PLLFCFG_FRAC_EN;
+ writel(reg, (clk->pll_ctrl + FRAC_OFFSET));
+}
+
/*
 * vcu_pll_round_rate - Round a requested rate to what the PLL can produce.
 *
 * Side effect: also switches the hardware between integer and fractional
 * mode depending on whether the requested rate divides the parent rate
 * evenly. NOTE(review): mutating hardware state from .round_rate is
 * unusual — confirm this is intended.
 */
static long vcu_pll_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *prate)
{
	u32 fbdiv;
	long rate_div, f;

	/* Enable the fractional mode if needed */
	/* NOTE(review): rate * FRAC_DIV may overflow on 32-bit — confirm
	 * the supported rate range. */
	rate_div = (rate * FRAC_DIV) / *prate;
	f = rate_div % FRAC_DIV;
	xvcu_pll_frac_set_mode(hw, !!f);

	if (xvcu_pll_frac_get_mode(hw) == PLL_MODE_FRAC) {
		/* Fold the rate into the VCO operating range. */
		if (rate > FVCO_MAX) {
			fbdiv = rate / FVCO_MAX;
			rate = rate / (fbdiv + 1);
		}
		if (rate < FVCO_MIN) {
			fbdiv = DIV_ROUND_UP(FVCO_MIN, rate);
			rate = rate * fbdiv;
		}
		return rate;
	}

	/* Integer mode: nearest feasible multiple of the parent rate. */
	fbdiv = DIV_ROUND_CLOSEST(rate, *prate);
	fbdiv = clamp_t(u32, fbdiv, PLL_FBDIV_MIN, PLL_FBDIV_MAX);
	return *prate * fbdiv;
}
+
+static unsigned long vcu_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct vcu_pll *pll = to_vcu_pll(hw);
+ u32 fbdiv, data, reg;
+ unsigned long rate, frac;
+
+ reg = readl(pll->pll_ctrl);
+ fbdiv = (reg >> VCU_PLL_CTRL_FBDIV_SHIFT) & VCU_PLL_CTRL_FBDIV_MASK;
+
+ rate = parent_rate * fbdiv;
+ if (xvcu_pll_frac_get_mode(hw) == PLL_MODE_FRAC) {
+ data = (readl(pll->pll_ctrl + FRAC_OFFSET) & 0xFFFF);
+ frac = (parent_rate * data) / FRAC_DIV;
+ rate = rate + frac;
+ }
+
+ return rate;
+}
+
/*
 * vcu_pll_set_rate - Program FBDIV (and the fractional part if enabled).
 *
 * NOTE(review): a clk_ops .set_rate callback conventionally returns 0 on
 * success; this one returns the computed rate — confirm callers tolerate
 * that (and that the value fits in int).
 */
static int vcu_pll_set_rate(struct clk_hw *hw, unsigned long rate,
			    unsigned long parent_rate)
{
	struct vcu_pll *pll = to_vcu_pll(hw);
	u32 fbdiv, reg;
	long rate_div, frac, m, f;

	if (xvcu_pll_frac_get_mode(hw) == PLL_MODE_FRAC) {
		/* Split the request into integer (m) and fractional (f)
		 * parts of the feedback divider. */
		rate_div = ((rate * FRAC_DIV) / parent_rate);
		m = rate_div / FRAC_DIV;
		f = rate_div % FRAC_DIV;
		m = clamp_t(u32, m, (PLL_FBDIV_MIN), (PLL_FBDIV_MAX));
		rate = parent_rate * m;
		frac = (parent_rate * f) / FRAC_DIV;
		/* Program the integer feedback divider. */
		reg = readl(pll->pll_ctrl);
		reg &= ~(VCU_PLL_CTRL_FBDIV_MASK << VCU_PLL_CTRL_FBDIV_SHIFT);
		reg |= m << VCU_PLL_CTRL_FBDIV_SHIFT;
		writel(reg, pll->pll_ctrl);

		/* Program the 16-bit fractional part. */
		reg = readl(pll->pll_ctrl + FRAC_OFFSET);
		reg &= ~0xFFFF;
		reg |= (f & 0xFFFF);
		writel(reg, pll->pll_ctrl + FRAC_OFFSET);

		return (rate + frac);
	}

	/* Integer mode: round to the nearest legal feedback divider. */
	fbdiv = DIV_ROUND_CLOSEST(rate, parent_rate);
	fbdiv = clamp_t(u32, fbdiv, PLL_FBDIV_MIN, PLL_FBDIV_MAX);
	reg = readl(pll->pll_ctrl);
	reg &= ~(VCU_PLL_CTRL_FBDIV_MASK << VCU_PLL_CTRL_FBDIV_SHIFT);
	reg |= fbdiv << VCU_PLL_CTRL_FBDIV_SHIFT;
	writel(reg, pll->pll_ctrl);

	return parent_rate * fbdiv;
}
+
/* clk_ops for the VCU PLL: gate control plus integer/fractional rate ops. */
static const struct clk_ops vcu_pll_ops = {
	.enable = xvcu_pll_enable,
	.disable = xvcu_pll_disable,
	.is_enabled = xvcu_pll_is_enabled,
	.round_rate = vcu_pll_round_rate,
	.recalc_rate = vcu_pll_recalc_rate,
	.set_rate = vcu_pll_set_rate,
};
+
+/**
+ * xvcu_register_pll - Register VCU PLL
+ * @dev: VCU clock device
+ * @name: PLL name
+ * @parent: PLL parent
+ * @reg_base: PLL register base address
+ * @flags: Hardware specific flags
+ *
+ * Register PLL to CCF.
+ *
+ * Return: Clock hardware for generated clock
+ */
+static struct clk_hw *xvcu_register_pll(struct device *dev, const char *name,
+ const char *parent,
+ void __iomem *reg_base,
+ unsigned long flags)
+{
+ struct vcu_pll *pll;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ int ret;
+
+ init.name = name;
+ init.parent_names = &parent;
+ init.ops = &vcu_pll_ops;
+ init.num_parents = 1;
+ init.flags = flags;
+
+ pll = devm_kmalloc(dev, sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ pll->hw.init = &init;
+ pll->pll_ctrl = reg_base + VCU_PLL_CTRL;
+ pll->pll_status = reg_base + VCU_PLL_STATUS;
+ pll->pll_cfg = reg_base + VCU_PLL_CFG;
+ pll->lockbit = VCU_PLL_STATUS_LOCK_STATUS_MASK;
+
+ hw = &pll->hw;
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ clk_hw_set_rate_range(hw, FVCO_MIN, FVCO_MAX);
+ if (ret < 0)
+ pr_err("%s:ERROR clk_set_rate_range failed %d\n", name, ret);
+
+ return hw;
+}
+
+/**
+ * register_vcu_leaf_clocks - Register VCU leaf clocks
+ * @dev: VCU clock device
+ * @name: Clock name
+ * @parents: Clock parents
+ * @nparents: Clock parent count
+ * @default_parent: Default parent to set
+ * @reg: Clock control register address
+ * @lock: Clock register access lock
+ *
+ * Register VCU leaf clocks. These clocks are MCU/core
+ * encoder and decoder clocks. Topology for these clocks
+ * are Mux, Divisor and Gate.
+ *
+ * Return: Clock hardware for the generated gate clock
+ */
+static struct clk_hw *register_vcu_leaf_clocks(struct device *dev,
+ const char *name,
+ const char * const *parents,
+ u8 nparents,
+ struct clk *default_parent,
+ void __iomem *reg,
+ spinlock_t *lock)
+{
+ char *clk_mux, *clk_div;
+ struct clk_hw *hw;
+
+ clk_mux = devm_kasprintf(dev, GFP_KERNEL, "%s%s", name, "_mux");
+ hw = clk_hw_register_mux(dev, clk_mux, parents, nparents,
+ CLK_SET_RATE_PARENT |
+ CLK_SET_RATE_NO_REPARENT,
+ reg, VCU_SRCSEL_SHIFT, 1, 0, lock);
+
+ if (default_parent)
+ clk_set_parent(hw->clk, default_parent);
+
+ clk_div = devm_kasprintf(dev, GFP_KERNEL, "%s%s", name, "_div");
+ xvcu_register_divider(dev, clk_div, clk_mux,
+ CLK_SET_RATE_PARENT |
+ CLK_SET_RATE_NO_REPARENT,
+ reg, VCU_PLL_DIVISOR_SHIFT, 6,
+ CLK_DIVIDER_ONE_BASED |
+ CLK_DIVIDER_ALLOW_ZERO |
+ CLK_DIVIDER_ROUND_CLOSEST,
+ lock);
+
+ return clk_hw_register_gate(dev, name, clk_div,
+ CLK_SET_RATE_PARENT, reg, 12, 0,
+ lock);
+}
+
/**
 * unregister_vcu_leaf_clocks - Unregister VCU leaf clocks
 * @hw: Gate clock hardware of a leaf clock chain
 *
 * Tear down one gate -> divider -> mux clock chain in leaf-to-root
 * order, so each clock's parent is still registered when the clock
 * itself is removed.
 */
static void unregister_vcu_leaf_clocks(struct clk_hw *hw)
{
	struct clk_hw *div_hw, *mux_hw;

	div_hw = clk_hw_get_parent(hw);
	clk_hw_unregister_gate(hw);

	mux_hw = clk_hw_get_parent(div_hw);
	clk_hw_unregister_divider(div_hw);

	clk_hw_unregister_mux(mux_hw);
}
+
/**
 * xvcu_clock_init - Initialize VCU clocks
 * @dev: VCU clock device
 * @reg_base: Clock register base address
 *
 * Register VCU PLL and clocks and add VCU to clock provider list.
 *
 * Return: 0 on success else error code.
 */
static int xvcu_clock_init(struct device *dev, void __iomem *reg_base)
{
	struct clk_hw *hw;
	struct clk *ref_clk;
	const char *parent;
	u32 vcu_pll_ctrl, clkoutdiv;
	int i;

	/* The PLL reference clock is named "pll_ref" in the device tree. */
	ref_clk = devm_clk_get(dev, "pll_ref");
	if (IS_ERR(ref_clk)) {
		dev_err(dev, "failed to get pll_ref clock\n");
		return PTR_ERR(ref_clk);
	}

	/* One-cell clock table, also used by xvcu_clk_remove() (file global). */
	vcu_clk_data = devm_kzalloc(dev, sizeof(*vcu_clk_data) +
				    sizeof(*vcu_clk_data->hws) * clk_max,
				    GFP_KERNEL);
	if (!vcu_clk_data)
		return -ENOMEM;

	parent = __clk_get_name(ref_clk);
	hw = xvcu_register_pll(dev, "vcu_pll", parent, reg_base,
			       CLK_SET_RATE_NO_REPARENT);
	if (IS_ERR(hw)) {
		dev_err(dev, "VCU PLL registration failed\n");
		return PTR_ERR(hw);
	}

	/*
	 * The divide-by-2 should be always enabled (== 1) to meet the timing
	 * in the design. Otherwise, it's an error
	 */
	vcu_pll_ctrl = readl(reg_base + VCU_PLL_CTRL);
	clkoutdiv = vcu_pll_ctrl >> VCU_PLL_CTRL_CLKOUTDIV_SHIFT;
	clkoutdiv = clkoutdiv & VCU_PLL_CTRL_CLKOUTDIV_MASK;
	if (clkoutdiv != 1) {
		dev_err(dev, "clkoutdiv is invalid\n");
		return -EINVAL;
	}

	/* Fixed /2 clock derived from the PLL output. */
	vcu_clk_data->hws[vcu_pll_half] =
		clk_hw_register_fixed_factor(dev, "vcu_pll_half", "vcu_pll",
					     CLK_SET_RATE_NO_REPARENT |
					     CLK_SET_RATE_PARENT,
					     1, 2);

	/* Mux/div/gate chains for the encoder/decoder core and MCU clocks. */
	vcu_clk_data->hws[vcu_core_enc] =
		register_vcu_leaf_clocks(dev, "vcu_core_enc_clk",
					 vcu_mux_parents, 2,
					 vcu_clk_data->hws[vcu_pll_half]->clk,
					 reg_base + VCU_ENC_CORE_CTRL,
					 &core_enc_lock);
	vcu_clk_data->hws[vcu_core_dec] =
		register_vcu_leaf_clocks(dev, "vcu_core_dec_clk",
					 vcu_mux_parents, 2,
					 vcu_clk_data->hws[vcu_pll_half]->clk,
					 reg_base + VCU_DEC_CORE_CTRL,
					 &core_dec_lock);
	vcu_clk_data->hws[mcu_core_enc] =
		register_vcu_leaf_clocks(dev, "mcu_core_enc_clk",
					 vcu_mux_parents, 2,
					 vcu_clk_data->hws[vcu_pll_half]->clk,
					 reg_base + VCU_ENC_MCU_CTRL,
					 &mcu_enc_lock);
	vcu_clk_data->hws[mcu_core_dec] =
		register_vcu_leaf_clocks(dev, "mcu_core_dec_clk",
					 vcu_mux_parents, 2,
					 vcu_clk_data->hws[vcu_pll_half]->clk,
					 reg_base + VCU_DEC_MCU_CTRL,
					 &mcu_dec_lock);

	/*
	 * Individual registration failures are only reported; the provider
	 * is still registered and ERR_PTR entries remain in the table.
	 */
	for (i = 0; i < clk_max; i++) {
		if (IS_ERR(vcu_clk_data->hws[i])) {
			dev_err(dev, "clk %d: register failed with %ld\n",
				i, PTR_ERR(vcu_clk_data->hws[i]));
		}
	}

	vcu_clk_data->num = clk_max;
	return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
				      vcu_clk_data);
}
+
+static int xvcu_clk_probe(struct platform_device *pdev)
+{
+ struct xvcu_device *xvcu = dev_get_drvdata(pdev->dev.parent);
+ int ret;
+
+ ret = xvcu_clock_init(pdev->dev.parent, xvcu->vcu_slcr_ba);
+ if (ret)
+ dev_err(&pdev->dev, "clock init fail with error %d\n", ret);
+ else
+ dev_dbg(&pdev->dev, "clock init successful\n");
+
+ return ret;
+}
+
+static int xvcu_clk_remove(struct platform_device *pdev)
+{
+ unregister_vcu_leaf_clocks(vcu_clk_data->hws[vcu_core_enc]);
+ unregister_vcu_leaf_clocks(vcu_clk_data->hws[vcu_core_dec]);
+ unregister_vcu_leaf_clocks(vcu_clk_data->hws[mcu_core_enc]);
+ unregister_vcu_leaf_clocks(vcu_clk_data->hws[mcu_core_dec]);
+ clk_hw_unregister(vcu_clk_data->hws[vcu_pll_half]);
+ of_clk_del_provider(pdev->dev.parent->of_node);
+
+ devm_kfree(pdev->dev.parent, vcu_clk_data);
+
+ return 0;
+}
+
/* Child platform driver (MFD cell) providing the VCU output clocks. */
static struct platform_driver xvcu_clk_driver = {
	.driver = {
		.name = "xilinx-vcu-clk",
	},
	.probe = xvcu_clk_probe,
	.remove = xvcu_clk_remove,
};

module_platform_driver(xvcu_clk_driver);

MODULE_AUTHOR("Rajan Vaja <rajan.vaja@xilinx.com>");
MODULE_DESCRIPTION("Xilinx VCU clock Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:xilinx-vcu-clk");
diff --git a/drivers/soc/xilinx/xlnx_vcu_core.c b/drivers/soc/xilinx/xlnx_vcu_core.c
new file mode 100644
index 000000000000..def07ad09c67
--- /dev/null
+++ b/drivers/soc/xilinx/xlnx_vcu_core.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx VCU core driver
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Rajan Vaja <rajan.vaja@xilinx.com>
+ */
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/gpio/consumer.h>
+#include <linux/delay.h>
+#include <linux/of_platform.h>
+#include <soc/xilinx/xlnx_vcu.h>
+
/* MFD children: the clock provider and the VCU logicore driver. */
static const struct mfd_cell xvcu_devs[] = {
	{
		.name = "xilinx-vcu-clk",
	},
	{
		.name = "xilinx-vcu",
	},
};
+
+static int xvcu_core_probe(struct platform_device *pdev)
+{
+ struct xvcu_device *xvcu;
+ struct resource *res;
+ int ret;
+
+ xvcu = devm_kzalloc(&pdev->dev, sizeof(*xvcu), GFP_KERNEL);
+ if (!xvcu)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vcu_slcr");
+ if (!res) {
+ dev_err(&pdev->dev, "get vcu_slcr memory resource failed.\n");
+ return -ENODEV;
+ }
+
+ xvcu->vcu_slcr_ba = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!xvcu->vcu_slcr_ba) {
+ dev_err(&pdev->dev, "vcu_slcr register mapping failed.\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "logicore");
+ if (!res) {
+ dev_err(&pdev->dev, "get logicore memory resource failed.\n");
+ return -ENODEV;
+ }
+
+ xvcu->logicore_reg_ba = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!xvcu->logicore_reg_ba) {
+ dev_err(&pdev->dev, "logicore register mapping failed.\n");
+ return -ENOMEM;
+ }
+
+ dev_set_drvdata(&pdev->dev, xvcu);
+
+ xvcu->aclk = devm_clk_get(&pdev->dev, "aclk");
+ if (IS_ERR(xvcu->aclk)) {
+ dev_err(&pdev->dev, "Could not get aclk clock\n");
+ return PTR_ERR(xvcu->aclk);
+ }
+
+ ret = clk_prepare_enable(xvcu->aclk);
+ if (ret) {
+ dev_err(&pdev->dev, "aclk clock enable failed\n");
+ return ret;
+ }
+
+ /*
+ * Do the Gasket isolation and put the VCU out of reset
+ * Bit 0 : Gasket isolation
+ * Bit 1 : put VCU out of reset
+ */
+ xvcu->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(xvcu->reset_gpio)) {
+ ret = PTR_ERR(xvcu->reset_gpio);
+ dev_err(&pdev->dev, "failed to get reset gpio for vcu.\n");
+ return ret;
+ }
+
+ if (xvcu->reset_gpio) {
+ gpiod_set_value(xvcu->reset_gpio, 0);
+ /* min 2 clock cycle of vcu pll_ref, slowest freq is 33.33KHz */
+ usleep_range(60, 120);
+ gpiod_set_value(xvcu->reset_gpio, 1);
+ usleep_range(60, 120);
+ } else {
+ dev_warn(&pdev->dev, "No reset gpio info from dts for vcu. This may lead to incorrect functionality if VCU isolation is removed post initialization.\n");
+ }
+
+ iowrite32(VCU_GASKET_VALUE, xvcu->logicore_reg_ba + VCU_GASKET_INIT);
+
+ ret = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, xvcu_devs,
+ ARRAY_SIZE(xvcu_devs), NULL, 0, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add MFD devices %d\n", ret);
+ goto err_mfd_add_devices;
+ }
+
+ dev_dbg(&pdev->dev, "Successfully added MFD devices\n");
+
+ return 0;
+
+err_mfd_add_devices:
+ /* Add the the Gasket isolation and put the VCU in reset. */
+ iowrite32(0, xvcu->logicore_reg_ba + VCU_GASKET_INIT);
+
+ clk_disable_unprepare(xvcu->aclk);
+
+ return ret;
+}
+
/**
 * xvcu_core_remove - Remove the VCU core device
 * @pdev: Platform device
 *
 * Remove the MFD children, re-assert the gasket isolation and disable
 * the AXI clock.
 *
 * Return: 0 on success, -ENODEV if no driver data is present.
 */
static int xvcu_core_remove(struct platform_device *pdev)
{
	struct xvcu_device *xvcu;

	xvcu = platform_get_drvdata(pdev);
	if (!xvcu)
		return -ENODEV;

	mfd_remove_devices(&pdev->dev);

	/* Add the Gasket isolation and put the VCU in reset. */
	if (xvcu->reset_gpio) {
		gpiod_set_value(xvcu->reset_gpio, 0);
		/* min 2 clock cycle of vcu pll_ref, slowest freq is 33.33KHz */
		usleep_range(60, 120);
		/*
		 * NOTE(review): this leaves the GPIO at 1, the same level
		 * probe() uses to release the reset — confirm whether the
		 * VCU is really meant to be left in reset here.
		 */
		gpiod_set_value(xvcu->reset_gpio, 1);
		usleep_range(60, 120);
	}
	iowrite32(0, xvcu->logicore_reg_ba + VCU_GASKET_INIT);

	clk_disable_unprepare(xvcu->aclk);

	return 0;
}
+
/* Device-tree match table: both the original and logicoreip-1.0
 * compatibles bind to this driver. */
static const struct of_device_id xvcu_core_of_id_table[] = {
	{ .compatible = "xlnx,vcu" },
	{ .compatible = "xlnx,vcu-logicoreip-1.0" },
	{ }
};
MODULE_DEVICE_TABLE(of, xvcu_core_of_id_table);
+
/* Parent platform driver for the VCU MFD (spawns clk and VCU children). */
static struct platform_driver xvcu_core_driver = {
	.driver = {
		.name = "xilinx-vcu-core",
		.of_match_table = xvcu_core_of_id_table,
	},
	.probe = xvcu_core_probe,
	.remove = xvcu_core_remove,
};

module_platform_driver(xvcu_core_driver);

MODULE_AUTHOR("Rajan Vaja <rajan.vaja@xilinx.com>");
MODULE_DESCRIPTION("Xilinx VCU core Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 11279dcc4a3e..b215b0875923 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -899,10 +899,18 @@ config SPI_XTENSA_XTFPGA
config SPI_ZYNQ_QSPI
tristate "Xilinx Zynq QSPI controller"
depends on ARCH_ZYNQ || COMPILE_TEST
+ depends on SPI_MASTER
help
This enables support for the Zynq Quad SPI controller
in master mode.
- This controller only supports SPI memory interface.
+
+config SPI_ZYNQ_QSPI_DUAL_STACKED
+ bool "Xilinx Zynq QSPI Dual stacked configuration"
+ depends on SPI_ZYNQ_QSPI
+ help
+ This selects the Xilinx ZYNQ Quad SPI controller in dual stacked mode.
+ Enable this option if your hw design is using dual stacked
+ configuration.
config SPI_ZYNQMP_GQSPI
tristate "Xilinx ZynqMP GQSPI controller"
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index 9a86cc27fcc0..db1330281d65 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -7,6 +7,7 @@
*/
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
+#include <linux/mtd/spi-nor.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
@@ -14,6 +15,24 @@
#define SPI_MEM_MAX_BUSWIDTH 8
+bool update_stripe(const struct spi_mem_op *op)
+{
+ if (op->cmd.opcode == SPINOR_OP_BE_4K ||
+ op->cmd.opcode == SPINOR_OP_BE_32K ||
+ op->cmd.opcode == SPINOR_OP_CHIP_ERASE ||
+ op->cmd.opcode == SPINOR_OP_SE ||
+ op->cmd.opcode == SPINOR_OP_BE_32K_4B ||
+ op->cmd.opcode == SPINOR_OP_SE_4B ||
+ op->cmd.opcode == SPINOR_OP_BE_4K_4B ||
+ op->cmd.opcode == SPINOR_OP_WRSR ||
+ op->cmd.opcode == SPINOR_OP_WREAR ||
+ op->cmd.opcode == SPINOR_OP_BRWR ||
+ (op->cmd.opcode == SPINOR_OP_WRSR2 && !op->addr.nbytes))
+ return false;
+
+ return true;
+}
+
/**
* spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
* memory operation
@@ -348,6 +367,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
xfers[xferpos].len = op->dummy.nbytes;
xfers[xferpos].tx_nbits = op->dummy.buswidth;
+ xfers[xferpos].dummy = op->dummy.nbytes * 8;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
totalxferlen += op->dummy.nbytes;
@@ -362,6 +382,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
xfers[xferpos].tx_nbits = op->data.buswidth;
}
+ xfers[xferpos].stripe = update_stripe(op);
xfers[xferpos].len = op->data.nbytes;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index 8dd2bb99cb4d..eba54ebee3a3 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -16,10 +16,11 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
-#include <linux/spi/spi_bitbang.h>
#include <linux/spi/xilinx_spi.h>
#include <linux/io.h>
-
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
#define XILINX_SPI_MAX_CS 32
#define XILINX_SPI_NAME "xilinx_spi"
@@ -76,14 +77,51 @@
#define XIPIF_V123B_RESETR_OFFSET 0x40 /* IPIF reset register */
#define XIPIF_V123B_RESET_MASK 0x0a /* the value to write */
+/* Number of bits per word */
+#define XSPI_ONE_BITS_PER_WORD 1
+#define XSPI_TWO_BITS_PER_WORD 2
+#define XSPI_FOUR_BITS_PER_WORD 4
+
+/* Number of data lines used to receive */
+#define XSPI_RX_ONE_WIRE 1
+#define XSPI_RX_FOUR_WIRE 4
+
+/* Auto suspend timeout in milliseconds */
+#define SPI_AUTOSUSPEND_TIMEOUT 3000
+
+/* Command used for Dummy read Id */
+#define SPI_READ_ID 0x9F
+
+/**
+ * struct xilinx_spi - This definition define spi driver instance
+ * @regs: virt. address of the control registers
+ * @irq: IRQ number
+ * @axi_clk: Pointer to the AXI clock
+ * @axi4_clk: Pointer to the AXI4 clock
+ * @spi_clk: Pointer to the SPI clock
+ * @dev: Pointer to the device
+ * @rx_ptr: Pointer to the RX buffer
+ * @tx_ptr: Pointer to the TX buffer
+ * @bytes_per_word: Number of bytes in a word
+ * @buffer_size: Buffer size in words
+ * @cs_inactive: Level of the CS pins when inactive
+ * @read_fn: For reading data from SPI registers
+ * @write_fn: For writing data to SPI registers
+ * @bytes_to_transfer: Number of bytes left to transfer
+ * @bytes_to_receive: Number of bytes left to receive
+ * @rx_bus_width: Number of wires used to receive data
+ * @tx_fifo: For writing data to fifo
+ * @rx_fifo: For reading data from fifo
+ */
struct xilinx_spi {
- /* bitbang has to be first */
- struct spi_bitbang bitbang;
- struct completion done;
void __iomem *regs; /* virt. address of the control registers */
int irq;
+ struct clk *axi_clk;
+ struct clk *axi4_clk;
+ struct clk *spi_clk;
+ struct device *dev;
u8 *rx_ptr; /* pointer in the Tx buffer */
const u8 *tx_ptr; /* pointer in the Rx buffer */
u8 bytes_per_word;
@@ -91,8 +129,69 @@ struct xilinx_spi {
u32 cs_inactive; /* Level of the CS pins when inactive*/
unsigned int (*read_fn)(void __iomem *);
void (*write_fn)(u32, void __iomem *);
+ u32 bytes_to_transfer;
+ u32 bytes_to_receive;
+ u32 rx_bus_width;
+ void (*tx_fifo)(struct xilinx_spi *xqspi);
+ void (*rx_fifo)(struct xilinx_spi *xqspi);
};
/**
 * XSPI_FIFO_READ - Generate xspi_read_rx_fifo_* functions
 * @size: bits_per_word that are read from RX FIFO
 * @type: C type of value argument
 *
 * Generates xspi_read_rx_fifo_* functions used to read data out of the
 * RX FIFO for different transaction widths.
 *
 * NOTE(review): @i advances in bytes (size / 8 per FIFO word) but is
 * also used as an element index through the (type *) cast, which
 * double-scales the offset for the 16- and 32-bit variants — confirm
 * the intended addressing.
 */
#define XSPI_FIFO_READ(size, type) \
static void xspi_read_rx_fifo_##size(struct xilinx_spi *xqspi) \
{ \
	int i; \
	int count = (xqspi->bytes_to_receive > xqspi->buffer_size) ? \
		xqspi->buffer_size : xqspi->bytes_to_receive; \
	u32 data; \
	for (i = 0; i < count; i += (size / 8)) { \
		data = readl_relaxed(xqspi->regs + XSPI_RXD_OFFSET); \
		if (xqspi->rx_ptr) \
			((type *)xqspi->rx_ptr)[i] = (type)data; \
	} \
	xqspi->bytes_to_receive -= count; \
	if (xqspi->rx_ptr) \
		xqspi->rx_ptr += count; \
}
+
/**
 * XSPI_FIFO_WRITE - Generate xspi_fill_tx_fifo_* functions
 * @size: bits_per_word that are written into TX FIFO
 * @type: C type of value argument
 *
 * Generates xspi_fill_tx_fifo_* functions used to write
 * data into TX FIFO for different transaction widths.
 *
 * NOTE(review): the source is read as a single u8 and then cast to
 * @type, so for the 16- and 32-bit variants only the low byte of each
 * word is ever transmitted — confirm the intended byte assembly.
 */
#define XSPI_FIFO_WRITE(size, type) \
static void xspi_fill_tx_fifo_##size(struct xilinx_spi *xqspi) \
{ \
	int i; \
	int count = (xqspi->bytes_to_transfer > xqspi->buffer_size) ? \
		xqspi->buffer_size : xqspi->bytes_to_transfer; \
	u32 data = 0; \
	for (i = 0; i < count; i += (size / 8)) { \
		if (xqspi->tx_ptr) \
			data = (type)((u8 *)xqspi->tx_ptr)[i]; \
		writel_relaxed(data, (xqspi->regs + XSPI_TXD_OFFSET)); \
	} \
	xqspi->bytes_to_transfer -= count; \
	if (xqspi->tx_ptr) \
		xqspi->tx_ptr += count; \
}
+
/* Instantiate RX/TX FIFO helpers for 8-, 16- and 32-bit word sizes. */
XSPI_FIFO_READ(8, u8)
XSPI_FIFO_READ(16, u16)
XSPI_FIFO_READ(32, u32)
XSPI_FIFO_WRITE(8, u8)
XSPI_FIFO_WRITE(16, u16)
XSPI_FIFO_WRITE(32, u32)
static void xspi_write32(u32 val, void __iomem *addr)
{
iowrite32(val, addr);
@@ -113,53 +212,15 @@ static unsigned int xspi_read32_be(void __iomem *addr)
return ioread32be(addr);
}
-static void xilinx_spi_tx(struct xilinx_spi *xspi)
-{
- u32 data = 0;
-
- if (!xspi->tx_ptr) {
- xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
- return;
- }
-
- switch (xspi->bytes_per_word) {
- case 1:
- data = *(u8 *)(xspi->tx_ptr);
- break;
- case 2:
- data = *(u16 *)(xspi->tx_ptr);
- break;
- case 4:
- data = *(u32 *)(xspi->tx_ptr);
- break;
- }
-
- xspi->write_fn(data, xspi->regs + XSPI_TXD_OFFSET);
- xspi->tx_ptr += xspi->bytes_per_word;
-}
-
-static void xilinx_spi_rx(struct xilinx_spi *xspi)
-{
- u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
-
- if (!xspi->rx_ptr)
- return;
-
- switch (xspi->bytes_per_word) {
- case 1:
- *(u8 *)(xspi->rx_ptr) = data;
- break;
- case 2:
- *(u16 *)(xspi->rx_ptr) = data;
- break;
- case 4:
- *(u32 *)(xspi->rx_ptr) = data;
- break;
- }
-
- xspi->rx_ptr += xspi->bytes_per_word;
-}
-
+/**
+ * xspi_init_hw - Initialize the hardware
+ * @xspi: Pointer to the zynqmp_qspi structure
+ *
+ * This function performs the following actions
+ * - Disable and clear all the interrupts
+ * - Enable manual slave select
+ * - Enable the SPI controller
+ */
static void xspi_init_hw(struct xilinx_spi *xspi)
{
void __iomem *regs_base = xspi->regs;
@@ -183,49 +244,106 @@ static void xspi_init_hw(struct xilinx_spi *xspi)
regs_base + XSPI_CR_OFFSET);
}
-static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
+/**
+ * xspi_chipselect - Select or deselect the chip select line
+ * @qspi: Pointer to the spi_device structure
+ * @is_high: Select(0) or deselect (1) the chip select line
+ *
+ */
+static void xspi_chipselect(struct spi_device *qspi, bool is_high)
{
- struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
- u16 cr;
+ struct xilinx_spi *xqspi = spi_master_get_devdata(qspi->master);
u32 cs;
- if (is_on == BITBANG_CS_INACTIVE) {
- /* Deselect the slave on the SPI bus */
- xspi->write_fn(xspi->cs_inactive, xspi->regs + XSPI_SSR_OFFSET);
- return;
+ if (is_high) {
+ /* Deselect the slave */
+ xqspi->write_fn(xqspi->cs_inactive,
+ xqspi->regs + XSPI_SSR_OFFSET);
+ } else {
+ cs = xqspi->cs_inactive;
+ cs ^= BIT(qspi->chip_select);
+ /* Activate the chip select */
+ xqspi->write_fn(cs, xqspi->regs + XSPI_SSR_OFFSET);
}
+}
- /* Set the SPI clock phase and polarity */
- cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_MODE_MASK;
- if (spi->mode & SPI_CPHA)
- cr |= XSPI_CR_CPHA;
- if (spi->mode & SPI_CPOL)
- cr |= XSPI_CR_CPOL;
- if (spi->mode & SPI_LSB_FIRST)
- cr |= XSPI_CR_LSB_FIRST;
- if (spi->mode & SPI_LOOP)
- cr |= XSPI_CR_LOOP;
- xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
-
- /* We do not check spi->max_speed_hz here as the SPI clock
- * frequency is not software programmable (the IP block design
- * parameter)
- */
-
- cs = xspi->cs_inactive;
- cs ^= BIT(spi->chip_select);
+/**
+ * xilinx_spi_startup_block - Perform a dummy read as a
+ * work around for the startup block issue in the spi controller.
+ * @xspi: Pointer to the xilinx_spi structure
+ * @cs_num: chip select number.
+ *
+ * Perform a dummy read if startup block is enabled in the
+ * spi controller.
+ *
+ * Return: None
+ */
+static void xilinx_spi_startup_block(struct xilinx_spi *xspi, u32 cs_num)
+{
+ void __iomem *regs_base = xspi->regs;
+ u32 chip_sel, config_reg, status_reg;
/* Activate the chip select */
- xspi->write_fn(cs, xspi->regs + XSPI_SSR_OFFSET);
+ chip_sel = xspi->cs_inactive;
+ chip_sel ^= BIT(cs_num);
+ xspi->write_fn(chip_sel, regs_base + XSPI_SSR_OFFSET);
+
+ /* Write ReadId to the TXD register */
+ xspi->write_fn(SPI_READ_ID, regs_base + XSPI_TXD_OFFSET);
+ xspi->write_fn(0x0, regs_base + XSPI_TXD_OFFSET);
+ xspi->write_fn(0x0, regs_base + XSPI_TXD_OFFSET);
+
+ config_reg = xspi->read_fn(regs_base + XSPI_CR_OFFSET);
+ /* Enable master transaction */
+ config_reg &= ~XSPI_CR_TRANS_INHIBIT;
+ xspi->write_fn(config_reg, regs_base + XSPI_CR_OFFSET);
+
+ status_reg = xspi->read_fn(regs_base + XSPI_SR_OFFSET);
+ while ((status_reg & XSPI_SR_TX_EMPTY_MASK) == 0)
+ status_reg = xspi->read_fn(regs_base + XSPI_SR_OFFSET);
+
+ /* Disable master transaction */
+ config_reg |= XSPI_CR_TRANS_INHIBIT;
+ xspi->write_fn(config_reg, regs_base + XSPI_CR_OFFSET);
+
+ /* Read the RXD Register */
+ status_reg = xspi->read_fn(regs_base + XSPI_SR_OFFSET);
+ while ((status_reg & XSPI_SR_RX_EMPTY_MASK) == 0) {
+ xspi->read_fn(regs_base + XSPI_RXD_OFFSET);
+ status_reg = xspi->read_fn(regs_base + XSPI_SR_OFFSET);
+ }
+
+ xspi_init_hw(xspi);
}
-/* spi_bitbang requires custom setup_transfer() to be defined if there is a
- * custom txrx_bufs().
+/**
+ * xilinx_spi_setup_transfer - Configure SPI controller for specified
+ * transfer
+ * @spi: Pointer to the spi_device structure
+ * @t: Pointer to the spi_transfer structure which provides
+ * information about next transfer setup parameters
+ *
+ * Sets the operational mode of QSPI controller for the next QSPI
+ * transfer.
+ *
+ * Return: 0 always
*/
static int xilinx_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
+ u32 config_reg;
+
+ config_reg = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
+ /* Set the QSPI clock phase and clock polarity */
+ config_reg &= ~(XSPI_CR_CPHA | XSPI_CR_CPOL);
+ if (spi->mode & SPI_CPHA)
+ config_reg |= XSPI_CR_CPHA;
+ if (spi->mode & SPI_CPOL)
+ config_reg |= XSPI_CR_CPOL;
+ if (spi->mode & SPI_LSB_FIRST)
+ config_reg |= XSPI_CR_LSB_FIRST;
+ xspi->write_fn(config_reg, xspi->regs + XSPI_CR_OFFSET);
if (spi->mode & SPI_CS_HIGH)
xspi->cs_inactive &= ~BIT(spi->chip_select);
@@ -235,104 +353,257 @@ static int xilinx_spi_setup_transfer(struct spi_device *spi,
return 0;
}
-static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
+/**
+ * xspi_setup - Configure the SPI controller
+ * @qspi: Pointer to the spi_device structure
+ *
+ * Sets the operational mode of QSPI controller for the next QSPI
+ * transfer.
+ *
+ * Return: 0 on success; error value otherwise.
+ */
+static int xspi_setup(struct spi_device *qspi)
{
- struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
- int remaining_words; /* the number of words left to transfer */
- bool use_irq = false;
- u16 cr = 0;
-
- /* We get here with transmitter inhibited */
-
- xspi->tx_ptr = t->tx_buf;
- xspi->rx_ptr = t->rx_buf;
- remaining_words = t->len / xspi->bytes_per_word;
-
- if (xspi->irq >= 0 && remaining_words > xspi->buffer_size) {
- u32 isr;
- use_irq = true;
- /* Inhibit irq to avoid spurious irqs on tx_empty*/
- cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
- xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
- xspi->regs + XSPI_CR_OFFSET);
- /* ACK old irqs (if any) */
- isr = xspi->read_fn(xspi->regs + XIPIF_V123B_IISR_OFFSET);
- if (isr)
- xspi->write_fn(isr,
- xspi->regs + XIPIF_V123B_IISR_OFFSET);
- /* Enable the global IPIF interrupt */
- xspi->write_fn(XIPIF_V123B_GINTR_ENABLE,
- xspi->regs + XIPIF_V123B_DGIER_OFFSET);
- reinit_completion(&xspi->done);
+ int ret;
+ struct xilinx_spi *xqspi = spi_master_get_devdata(qspi->master);
+
+ if (qspi->master->busy)
+ return -EBUSY;
+
+ ret = pm_runtime_get_sync(xqspi->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = xilinx_spi_setup_transfer(qspi, NULL);
+ pm_runtime_put_sync(xqspi->dev);
+
+ return ret;
+}
+
+/**
+ * xspi_start_transfer - Initiates the SPI transfer
+ * @master: Pointer to the spi_master structure which provides
+ * information about the controller.
+ * @qspi: Pointer to the spi_device structure
+ * @transfer: Pointer to the spi_transfer structure which provide information
+ * about next transfer parameters
+ *
+ * This function fills the TX FIFO, starts the SPI transfer, and waits for the
+ * transfer to be completed.
+ *
+ * Return: Number of bytes transferred in the last transfer
+ */
+
+static int xspi_start_transfer(struct spi_master *master,
+ struct spi_device *qspi,
+ struct spi_transfer *transfer)
+{
+ struct xilinx_spi *xqspi = spi_master_get_devdata(master);
+ u32 cr;
+
+ xqspi->tx_ptr = transfer->tx_buf;
+ xqspi->rx_ptr = transfer->rx_buf;
+
+ if (transfer->dummy) {
+ xqspi->bytes_to_transfer = (transfer->len - (transfer->dummy / 8))
+ + ((transfer->dummy / 8) *
+ xqspi->rx_bus_width);
+ xqspi->bytes_to_receive = (transfer->len - (transfer->dummy / 8))
+ + ((transfer->dummy / 8) *
+ xqspi->rx_bus_width);
+ } else {
+ xqspi->bytes_to_transfer = transfer->len;
+ xqspi->bytes_to_receive = transfer->len;
+
}
- while (remaining_words) {
- int n_words, tx_words, rx_words;
- u32 sr;
- int stalled;
+ xilinx_spi_setup_transfer(qspi, transfer);
+ cr = xqspi->read_fn(xqspi->regs + XSPI_CR_OFFSET);
+ /* Enable master transaction inhibit */
+ cr |= XSPI_CR_TRANS_INHIBIT;
+ xqspi->write_fn(cr, xqspi->regs + XSPI_CR_OFFSET);
+ xqspi->tx_fifo(xqspi);
+ /* Disable master transaction inhibit */
+ cr &= ~XSPI_CR_TRANS_INHIBIT;
+ xqspi->write_fn(cr, xqspi->regs + XSPI_CR_OFFSET);
+ xqspi->write_fn(XIPIF_V123B_GINTR_ENABLE,
+ xqspi->regs + XIPIF_V123B_DGIER_OFFSET);
+
+ return transfer->len;
+}
- n_words = min(remaining_words, xspi->buffer_size);
+/**
+ * xspi_prepare_transfer_hardware - Prepares hardware for transfer.
+ * @master: Pointer to the spi_master structure which provides
+ * information about the controller.
+ *
+ * This function enables SPI master controller.
+ *
+ * Return: 0 on success; error value otherwise
+ */
+static int xspi_prepare_transfer_hardware(struct spi_master *master)
+{
+ struct xilinx_spi *xqspi = spi_master_get_devdata(master);
- tx_words = n_words;
- while (tx_words--)
- xilinx_spi_tx(xspi);
+ u32 cr;
+ int ret;
- /* Start the transfer by not inhibiting the transmitter any
- * longer
- */
+ ret = pm_runtime_get_sync(xqspi->dev);
+ if (ret < 0)
+ return ret;
+ cr = xqspi->read_fn(xqspi->regs + XSPI_CR_OFFSET);
+ cr |= XSPI_CR_ENABLE;
+ xqspi->write_fn(cr, xqspi->regs + XSPI_CR_OFFSET);
- if (use_irq) {
- xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
- wait_for_completion(&xspi->done);
- /* A transmit has just completed. Process received data
- * and check for more data to transmit. Always inhibit
- * the transmitter while the Isr refills the transmit
- * register/FIFO, or make sure it is stopped if we're
- * done.
- */
- xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
- xspi->regs + XSPI_CR_OFFSET);
- sr = XSPI_SR_TX_EMPTY_MASK;
- } else
- sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
-
- /* Read out all the data from the Rx FIFO */
- rx_words = n_words;
- stalled = 10;
- while (rx_words) {
- if (rx_words == n_words && !(stalled--) &&
- !(sr & XSPI_SR_TX_EMPTY_MASK) &&
- (sr & XSPI_SR_RX_EMPTY_MASK)) {
- dev_err(&spi->dev,
- "Detected stall. Check C_SPI_MODE and C_SPI_MEMORY\n");
- xspi_init_hw(xspi);
- return -EIO;
- }
-
- if ((sr & XSPI_SR_TX_EMPTY_MASK) && (rx_words > 1)) {
- xilinx_spi_rx(xspi);
- rx_words--;
- continue;
- }
-
- sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
- if (!(sr & XSPI_SR_RX_EMPTY_MASK)) {
- xilinx_spi_rx(xspi);
- rx_words--;
- }
- }
+ return 0;
+}
+
+/**
+ * xspi_unprepare_transfer_hardware - Relaxes hardware after transfer
+ * @master: Pointer to the spi_master structure which provides
+ * information about the controller.
+ *
+ * This function disables the SPI master controller.
+ *
+ * Return: Always 0
+ */
+static int xspi_unprepare_transfer_hardware(struct spi_master *master)
+{
+ struct xilinx_spi *xqspi = spi_master_get_devdata(master);
+ u32 cr;
+
+ cr = xqspi->read_fn(xqspi->regs + XSPI_CR_OFFSET);
+ cr &= ~XSPI_CR_ENABLE;
+ xqspi->write_fn(cr, xqspi->regs + XSPI_CR_OFFSET);
+
+ pm_runtime_put_sync(xqspi->dev);
+ return 0;
+}
+
+/**
+ * xilinx_spi_runtime_resume - Runtime resume method for the SPI driver
+ * @dev: Address of the platform_device structure
+ *
+ * This function enables the clocks
+ *
+ * Return: 0 on success and error value on error
+ */
+static int __maybe_unused xilinx_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct xilinx_spi *xspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_enable(xspi->axi_clk);
+ if (ret) {
+ dev_err(dev, "Can not enable AXI clock\n");
+ return ret;
+ }
+
+ ret = clk_enable(xspi->axi4_clk);
+ if (ret) {
+ dev_err(dev, "Can not enable AXI4 clock\n");
+ goto clk_disable_axi_clk;
- remaining_words -= n_words;
}
- if (use_irq) {
- xspi->write_fn(0, xspi->regs + XIPIF_V123B_DGIER_OFFSET);
- xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
+ ret = clk_enable(xspi->spi_clk);
+ if (ret) {
+ dev_err(dev, "Can not enable SPI clock\n");
+ goto clk_disable_axi4_clk;
}
- return t->len;
+ return 0;
+
+clk_disable_axi4_clk:
+ clk_disable(xspi->axi4_clk);
+clk_disable_axi_clk:
+ clk_disable(xspi->axi_clk);
+
+ return ret;
}
+/**
+ * xilinx_spi_runtime_suspend - Runtime suspend method for the SPI driver
+ * @dev: Address of the platform_device structure
+ *
+ * This function disables the clocks
+ *
+ * Return: Always 0
+ */
+static int __maybe_unused xilinx_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct xilinx_spi *xspi = spi_master_get_devdata(master);
+
+ clk_disable(xspi->axi_clk);
+ clk_disable(xspi->axi4_clk);
+ clk_disable(xspi->spi_clk);
+
+ return 0;
+}
+
+/**
+ * xilinx_spi_resume - Resume method for the SPI driver
+ * @dev: Address of the platform_device structure
+ *
+ * The function starts the SPI driver queue and initializes the SPI
+ * controller
+ *
+ * Return: 0 on success; error value otherwise
+ */
+static int __maybe_unused xilinx_spi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct xilinx_spi *xspi = spi_master_get_devdata(master);
+ int ret = 0;
+
+ if (!pm_runtime_suspended(dev)) {
+ ret = xilinx_spi_runtime_resume(dev);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = spi_master_resume(master);
+ if (ret < 0) {
+ clk_disable(xspi->axi_clk);
+ clk_disable(xspi->axi4_clk);
+ clk_disable(xspi->spi_clk);
+ }
+
+ return ret;
+}
+
+/**
+ * xilinx_spi_suspend - Suspend method for the SPI driver
+ * @dev: Address of the platform_device structure
+ *
+ * This function stops the SPI driver queue and disables the SPI controller
+ *
+ * Return: Always 0
+ */
+static int __maybe_unused xilinx_spi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ if (!pm_runtime_suspended(dev))
+ xilinx_spi_runtime_suspend(dev);
+
+ xspi_unprepare_transfer_hardware(master);
+
+ return ret;
+}
+
+static const struct dev_pm_ops xilinx_spi_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(xilinx_spi_runtime_suspend,
+ xilinx_spi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(xilinx_spi_suspend, xilinx_spi_resume)
+};
/* This driver supports single master mode only. Hence Tx FIFO Empty
* is the only interrupt we care about.
@@ -341,112 +612,163 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
*/
static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
{
- struct xilinx_spi *xspi = dev_id;
+ struct spi_master *master = dev_id;
+ struct xilinx_spi *xspi = spi_master_get_devdata(dev_id);
u32 ipif_isr;
+ int status = IRQ_NONE;
/* Get the IPIF interrupts, and clear them immediately */
ipif_isr = xspi->read_fn(xspi->regs + XIPIF_V123B_IISR_OFFSET);
xspi->write_fn(ipif_isr, xspi->regs + XIPIF_V123B_IISR_OFFSET);
-
- if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */
- complete(&xspi->done);
- return IRQ_HANDLED;
+ if (ipif_isr & XSPI_INTR_TX_EMPTY) {
+ /* Transmission completed */
+ xspi->rx_fifo(xspi);
+ if (xspi->bytes_to_transfer) {
+ /* There is more data to send */
+ xspi->tx_fifo(xspi);
+ }
+ status = IRQ_HANDLED;
}
- return IRQ_NONE;
-}
-
-static int xilinx_spi_find_buffer_size(struct xilinx_spi *xspi)
-{
- u8 sr;
- int n_words = 0;
-
- /*
- * Before the buffer_size detection we reset the core
- * to make sure we start with a clean state.
- */
- xspi->write_fn(XIPIF_V123B_RESET_MASK,
- xspi->regs + XIPIF_V123B_RESETR_OFFSET);
-
- /* Fill the Tx FIFO with as many words as possible */
- do {
- xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
- sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
- n_words++;
- } while (!(sr & XSPI_SR_TX_FULL_MASK));
+ if (!xspi->bytes_to_receive && !xspi->bytes_to_transfer) {
+ spi_finalize_current_transfer(master);
+ /* Disable the interrupts here. */
+ xspi->write_fn(0x0, xspi->regs + XIPIF_V123B_DGIER_OFFSET);
+ }
- return n_words;
+ return status;
}
-static const struct of_device_id xilinx_spi_of_match[] = {
- { .compatible = "xlnx,axi-quad-spi-1.00.a", },
- { .compatible = "xlnx,xps-spi-2.00.a", },
- { .compatible = "xlnx,xps-spi-2.00.b", },
- {}
-};
-MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
-
static int xilinx_spi_probe(struct platform_device *pdev)
{
struct xilinx_spi *xspi;
- struct xspi_platform_data *pdata;
struct resource *res;
int ret, num_cs = 0, bits_per_word;
+ u32 cs_num;
struct spi_master *master;
- u32 tmp;
- u8 i;
+ struct device_node *nc;
+ u32 tmp, rx_bus_width, fifo_size;
+ bool startup_block;
- pdata = dev_get_platdata(&pdev->dev);
- if (pdata) {
- num_cs = pdata->num_chipselect;
- bits_per_word = pdata->bits_per_word;
- } else {
- of_property_read_u32(pdev->dev.of_node, "xlnx,num-ss-bits",
- &num_cs);
- ret = of_property_read_u32(pdev->dev.of_node,
- "xlnx,num-transfer-bits",
- &bits_per_word);
- if (ret)
- bits_per_word = 8;
- }
-
- if (!num_cs) {
- dev_err(&pdev->dev,
- "Missing slave select configuration data\n");
- return -EINVAL;
- }
+ of_property_read_u32(pdev->dev.of_node, "num-cs",
+ &num_cs);
+ if (!num_cs)
+ num_cs = 1;
if (num_cs > XILINX_SPI_MAX_CS) {
dev_err(&pdev->dev, "Invalid number of spi slaves\n");
return -EINVAL;
}
+ startup_block = of_property_read_bool(pdev->dev.of_node,
+ "xlnx,startup-block");
master = spi_alloc_master(&pdev->dev, sizeof(struct xilinx_spi));
if (!master)
return -ENODEV;
- /* the spi->mode bits understood by this driver: */
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP |
- SPI_CS_HIGH;
-
xspi = spi_master_get_devdata(master);
- xspi->cs_inactive = 0xffffffff;
- xspi->bitbang.master = master;
- xspi->bitbang.chipselect = xilinx_spi_chipselect;
- xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
- xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs;
- init_completion(&xspi->done);
-
+ master->dev.of_node = pdev->dev.of_node;
+ platform_set_drvdata(pdev, master);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
xspi->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(xspi->regs)) {
ret = PTR_ERR(xspi->regs);
goto put_master;
}
+ ret = of_property_read_u32(pdev->dev.of_node, "fifo-size",
+ &fifo_size);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "Missing fifo size\n");
+ return -EINVAL;
+ }
+ of_property_read_u32(pdev->dev.of_node, "bits-per-word",
+ &bits_per_word);
+
+ xspi->rx_bus_width = XSPI_ONE_BITS_PER_WORD;
+ for_each_available_child_of_node(pdev->dev.of_node, nc) {
+ if (startup_block) {
+ ret = of_property_read_u32(nc, "reg",
+ &cs_num);
+ if (ret < 0)
+ return -EINVAL;
+ }
+ ret = of_property_read_u32(nc, "spi-rx-bus-width",
+ &rx_bus_width);
+ if (!ret) {
+ xspi->rx_bus_width = rx_bus_width;
+ break;
+ }
+ }
- master->bus_num = pdev->id;
- master->num_chipselect = num_cs;
- master->dev.of_node = pdev->dev.of_node;
+ xspi->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
+ if (IS_ERR(xspi->axi_clk)) {
+ if (PTR_ERR(xspi->axi_clk) != -ENOENT) {
+ ret = PTR_ERR(xspi->axi_clk);
+ goto put_master;
+ }
+
+ /*
+ * Clock framework support is optional; continue
+ * anyway if we don't find a matching clock.
+ */
+ xspi->axi_clk = NULL;
+ }
+
+ ret = clk_prepare(xspi->axi_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to prepare AXI clock\n");
+ goto put_master;
+ }
+
+ xspi->axi4_clk = devm_clk_get(&pdev->dev, "axi4_clk");
+ if (IS_ERR(xspi->axi4_clk)) {
+ if (PTR_ERR(xspi->axi4_clk) != -ENOENT) {
+ ret = PTR_ERR(xspi->axi4_clk);
+ goto clk_unprepare_axi_clk;
+ }
+
+ /*
+ * Clock framework support is optional; continue
+ * anyway if we don't find a matching clock.
+ */
+ xspi->axi4_clk = NULL;
+ }
+
+ ret = clk_prepare(xspi->axi4_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to prepare AXI4 clock\n");
+ goto clk_unprepare_axi_clk;
+ }
+
+ xspi->spi_clk = devm_clk_get(&pdev->dev, "spi_clk");
+ if (IS_ERR(xspi->spi_clk)) {
+ if (PTR_ERR(xspi->spi_clk) != -ENOENT) {
+ ret = PTR_ERR(xspi->spi_clk);
+ goto clk_unprepare_axi4_clk;
+ }
+
+ /*
+ * Clock framework support is optional; continue
+ * anyway if we don't find a matching clock.
+ */
+ xspi->spi_clk = NULL;
+ }
+
+ ret = clk_prepare(xspi->spi_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to prepare SPI clock\n");
+ goto clk_unprepare_axi4_clk;
+ }
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ goto clk_unprepare_all;
+
+ xspi->dev = &pdev->dev;
/*
* Detect endianess on the IP via loop bit in CR. Detection
@@ -466,62 +788,112 @@ static int xilinx_spi_probe(struct platform_device *pdev)
xspi->write_fn = xspi_write32_be;
}
- master->bits_per_word_mask = SPI_BPW_MASK(bits_per_word);
- xspi->bytes_per_word = bits_per_word / 8;
- xspi->buffer_size = xilinx_spi_find_buffer_size(xspi);
-
+ xspi->buffer_size = fifo_size;
xspi->irq = platform_get_irq(pdev, 0);
if (xspi->irq < 0 && xspi->irq != -ENXIO) {
ret = xspi->irq;
- goto put_master;
+ goto clk_unprepare_all;
} else if (xspi->irq >= 0) {
/* Register for SPI Interrupt */
- ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq, 0,
- dev_name(&pdev->dev), xspi);
+ ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq,
+ 0, dev_name(&pdev->dev), master);
if (ret)
- goto put_master;
+ goto clk_unprepare_all;
}
/* SPI controller initializations */
xspi_init_hw(xspi);
- ret = spi_bitbang_start(&xspi->bitbang);
- if (ret) {
- dev_err(&pdev->dev, "spi_bitbang_start FAILED\n");
- goto put_master;
+ pm_runtime_put(&pdev->dev);
+
+ master->bus_num = pdev->id;
+ master->num_chipselect = num_cs;
+ master->setup = xspi_setup;
+ master->set_cs = xspi_chipselect;
+ master->transfer_one = xspi_start_transfer;
+ master->prepare_transfer_hardware = xspi_prepare_transfer_hardware;
+ master->unprepare_transfer_hardware = xspi_unprepare_transfer_hardware;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+
+ xspi->bytes_per_word = bits_per_word / 8;
+ xspi->tx_fifo = xspi_fill_tx_fifo_8;
+ xspi->rx_fifo = xspi_read_rx_fifo_8;
+ if (xspi->rx_bus_width == XSPI_RX_ONE_WIRE) {
+ if (xspi->bytes_per_word == XSPI_TWO_BITS_PER_WORD) {
+ xspi->tx_fifo = xspi_fill_tx_fifo_16;
+ xspi->rx_fifo = xspi_read_rx_fifo_16;
+ } else if (xspi->bytes_per_word == XSPI_FOUR_BITS_PER_WORD) {
+ xspi->tx_fifo = xspi_fill_tx_fifo_32;
+ xspi->rx_fifo = xspi_read_rx_fifo_32;
+ }
+ } else if (xspi->rx_bus_width == XSPI_RX_FOUR_WIRE) {
+ master->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD;
+ } else {
+ dev_err(&pdev->dev, "Dual Mode not supported\n");
+ goto clk_unprepare_all;
}
+ xspi->cs_inactive = 0xffffffff;
- dev_info(&pdev->dev, "at 0x%08llX mapped to 0x%p, irq=%d\n",
- (unsigned long long)res->start, xspi->regs, xspi->irq);
+ /*
+ * This is the work around for the startup block issue in
+ * the spi controller. SPI clock is passing through STARTUP
+ * block to FLASH. STARTUP block don't provide clock as soon
+ * as QSPI provides command. So first command fails.
+ */
+ if (startup_block)
+ xilinx_spi_startup_block(xspi, cs_num);
- if (pdata) {
- for (i = 0; i < pdata->num_devices; i++)
- spi_new_device(master, pdata->devices + i);
+ ret = spi_register_master(master);
+ if (ret) {
+ dev_err(&pdev->dev, "spi_register_master failed\n");
+ goto clk_unprepare_all;
}
- platform_set_drvdata(pdev, master);
- return 0;
+ return ret;
+clk_unprepare_all:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ clk_unprepare(xspi->spi_clk);
+clk_unprepare_axi4_clk:
+ clk_unprepare(xspi->axi4_clk);
+clk_unprepare_axi_clk:
+ clk_unprepare(xspi->axi_clk);
put_master:
spi_master_put(master);
return ret;
}
+/**
+ * xilinx_spi_remove - Remove method for the SPI driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function is called if a device is physically removed from the system or
+ * if the driver module is being unloaded. It frees all resources allocated to
+ * the device.
+ *
+ * Return: Always 0
+ */
static int xilinx_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct xilinx_spi *xspi = spi_master_get_devdata(master);
void __iomem *regs_base = xspi->regs;
- spi_bitbang_stop(&xspi->bitbang);
-
/* Disable all the interrupts just in case */
xspi->write_fn(0, regs_base + XIPIF_V123B_IIER_OFFSET);
/* Disable the global IPIF interrupt */
xspi->write_fn(0, regs_base + XIPIF_V123B_DGIER_OFFSET);
- spi_master_put(xspi->bitbang.master);
+ pm_runtime_disable(&pdev->dev);
+
+ clk_disable_unprepare(xspi->axi_clk);
+ clk_disable_unprepare(xspi->axi4_clk);
+ clk_disable_unprepare(xspi->spi_clk);
+
+ spi_unregister_master(master);
return 0;
}
@@ -529,12 +901,21 @@ static int xilinx_spi_remove(struct platform_device *pdev)
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:" XILINX_SPI_NAME);
+static const struct of_device_id xilinx_spi_of_match[] = {
+ { .compatible = "xlnx,axi-quad-spi-1.00.a", },
+ { .compatible = "xlnx,xps-spi-2.00.a", },
+ { .compatible = "xlnx,xps-spi-2.00.b", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
+
static struct platform_driver xilinx_spi_driver = {
.probe = xilinx_spi_probe,
.remove = xilinx_spi_remove,
.driver = {
.name = XILINX_SPI_NAME,
.of_match_table = xilinx_spi_of_match,
+ .pm = &xilinx_spi_dev_pm_ops,
},
};
module_platform_driver(xilinx_spi_driver);
diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
index 17641157354d..0f6957a9a4a7 100644
--- a/drivers/spi/spi-zynq-qspi.c
+++ b/drivers/spi/spi-zynq-qspi.c
@@ -15,7 +15,7 @@
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
-#include <linux/spi/spi-mem.h>
+#include <linux/gpio.h>
/* Register offset definitions */
#define ZYNQ_QSPI_CONFIG_OFFSET 0x00 /* Configuration Register, RW */
@@ -62,6 +62,7 @@
#define ZYNQ_QSPI_CONFIG_BAUD_DIV_MAX GENMASK(2, 0) /* Baud rate maximum */
#define ZYNQ_QSPI_CONFIG_BAUD_DIV_SHIFT 3 /* Baud rate divisor shift */
#define ZYNQ_QSPI_CONFIG_PCS BIT(10) /* Peripheral Chip Select */
+#define ZYNQ_QSPI_SS_SHIFT 10 /* Slave Select field shift in CR */
/*
* QSPI Interrupt Registers bit Masks
@@ -127,19 +128,21 @@
* @rxbuf: Pointer to the RX buffer
* @tx_bytes: Number of bytes left to transfer
* @rx_bytes: Number of bytes left to receive
- * @data_completion: completion structure
+ * @is_dual: Flag to indicate whether dual flash memories are used
+ * @is_instr: Flag to indicate if transfer contains an instruction
+ * (Used in dual parallel configuration)
*/
struct zynq_qspi {
- struct device *dev;
void __iomem *regs;
struct clk *refclk;
struct clk *pclk;
int irq;
- u8 *txbuf;
- u8 *rxbuf;
+ const void *txbuf;
+ void *rxbuf;
int tx_bytes;
int rx_bytes;
- struct completion data_completion;
+ u32 is_dual;
+ u8 is_instr;
};
/*
@@ -217,38 +220,45 @@ static void zynq_qspi_init_hw(struct zynq_qspi *xqspi, unsigned int num_cs)
zynq_qspi_write(xqspi, ZYNQ_QSPI_TX_THRESH_OFFSET,
ZYNQ_QSPI_TX_THRESHOLD);
+ if (xqspi->is_dual)
+ /* Enable two memories on separate buses */
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET,
+ (ZYNQ_QSPI_LCFG_TWO_MEM |
+ ZYNQ_QSPI_LCFG_SEP_BUS |
+ (1 << ZYNQ_QSPI_LCFG_DUMMY_SHIFT) |
+ ZYNQ_QSPI_FAST_READ_QOUT_CODE));
+#ifdef CONFIG_SPI_ZYNQ_QSPI_DUAL_STACKED
+ /* Enable two memories on shared bus */
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET,
+ (ZYNQ_QSPI_LCFG_TWO_MEM |
+ (1 << ZYNQ_QSPI_LCFG_DUMMY_SHIFT) |
+ ZYNQ_QSPI_FAST_READ_QOUT_CODE));
+#endif
zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET,
ZYNQ_QSPI_ENABLE_ENABLE_MASK);
}
-static bool zynq_qspi_supports_op(struct spi_mem *mem,
- const struct spi_mem_op *op)
-{
- if (!spi_mem_default_supports_op(mem, op))
- return false;
-
- /*
- * The number of address bytes should be equal to or less than 3 bytes.
- */
- if (op->addr.nbytes > 3)
- return false;
-
- return true;
-}
-
/**
* zynq_qspi_rxfifo_op - Read 1..4 bytes from RxFIFO to RX buffer
* @xqspi: Pointer to the zynq_qspi structure
* @size: Number of bytes to be read (1..4)
+ *
+ * Note: In case of dual parallel connection, even number of bytes are read
+ * when odd bytes are requested to avoid transfer of a nibble to each flash.
+ * The receive buffer though, is populated with the number of bytes requested.
*/
static void zynq_qspi_rxfifo_op(struct zynq_qspi *xqspi, unsigned int size)
{
+ unsigned int xsize;
u32 data;
data = zynq_qspi_read(xqspi, ZYNQ_QSPI_RXD_OFFSET);
if (xqspi->rxbuf) {
- memcpy(xqspi->rxbuf, ((u8 *)&data) + 4 - size, size);
+ xsize = size;
+ if (xqspi->is_dual && !xqspi->is_instr && (size % 2))
+ xsize++;
+ memcpy(xqspi->rxbuf, ((u8 *)&data) + 4 - xsize, size);
xqspi->rxbuf += size;
}
@@ -261,12 +271,19 @@ static void zynq_qspi_rxfifo_op(struct zynq_qspi *xqspi, unsigned int size)
* zynq_qspi_txfifo_op - Write 1..4 bytes from TX buffer to TxFIFO
* @xqspi: Pointer to the zynq_qspi structure
* @size: Number of bytes to be written (1..4)
+ *
+ * In dual parallel configuration, when read/write data operations
+ * are performed, odd data bytes have to be converted to even to
+ * avoid a nibble (of data when programming / dummy when reading)
+ * going to individual flash devices, where a byte is expected.
+ * This check is only for data and will not apply for commands.
*/
static void zynq_qspi_txfifo_op(struct zynq_qspi *xqspi, unsigned int size)
{
static const unsigned int offset[4] = {
ZYNQ_QSPI_TXD_00_01_OFFSET, ZYNQ_QSPI_TXD_00_10_OFFSET,
ZYNQ_QSPI_TXD_00_11_OFFSET, ZYNQ_QSPI_TXD_00_00_OFFSET };
+ unsigned int xsize;
u32 data;
if (xqspi->txbuf) {
@@ -278,19 +295,66 @@ static void zynq_qspi_txfifo_op(struct zynq_qspi *xqspi, unsigned int size)
}
xqspi->tx_bytes -= size;
- zynq_qspi_write(xqspi, offset[size - 1], data);
+ xsize = size;
+ if (xqspi->is_dual && !xqspi->is_instr && (size % 2))
+ xsize++;
+ zynq_qspi_write(xqspi, offset[xsize - 1], data);
+}
+
+/**
+ * zynq_prepare_transfer_hardware - Prepares hardware for transfer.
+ * @master: Pointer to the spi_master structure which provides
+ * information about the controller.
+ *
+ * This function enables SPI master controller.
+ *
+ * Return: Always 0
+ */
+static int zynq_prepare_transfer_hardware(struct spi_master *master)
+{
+ struct zynq_qspi *xqspi = spi_master_get_devdata(master);
+
+ clk_enable(xqspi->refclk);
+ clk_enable(xqspi->pclk);
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET,
+ ZYNQ_QSPI_ENABLE_ENABLE_MASK);
+
+ return 0;
+}
+
+/**
+ * zynq_unprepare_transfer_hardware - Relaxes hardware after transfer
+ * @master: Pointer to the spi_master structure which provides
+ * information about the controller.
+ *
+ * This function disables the SPI master controller.
+ *
+ * Return: Always 0
+ */
+static int zynq_unprepare_transfer_hardware(struct spi_master *master)
+{
+ struct zynq_qspi *xqspi = spi_master_get_devdata(master);
+
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET, 0);
+ clk_disable(xqspi->refclk);
+ clk_disable(xqspi->pclk);
+
+ return 0;
}
/**
* zynq_qspi_chipselect - Select or deselect the chip select line
* @spi: Pointer to the spi_device structure
- * @assert: 1 for select or 0 for deselect the chip select line
+ * @is_high: Select (0) or deselect (1) the chip select line
*/
-static void zynq_qspi_chipselect(struct spi_device *spi, bool assert)
+static void zynq_qspi_chipselect(struct spi_device *spi, bool is_high)
{
struct spi_controller *ctlr = spi->master;
- struct zynq_qspi *xqspi = spi_controller_get_devdata(ctlr);
+ struct zynq_qspi *xqspi = spi_master_get_devdata(spi->master);
u32 config_reg;
+#ifdef CONFIG_SPI_ZYNQ_QSPI_DUAL_STACKED
+ u32 lqspi_cfg_reg;
+#endif
/* Select the lower (CS0) or upper (CS1) memory */
if (ctlr->num_chipselect > 1) {
@@ -305,18 +369,45 @@ static void zynq_qspi_chipselect(struct spi_device *spi, bool assert)
/* Ground the line to assert the CS */
config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
- if (assert)
- config_reg &= ~ZYNQ_QSPI_CONFIG_PCS;
- else
+
+ /* Select upper/lower page before asserting CS */
+#ifdef CONFIG_SPI_ZYNQ_QSPI_DUAL_STACKED
+ lqspi_cfg_reg = zynq_qspi_read(xqspi,
+ ZYNQ_QSPI_LINEAR_CFG_OFFSET);
+ if (spi->master->flags & SPI_MASTER_U_PAGE)
+ lqspi_cfg_reg |= ZYNQ_QSPI_LCFG_U_PAGE;
+ else
+ lqspi_cfg_reg &= ~ZYNQ_QSPI_LCFG_U_PAGE;
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET,
+ lqspi_cfg_reg);
+#endif
+
+ if (is_high) {
+ /* Deselect the slave */
config_reg |= ZYNQ_QSPI_CONFIG_PCS;
+ } else {
+ /* Select the slave */
+ config_reg &= ~ZYNQ_QSPI_CONFIG_PCS;
+ if (gpio_is_valid(spi->cs_gpio)) {
+ config_reg |= (((~(BIT(0))) <<
+ ZYNQ_QSPI_SS_SHIFT) &
+ ZYNQ_QSPI_CONFIG_PCS);
+ } else {
+ config_reg |= (((~(BIT(spi->chip_select))) <<
+ ZYNQ_QSPI_SS_SHIFT) &
+ ZYNQ_QSPI_CONFIG_PCS);
+ }
+ xqspi->is_instr = 1;
+ }
zynq_qspi_write(xqspi, ZYNQ_QSPI_CONFIG_OFFSET, config_reg);
}
/**
* zynq_qspi_config_op - Configure QSPI controller for specified transfer
- * @xqspi: Pointer to the zynq_qspi structure
* @qspi: Pointer to the spi_device structure
+ * @transfer: Pointer to the spi_transfer structure which provides information
+ * about next transfer setup parameters
*
* Sets the operational mode of QSPI controller for the next QSPI transfer and
* sets the requested clock frequency.
@@ -330,9 +421,16 @@ static void zynq_qspi_chipselect(struct spi_device *spi, bool assert)
* controller the driver will set the highest or lowest frequency supported by
* controller.
*/
-static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
+static int zynq_qspi_config_op(struct spi_device *spi,
+ struct spi_transfer *transfer)
{
- u32 config_reg, baud_rate_val = 0;
+ struct zynq_qspi *xqspi = spi_master_get_devdata(spi->master);
+ u32 config_reg, req_hz, baud_rate_val = 0;
+
+ if (transfer)
+ req_hz = transfer->speed_hz;
+ else
+ req_hz = spi->max_speed_hz;
/*
* Set the clock frequency
@@ -344,8 +442,7 @@ static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
* 111 - divide by 256
*/
while ((baud_rate_val < ZYNQ_QSPI_CONFIG_BAUD_DIV_MAX) &&
- (clk_get_rate(xqspi->refclk) / (2 << baud_rate_val)) >
- spi->max_speed_hz)
+ (clk_get_rate(xqspi->refclk) / (2 << baud_rate_val)) > req_hz)
baud_rate_val++;
config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
@@ -376,18 +473,24 @@ static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
*/
static int zynq_qspi_setup_op(struct spi_device *spi)
{
- struct spi_controller *ctlr = spi->master;
- struct zynq_qspi *qspi = spi_controller_get_devdata(ctlr);
+ struct device *dev = &spi->master->dev;
+ int ret;
+
+ if (gpio_is_valid(spi->cs_gpio)) {
+ ret = devm_gpio_request(dev, spi->cs_gpio, dev_name(dev));
+ if (ret) {
+ dev_err(dev, "Invalid cs_gpio\n");
+ return ret;
+ }
- if (ctlr->busy)
- return -EBUSY;
+ gpio_direction_output(spi->cs_gpio,
+ !(spi->mode & SPI_CS_HIGH));
+ }
- clk_enable(qspi->refclk);
- clk_enable(qspi->pclk);
- zynq_qspi_write(qspi, ZYNQ_QSPI_ENABLE_OFFSET,
- ZYNQ_QSPI_ENABLE_ENABLE_MASK);
+ if (spi->master->busy)
+ return -EBUSY;
- return 0;
+ return zynq_qspi_config_op(spi, NULL);
}
/**
@@ -471,9 +574,10 @@ static void zynq_qspi_read_op(struct zynq_qspi *xqspi, int rxcount)
*/
static irqreturn_t zynq_qspi_irq(int irq, void *dev_id)
{
+ struct spi_master *master = dev_id;
+ struct zynq_qspi *xqspi = spi_master_get_devdata(master);
u32 intr_status;
bool txempty;
- struct zynq_qspi *xqspi = (struct zynq_qspi *)dev_id;
intr_status = zynq_qspi_read(xqspi, ZYNQ_QSPI_STATUS_OFFSET);
zynq_qspi_write(xqspi, ZYNQ_QSPI_STATUS_OFFSET, intr_status);
@@ -501,7 +605,8 @@ static irqreturn_t zynq_qspi_irq(int irq, void *dev_id)
zynq_qspi_write(xqspi,
ZYNQ_QSPI_IDIS_OFFSET,
ZYNQ_QSPI_IXR_RXTX_MASK);
- complete(&xqspi->data_completion);
+ spi_finalize_current_transfer(master);
+ xqspi->is_instr = 0;
}
}
return IRQ_HANDLED;
@@ -511,110 +616,100 @@ static irqreturn_t zynq_qspi_irq(int irq, void *dev_id)
}
/**
- * zynq_qspi_exec_mem_op() - Initiates the QSPI transfer
- * @mem: the SPI memory
- * @op: the memory operation to execute
+ * zynq_qspi_start_transfer - Initiates the QSPI transfer
+ * @master: Pointer to the spi_master structure which provides
+ * information about the controller.
+ * @qspi: Pointer to the spi_device structure
+ * @transfer: Pointer to the spi_transfer structure which provide information
+ * about next transfer parameters
*
- * Executes a memory operation.
+ * This function fills the TX FIFO and starts the QSPI transfer; completion
+ * is signalled later from the interrupt handler.
+ *
+ * Return: Number of bytes transferred in the last transfer
+ */
+static int zynq_qspi_start_transfer(struct spi_master *master,
+ struct spi_device *qspi,
+ struct spi_transfer *transfer)
+{
+ struct zynq_qspi *xqspi = spi_master_get_devdata(master);
+
+ xqspi->txbuf = transfer->tx_buf;
+ xqspi->rxbuf = transfer->rx_buf;
+ xqspi->tx_bytes = transfer->len;
+ xqspi->rx_bytes = transfer->len;
+
+ if (!transfer->stripe)
+ xqspi->is_instr = true;
+ else
+ xqspi->is_instr = false;
+ zynq_qspi_config_op(qspi, transfer);
+
+ zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
+
+ zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
+ ZYNQ_QSPI_IXR_RXTX_MASK);
+
+ return transfer->len;
+}
+
+/**
+ * zynq_qspi_suspend - Suspend method for the QSPI driver
+ * @_dev: Address of the device structure
*
- * This function first selects the chip and starts the memory operation.
+ * This function stops the QSPI driver queue and disables the QSPI controller
*
- * Return: 0 in case of success, a negative error code otherwise.
+ * Return: Always 0
*/
-static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
- const struct spi_mem_op *op)
+static int __maybe_unused zynq_qspi_suspend(struct device *_dev)
{
- struct zynq_qspi *xqspi = spi_controller_get_devdata(mem->spi->master);
- int err = 0, i;
- u8 *tmpbuf;
-
- dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
- op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
- op->dummy.buswidth, op->data.buswidth);
-
- zynq_qspi_chipselect(mem->spi, true);
- zynq_qspi_config_op(xqspi, mem->spi);
-
- if (op->cmd.opcode) {
- reinit_completion(&xqspi->data_completion);
- xqspi->txbuf = (u8 *)&op->cmd.opcode;
- xqspi->rxbuf = NULL;
- xqspi->tx_bytes = sizeof(op->cmd.opcode);
- xqspi->rx_bytes = sizeof(op->cmd.opcode);
- zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
- zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
- ZYNQ_QSPI_IXR_RXTX_MASK);
- if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
- msecs_to_jiffies(1000)))
- err = -ETIMEDOUT;
- }
+ struct platform_device *pdev = container_of(_dev,
+ struct platform_device, dev);
+ struct spi_master *master = platform_get_drvdata(pdev);
- if (op->addr.nbytes) {
- for (i = 0; i < op->addr.nbytes; i++) {
- xqspi->txbuf[i] = op->addr.val >>
- (8 * (op->addr.nbytes - i - 1));
- }
+ spi_master_suspend(master);
- reinit_completion(&xqspi->data_completion);
- xqspi->rxbuf = NULL;
- xqspi->tx_bytes = op->addr.nbytes;
- xqspi->rx_bytes = op->addr.nbytes;
- zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
- zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
- ZYNQ_QSPI_IXR_RXTX_MASK);
- if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
- msecs_to_jiffies(1000)))
- err = -ETIMEDOUT;
- }
+ zynq_unprepare_transfer_hardware(master);
- if (op->dummy.nbytes) {
- tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL);
- memset(tmpbuf, 0xff, op->dummy.nbytes);
- reinit_completion(&xqspi->data_completion);
- xqspi->txbuf = tmpbuf;
- xqspi->rxbuf = NULL;
- xqspi->tx_bytes = op->dummy.nbytes;
- xqspi->rx_bytes = op->dummy.nbytes;
- zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
- zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
- ZYNQ_QSPI_IXR_RXTX_MASK);
- if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
- msecs_to_jiffies(1000)))
- err = -ETIMEDOUT;
-
- kfree(tmpbuf);
- }
+ return 0;
+}
- if (op->data.nbytes) {
- reinit_completion(&xqspi->data_completion);
- if (op->data.dir == SPI_MEM_DATA_OUT) {
- xqspi->txbuf = (u8 *)op->data.buf.out;
- xqspi->tx_bytes = op->data.nbytes;
- xqspi->rxbuf = NULL;
- xqspi->rx_bytes = op->data.nbytes;
- } else {
- xqspi->txbuf = NULL;
- xqspi->rxbuf = (u8 *)op->data.buf.in;
- xqspi->rx_bytes = op->data.nbytes;
- xqspi->tx_bytes = op->data.nbytes;
- }
+/**
+ * zynq_qspi_resume - Resume method for the QSPI driver
+ * @dev: Address of the device structure
+ *
+ * The function starts the QSPI driver queue and initializes the QSPI controller
+ *
+ * Return: 0 on success and error value on error
+ */
+static int __maybe_unused zynq_qspi_resume(struct device *dev)
+{
+ struct platform_device *pdev = container_of(dev,
+ struct platform_device, dev);
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct zynq_qspi *xqspi = spi_master_get_devdata(master);
+ int ret = 0;
+
+ ret = clk_enable(xqspi->pclk);
+ if (ret) {
+ dev_err(dev, "Cannot enable APB clock.\n");
+ return ret;
+ }
- zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
- zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
- ZYNQ_QSPI_IXR_RXTX_MASK);
- if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
- msecs_to_jiffies(1000)))
- err = -ETIMEDOUT;
+ ret = clk_enable(xqspi->refclk);
+ if (ret) {
+ dev_err(dev, "Cannot enable device clock.\n");
+ clk_disable(xqspi->pclk);
+ return ret;
}
- zynq_qspi_chipselect(mem->spi, false);
- return err;
+ spi_master_resume(master);
+
+ return 0;
}
-static const struct spi_controller_mem_ops zynq_qspi_mem_ops = {
- .supports_op = zynq_qspi_supports_op,
- .exec_op = zynq_qspi_exec_mem_op,
-};
+static SIMPLE_DEV_PM_OPS(zynq_qspi_dev_pm_ops, zynq_qspi_suspend,
+ zynq_qspi_resume);
/**
* zynq_qspi_probe - Probe method for the QSPI driver
@@ -628,9 +723,8 @@ static int zynq_qspi_probe(struct platform_device *pdev)
{
int ret = 0;
struct spi_controller *ctlr;
- struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
struct zynq_qspi *xqspi;
+ struct resource *res;
u32 num_cs;
ctlr = spi_alloc_master(&pdev->dev, sizeof(*xqspi));
@@ -638,14 +732,22 @@ static int zynq_qspi_probe(struct platform_device *pdev)
return -ENOMEM;
xqspi = spi_controller_get_devdata(ctlr);
- xqspi->dev = dev;
- platform_set_drvdata(pdev, xqspi);
- xqspi->regs = devm_platform_ioremap_resource(pdev, 0);
+ ctlr->dev.of_node = pdev->dev.of_node;
+ platform_set_drvdata(pdev, ctlr);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xqspi->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(xqspi->regs)) {
ret = PTR_ERR(xqspi->regs);
goto remove_master;
}
+ if (of_property_read_u32(pdev->dev.of_node, "is-dual",
+ &xqspi->is_dual)) {
+ dev_warn(&pdev->dev, "couldn't determine configuration info");
+ dev_warn(&pdev->dev, "about dual memories. defaulting to single memory\n");
+ }
+
xqspi->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(xqspi->pclk)) {
dev_err(&pdev->dev, "pclk clock not found.\n");
@@ -653,8 +755,6 @@ static int zynq_qspi_probe(struct platform_device *pdev)
goto remove_master;
}
- init_completion(&xqspi->data_completion);
-
xqspi->refclk = devm_clk_get(&pdev->dev, "ref_clk");
if (IS_ERR(xqspi->refclk)) {
dev_err(&pdev->dev, "ref_clk clock not found.\n");
@@ -677,17 +777,18 @@ static int zynq_qspi_probe(struct platform_device *pdev)
xqspi->irq = platform_get_irq(pdev, 0);
if (xqspi->irq <= 0) {
ret = -ENXIO;
+ dev_err(&pdev->dev, "irq resource not found\n");
goto remove_master;
}
ret = devm_request_irq(&pdev->dev, xqspi->irq, zynq_qspi_irq,
- 0, pdev->name, xqspi);
+ 0, pdev->name, ctlr);
if (ret != 0) {
ret = -ENXIO;
dev_err(&pdev->dev, "request_irq failed\n");
goto remove_master;
}
- ret = of_property_read_u32(np, "num-cs",
+ ret = of_property_read_u32(pdev->dev.of_node, "num-cs",
&num_cs);
if (ret < 0) {
ctlr->num_chipselect = 1;
@@ -698,12 +799,18 @@ static int zynq_qspi_probe(struct platform_device *pdev)
ctlr->num_chipselect = num_cs;
}
- ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
- SPI_TX_DUAL | SPI_TX_QUAD;
- ctlr->mem_ops = &zynq_qspi_mem_ops;
ctlr->setup = zynq_qspi_setup_op;
+ ctlr->set_cs = zynq_qspi_chipselect;
+ ctlr->transfer_one = zynq_qspi_start_transfer;
+ ctlr->prepare_transfer_hardware = zynq_prepare_transfer_hardware;
+ ctlr->unprepare_transfer_hardware = zynq_unprepare_transfer_hardware;
+ ctlr->flags = SPI_MASTER_QUAD_MODE | SPI_MASTER_GPIO_SS;
+
ctlr->max_speed_hz = clk_get_rate(xqspi->refclk) / 2;
- ctlr->dev.of_node = np;
+ ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
+ SPI_TX_DUAL | SPI_TX_QUAD;
+
/* QSPI controller initializations */
zynq_qspi_init_hw(xqspi, ctlr->num_chipselect);
@@ -738,13 +845,15 @@ remove_master:
*/
static int zynq_qspi_remove(struct platform_device *pdev)
{
- struct zynq_qspi *xqspi = platform_get_drvdata(pdev);
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct zynq_qspi *xqspi = spi_master_get_devdata(master);
zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET, 0);
clk_disable_unprepare(xqspi->refclk);
clk_disable_unprepare(xqspi->pclk);
+ spi_unregister_master(master);
return 0;
}
@@ -764,6 +873,7 @@ static struct platform_driver zynq_qspi_driver = {
.driver = {
.name = "zynq-qspi",
.of_match_table = zynq_qspi_of_match,
+ .pm = &zynq_qspi_dev_pm_ops,
},
};
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 7412a3042a8d..27c793cdf634 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
@@ -33,6 +34,7 @@
#define GQSPI_RXD_OFST 0x00000120
#define GQSPI_TX_THRESHOLD_OFST 0x00000128
#define GQSPI_RX_THRESHOLD_OFST 0x0000012C
+#define IOU_TAPDLY_BYPASS_OFST 0x0000003C
#define GQSPI_LPBK_DLY_ADJ_OFST 0x00000138
#define GQSPI_GEN_FIFO_OFST 0x00000140
#define GQSPI_SEL_OFST 0x00000144
@@ -47,6 +49,7 @@
#define GQSPI_QSPIDMA_DST_I_MASK_OFST 0x00000820
#define GQSPI_QSPIDMA_DST_ADDR_OFST 0x00000800
#define GQSPI_QSPIDMA_DST_ADDR_MSB_OFST 0x00000828
+#define GQSPI_DATA_DLY_ADJ_OFST 0x000001F8
/* GQSPI register bit masks */
#define GQSPI_SEL_MASK 0x00000001
@@ -132,12 +135,45 @@
#define GQSPI_SELECT_MODE_QUADSPI 0x4
#define GQSPI_DMA_UNALIGN 0x3
#define GQSPI_DEFAULT_NUM_CS 1 /* Default number of chip selects */
+#define GQSPI_RX_BUS_WIDTH_QUAD 0x4
+#define GQSPI_RX_BUS_WIDTH_DUAL 0x2
+#define GQSPI_RX_BUS_WIDTH_SINGLE 0x1
+#define GQSPI_TX_BUS_WIDTH_QUAD 0x4
+#define GQSPI_TX_BUS_WIDTH_DUAL 0x2
+#define GQSPI_TX_BUS_WIDTH_SINGLE 0x1
+#define GQSPI_LPBK_DLY_ADJ_LPBK_SHIFT 5
+#define GQSPI_LPBK_DLY_ADJ_DLY_1 0x2
+#define GQSPI_LPBK_DLY_ADJ_DLY_1_SHIFT 3
+#define GQSPI_LPBK_DLY_ADJ_DLY_0 0x3
+#define GQSPI_USE_DATA_DLY 0x1
+#define GQSPI_USE_DATA_DLY_SHIFT 31
+#define GQSPI_DATA_DLY_ADJ_VALUE 0x2
+#define GQSPI_DATA_DLY_ADJ_SHIFT 28
+#define TAP_DLY_BYPASS_LQSPI_RX_VALUE 0x1
+#define TAP_DLY_BYPASS_LQSPI_RX_SHIFT 2
+
+/* set to differentiate versal from zynqmp, 1=versal, 0=zynqmp */
+#define QSPI_QUIRK_HAS_TAPDELAY BIT(0)
+
+#define GQSPI_FREQ_37_5MHZ 37500000
+#define GQSPI_FREQ_40MHZ 40000000
+#define GQSPI_FREQ_100MHZ 100000000
+#define GQSPI_FREQ_150MHZ 150000000
+#define IOU_TAPDLY_BYPASS_MASK 0x7
#define SPI_AUTOSUSPEND_TIMEOUT 3000
enum mode_type {GQSPI_MODE_IO, GQSPI_MODE_DMA};
static const struct zynqmp_eemi_ops *eemi_ops;
/**
+ * struct qspi_platform_data - zynqmp qspi platform data structure
+ * @quirks: Flags used to identify the platform
+ */
+struct qspi_platform_data {
+ u32 quirks;
+};
+
+/**
* struct zynqmp_qspi - Defines qspi driver instance
* @regs: Virtual address of the QSPI controller registers
* @refclk: Pointer to the peripheral clock
@@ -152,8 +188,14 @@ static const struct zynqmp_eemi_ops *eemi_ops;
* @genfifobus: Used to select the upper or lower bus
* @dma_rx_bytes: Remaining bytes to receive by DMA mode
* @dma_addr: DMA address after mapping the kernel buffer
+ * @tx_bus_width: Used to represent number of data wires for tx
+ * @rx_bus_width: Used to represent number of data wires for rx
* @genfifoentry: Used for storing the genfifoentry instruction.
+ * @isinstr: To determine whether the transfer is instruction
* @mode: Defines the mode in which QSPI is operating
+ * @speed_hz: Current SPI bus clock speed in hz
+ * @io_mode: Defines the operating mode, either IO or dma
+ * @has_tapdelay: Used for tapdelay register available in qspi
*/
struct zynqmp_qspi {
void __iomem *regs;
@@ -169,14 +211,22 @@ struct zynqmp_qspi {
u32 genfifobus;
u32 dma_rx_bytes;
dma_addr_t dma_addr;
+ u32 rx_bus_width;
+ u32 tx_bus_width;
u32 genfifoentry;
+ bool isinstr;
enum mode_type mode;
+ u32 speed_hz;
+ bool io_mode;
+ bool has_tapdelay;
};
/**
- * zynqmp_gqspi_read: For GQSPI controller read operation
+ * zynqmp_gqspi_read - For GQSPI controller read operation
* @xqspi: Pointer to the zynqmp_qspi structure
* @offset: Offset from where to read
+ *
+ * Return: Value read from the qspi register
*/
static u32 zynqmp_gqspi_read(struct zynqmp_qspi *xqspi, u32 offset)
{
@@ -184,7 +234,7 @@ static u32 zynqmp_gqspi_read(struct zynqmp_qspi *xqspi, u32 offset)
}
/**
- * zynqmp_gqspi_write: For GQSPI controller write operation
+ * zynqmp_gqspi_write - For GQSPI controller write operation
* @xqspi: Pointer to the zynqmp_qspi structure
* @offset: Offset where to write
* @val: Value to be written
@@ -196,10 +246,10 @@ static inline void zynqmp_gqspi_write(struct zynqmp_qspi *xqspi, u32 offset,
}
/**
- * zynqmp_gqspi_selectslave: For selection of slave device
+ * zynqmp_gqspi_selectslave - For selection of slave device
* @instanceptr: Pointer to the zynqmp_qspi structure
- * @flashcs: For chip select
- * @flashbus: To check which bus is selected- upper or lower
+ * @slavecs: For chip select
+ * @slavebus: To check which bus is selected- upper or lower
*/
static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr,
u8 slavecs, u8 slavebus)
@@ -243,7 +293,76 @@ static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr,
}
/**
- * zynqmp_qspi_init_hw: Initialize the hardware
+ * zynqmp_qspi_set_tapdelay - To configure qspi tap delays
+ * @xqspi: Pointer to the zynqmp_qspi structure
+ * @baudrateval: Baud rate to configure
+ */
+static void zynqmp_qspi_set_tapdelay(struct zynqmp_qspi *xqspi, u32 baudrateval)
+{
+ u32 tapdlybypass = 0, lpbkdlyadj = 0, datadlyadj = 0, clk_rate;
+ u32 reqhz = 0;
+
+ if (!eemi_ops->ioctl)
+ return;
+
+ clk_rate = clk_get_rate(xqspi->refclk);
+ reqhz = (clk_rate / (GQSPI_BAUD_DIV_SHIFT << baudrateval));
+
+ if (!xqspi->has_tapdelay) {
+ if (reqhz <= GQSPI_FREQ_40MHZ) {
+ eemi_ops->ioctl(NODE_QSPI, IOCTL_SET_TAPDELAY_BYPASS,
+ PM_TAPDELAY_QSPI,
+ PM_TAPDELAY_BYPASS_ENABLE,
+ NULL);
+ } else if (reqhz <= GQSPI_FREQ_100MHZ) {
+ eemi_ops->ioctl(NODE_QSPI, IOCTL_SET_TAPDELAY_BYPASS,
+ PM_TAPDELAY_QSPI,
+ PM_TAPDELAY_BYPASS_ENABLE,
+ NULL);
+ lpbkdlyadj |= (GQSPI_LPBK_DLY_ADJ_USE_LPBK_MASK);
+ datadlyadj |= ((GQSPI_USE_DATA_DLY <<
+ GQSPI_USE_DATA_DLY_SHIFT)
+ | (GQSPI_DATA_DLY_ADJ_VALUE <<
+ GQSPI_DATA_DLY_ADJ_SHIFT));
+ } else if (reqhz <= GQSPI_FREQ_150MHZ) {
+ lpbkdlyadj |= GQSPI_LPBK_DLY_ADJ_USE_LPBK_MASK;
+ }
+ } else {
+ if (reqhz <= GQSPI_FREQ_37_5MHZ) {
+ tapdlybypass |= (TAP_DLY_BYPASS_LQSPI_RX_VALUE <<
+ TAP_DLY_BYPASS_LQSPI_RX_SHIFT);
+ } else if (reqhz <= GQSPI_FREQ_100MHZ) {
+ tapdlybypass |= (TAP_DLY_BYPASS_LQSPI_RX_VALUE <<
+ TAP_DLY_BYPASS_LQSPI_RX_SHIFT);
+ lpbkdlyadj |= (GQSPI_LPBK_DLY_ADJ_USE_LPBK_MASK);
+ datadlyadj |= (GQSPI_USE_DATA_DLY <<
+ GQSPI_USE_DATA_DLY_SHIFT);
+ } else if (reqhz <= GQSPI_FREQ_150MHZ) {
+ lpbkdlyadj |= (GQSPI_LPBK_DLY_ADJ_USE_LPBK_MASK
+ | (GQSPI_LPBK_DLY_ADJ_DLY_1 <<
+ GQSPI_LPBK_DLY_ADJ_DLY_1_SHIFT));
+ }
+ zynqmp_gqspi_write(xqspi,
+ IOU_TAPDLY_BYPASS_OFST, tapdlybypass);
+ }
+
+ zynqmp_gqspi_write(xqspi, GQSPI_LPBK_DLY_ADJ_OFST, lpbkdlyadj);
+ zynqmp_gqspi_write(xqspi, GQSPI_DATA_DLY_ADJ_OFST, datadlyadj);
+}
+
+static u32 zynqmp_disable_intr(struct zynqmp_qspi *xqspi)
+{
+ u32 value;
+
+ zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_ISR_IDR_MASK);
+ value = zynqmp_gqspi_read(xqspi, GQSPI_IMASK_OFST);
+ zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_ISR_IDR_MASK);
+
+ return value;
+}
+
+/**
+ * zynqmp_qspi_init_hw - Initialize the hardware
* @xqspi: Pointer to the zynqmp_qspi structure
*
* The default settings of the QSPI controller's configurable parameters on
@@ -267,9 +386,7 @@ static void zynqmp_qspi_init_hw(struct zynqmp_qspi *xqspi)
/* Select the GQSPI mode */
zynqmp_gqspi_write(xqspi, GQSPI_SEL_OFST, GQSPI_SEL_MASK);
/* Clear and disable interrupts */
- zynqmp_gqspi_write(xqspi, GQSPI_ISR_OFST,
- zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST) |
- GQSPI_ISR_WR_TO_CLR_MASK);
+ zynqmp_disable_intr(xqspi);
/* Clear the DMA STS */
zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_I_STS_OFST,
zynqmp_gqspi_read(xqspi,
@@ -321,17 +438,17 @@ static void zynqmp_qspi_init_hw(struct zynqmp_qspi *xqspi)
zynqmp_gqspi_selectslave(xqspi,
GQSPI_SELECT_FLASH_CS_LOWER,
GQSPI_SELECT_FLASH_BUS_LOWER);
- /* Initialize DMA */
- zynqmp_gqspi_write(xqspi,
- GQSPI_QSPIDMA_DST_CTRL_OFST,
- GQSPI_QSPIDMA_DST_CTRL_RESET_VAL);
-
+ if (!xqspi->io_mode)
+ /* Initialize DMA */
+ zynqmp_gqspi_write(xqspi,
+ GQSPI_QSPIDMA_DST_CTRL_OFST,
+ GQSPI_QSPIDMA_DST_CTRL_RESET_VAL);
/* Enable the GQSPI */
zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
}
/**
- * zynqmp_qspi_copy_read_data: Copy data to RX buffer
+ * zynqmp_qspi_copy_read_data - Copy data to RX buffer
* @xqspi: Pointer to the zynqmp_qspi structure
* @data: The variable where data is stored
* @size: Number of bytes to be copied from data to RX buffer
@@ -345,7 +462,7 @@ static void zynqmp_qspi_copy_read_data(struct zynqmp_qspi *xqspi,
}
/**
- * zynqmp_prepare_transfer_hardware: Prepares hardware for transfer.
+ * zynqmp_prepare_transfer_hardware - Prepares hardware for transfer.
* @master: Pointer to the spi_master structure which provides
* information about the controller.
*
@@ -362,7 +479,7 @@ static int zynqmp_prepare_transfer_hardware(struct spi_master *master)
}
/**
- * zynqmp_unprepare_transfer_hardware: Relaxes hardware after transfer
+ * zynqmp_unprepare_transfer_hardware - Relaxes hardware after transfer
* @master: Pointer to the spi_master structure which provides
* information about the controller.
*
@@ -379,7 +496,7 @@ static int zynqmp_unprepare_transfer_hardware(struct spi_master *master)
}
/**
- * zynqmp_qspi_chipselect: Select or deselect the chip select line
+ * zynqmp_qspi_chipselect - Select or deselect the chip select line
* @qspi: Pointer to the spi_device structure
* @is_high: Select(0) or deselect (1) the chip select line
*/
@@ -390,11 +507,25 @@ static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high)
u32 genfifoentry = 0x0, statusreg;
genfifoentry |= GQSPI_GENFIFO_MODE_SPI;
+ if (qspi->master->flags & SPI_MASTER_BOTH_CS) {
+ zynqmp_gqspi_selectslave(xqspi,
+ GQSPI_SELECT_FLASH_CS_BOTH,
+ GQSPI_SELECT_FLASH_BUS_BOTH);
+ } else if (qspi->master->flags & SPI_MASTER_U_PAGE) {
+ zynqmp_gqspi_selectslave(xqspi,
+ GQSPI_SELECT_FLASH_CS_UPPER,
+ GQSPI_SELECT_FLASH_BUS_LOWER);
+ } else {
+ zynqmp_gqspi_selectslave(xqspi,
+ GQSPI_SELECT_FLASH_CS_LOWER,
+ GQSPI_SELECT_FLASH_BUS_LOWER);
+ }
genfifoentry |= xqspi->genfifobus;
if (!is_high) {
genfifoentry |= xqspi->genfifocs;
genfifoentry |= GQSPI_GENFIFO_CS_SETUP;
+ xqspi->isinstr = true;
} else {
genfifoentry |= GQSPI_GENFIFO_CS_HOLD;
}
@@ -415,8 +546,7 @@ static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high)
if ((statusreg & GQSPI_ISR_GENFIFOEMPTY_MASK) &&
(statusreg & GQSPI_ISR_TXEMPTY_MASK))
break;
- else
- cpu_relax();
+ cpu_relax();
} while (!time_after_eq(jiffies, timeout));
if (time_after_eq(jiffies, timeout))
@@ -424,7 +554,7 @@ static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high)
}
/**
- * zynqmp_qspi_setup_transfer: Configure QSPI controller for specified
+ * zynqmp_qspi_setup_transfer - Configure QSPI controller for specified
* transfer
* @qspi: Pointer to the spi_device structure
* @transfer: Pointer to the spi_transfer structure which provides
@@ -457,33 +587,39 @@ static int zynqmp_qspi_setup_transfer(struct spi_device *qspi,
else
req_hz = qspi->max_speed_hz;
- /* Set the clock frequency */
- /* If req_hz == 0, default to lowest speed */
- clk_rate = clk_get_rate(xqspi->refclk);
+ if (xqspi->speed_hz != req_hz) {
+ /* Set the clock frequency */
+ /* If req_hz == 0, default to lowest speed */
+ clk_rate = clk_get_rate(xqspi->refclk);
- while ((baud_rate_val < GQSPI_BAUD_DIV_MAX) &&
- (clk_rate /
- (GQSPI_BAUD_DIV_SHIFT << baud_rate_val)) > req_hz)
- baud_rate_val++;
+ while ((baud_rate_val < GQSPI_BAUD_DIV_MAX) &&
+ (clk_rate /
+ (GQSPI_BAUD_DIV_SHIFT << baud_rate_val)) > req_hz)
+ baud_rate_val++;
- config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
+ config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
- /* Set the QSPI clock phase and clock polarity */
- config_reg &= (~GQSPI_CFG_CLK_PHA_MASK) & (~GQSPI_CFG_CLK_POL_MASK);
+ /* Set the QSPI clock phase and clock polarity */
+ config_reg &= (~GQSPI_CFG_CLK_PHA_MASK) &
+ (~GQSPI_CFG_CLK_POL_MASK);
- if (qspi->mode & SPI_CPHA)
- config_reg |= GQSPI_CFG_CLK_PHA_MASK;
- if (qspi->mode & SPI_CPOL)
- config_reg |= GQSPI_CFG_CLK_POL_MASK;
+ if (qspi->mode & SPI_CPHA)
+ config_reg |= GQSPI_CFG_CLK_PHA_MASK;
+ if (qspi->mode & SPI_CPOL)
+ config_reg |= GQSPI_CFG_CLK_POL_MASK;
+ config_reg &= ~GQSPI_CFG_BAUD_RATE_DIV_MASK;
+ config_reg |= (baud_rate_val << GQSPI_CFG_BAUD_RATE_DIV_SHIFT);
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
+ xqspi->speed_hz = clk_rate / (GQSPI_BAUD_DIV_SHIFT <<
+ baud_rate_val);
+ zynqmp_qspi_set_tapdelay(xqspi, baud_rate_val);
+ }
- config_reg &= ~GQSPI_CFG_BAUD_RATE_DIV_MASK;
- config_reg |= (baud_rate_val << GQSPI_CFG_BAUD_RATE_DIV_SHIFT);
- zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
return 0;
}
/**
- * zynqmp_qspi_setup: Configure the QSPI controller
+ * zynqmp_qspi_setup - Configure the QSPI controller
* @qspi: Pointer to the spi_device structure
*
* Sets the operational mode of QSPI controller for the next QSPI transfer,
@@ -499,7 +635,7 @@ static int zynqmp_qspi_setup(struct spi_device *qspi)
}
/**
- * zynqmp_qspi_filltxfifo: Fills the TX FIFO as long as there is room in
+ * zynqmp_qspi_filltxfifo - Fills the TX FIFO as long as there is room in
* the FIFO or the bytes required to be
* transmitted.
* @xqspi: Pointer to the zynqmp_qspi structure
@@ -510,22 +646,24 @@ static void zynqmp_qspi_filltxfifo(struct zynqmp_qspi *xqspi, int size)
u32 count = 0, intermediate;
while ((xqspi->bytes_to_transfer > 0) && (count < size)) {
- memcpy(&intermediate, xqspi->txbuf, 4);
- zynqmp_gqspi_write(xqspi, GQSPI_TXD_OFST, intermediate);
-
if (xqspi->bytes_to_transfer >= 4) {
+ memcpy(&intermediate, xqspi->txbuf, 4);
xqspi->txbuf += 4;
xqspi->bytes_to_transfer -= 4;
+ count += 4;
} else {
+ memcpy(&intermediate, xqspi->txbuf,
+ xqspi->bytes_to_transfer);
xqspi->txbuf += xqspi->bytes_to_transfer;
xqspi->bytes_to_transfer = 0;
+ count += xqspi->bytes_to_transfer;
}
- count++;
+ zynqmp_gqspi_write(xqspi, GQSPI_TXD_OFST, intermediate);
}
}
/**
- * zynqmp_qspi_readrxfifo: Fills the RX FIFO as long as there is room in
+ * zynqmp_qspi_readrxfifo - Fills the RX FIFO as long as there is room in
* the FIFO.
* @xqspi: Pointer to the zynqmp_qspi structure
* @size: Number of bytes to be copied from RX buffer to RX FIFO
@@ -553,7 +691,40 @@ static void zynqmp_qspi_readrxfifo(struct zynqmp_qspi *xqspi, u32 size)
}
/**
- * zynqmp_process_dma_irq: Handler for DMA done interrupt of QSPI
+ * zynqmp_qspi_preparedummy - Prepares the dummy entry
+ *
+ * @xqspi: Pointer to the zynqmp_qspi structure
+ * @transfer: It is a pointer to the structure containing transfer data.
+ * @genfifoentry: genfifoentry is pointer to the variable in which
+ * GENFIFO mask is returned to calling function
+ */
+static void zynqmp_qspi_preparedummy(struct zynqmp_qspi *xqspi,
+ struct spi_transfer *transfer,
+ u32 *genfifoentry)
+{
+ /* For dummy Tx and Rx are NULL */
+ *genfifoentry &= ~(GQSPI_GENFIFO_TX | GQSPI_GENFIFO_RX);
+
+ /* SPI mode */
+ *genfifoentry &= ~GQSPI_GENFIFO_MODE_QUADSPI;
+ if (xqspi->rx_bus_width == GQSPI_RX_BUS_WIDTH_QUAD ||
+ xqspi->tx_bus_width == GQSPI_TX_BUS_WIDTH_QUAD)
+ *genfifoentry |= GQSPI_GENFIFO_MODE_QUADSPI;
+ else if (xqspi->rx_bus_width == GQSPI_RX_BUS_WIDTH_DUAL ||
+ xqspi->tx_bus_width == GQSPI_TX_BUS_WIDTH_DUAL)
+ *genfifoentry |= GQSPI_GENFIFO_MODE_DUALSPI;
+ else
+ *genfifoentry |= GQSPI_GENFIFO_MODE_SPI;
+
+ /* Immediate data */
+ *genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK;
+
+ if (transfer->dummy)
+ *genfifoentry |= (transfer->dummy / transfer->tx_nbits);
+}
+
+/**
+ * zynqmp_process_dma_irq - Handler for DMA done interrupt of QSPI
* controller
* @xqspi: zynqmp_qspi instance pointer
*
@@ -601,7 +772,7 @@ static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi)
}
/**
- * zynqmp_qspi_irq: Interrupt service routine of the QSPI controller
+ * zynqmp_qspi_irq - Interrupt service routine of the QSPI controller
* @irq: IRQ number
* @dev_id: Pointer to the xqspi structure
*
@@ -639,23 +810,29 @@ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
if (dma_status & GQSPI_QSPIDMA_DST_I_STS_DONE_MASK) {
zynqmp_process_dma_irq(xqspi);
ret = IRQ_HANDLED;
- } else if (!(mask & GQSPI_IER_RXEMPTY_MASK) &&
- (mask & GQSPI_IER_GENFIFOEMPTY_MASK)) {
+ } else if ((mask & GQSPI_IER_RXNEMPTY_MASK)) {
+ zynqmp_qspi_readrxfifo(xqspi, GQSPI_RX_FIFO_FILL);
+ ret = IRQ_HANDLED;
+ }
+ if (!(mask & GQSPI_IER_RXEMPTY_MASK) &&
+ (mask & GQSPI_IER_GENFIFOEMPTY_MASK)) {
zynqmp_qspi_readrxfifo(xqspi, GQSPI_RX_FIFO_FILL);
ret = IRQ_HANDLED;
}
if ((xqspi->bytes_to_receive == 0) && (xqspi->bytes_to_transfer == 0)
&& ((status & GQSPI_IRQ_MASK) == GQSPI_IRQ_MASK)) {
- zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_ISR_IDR_MASK);
+ zynqmp_disable_intr(xqspi);
+ xqspi->isinstr = false;
spi_finalize_current_transfer(master);
ret = IRQ_HANDLED;
}
+
return ret;
}
/**
- * zynqmp_qspi_selectspimode: Selects SPI mode - x1 or x2 or x4.
+ * zynqmp_qspi_selectspimode - Selects SPI mode - x1 or x2 or x4.
* @xqspi: xqspi is a pointer to the GQSPI instance
* @spimode: spimode - SPI or DUAL or QUAD.
* Return: Mask to set desired SPI mode in GENFIFO entry.
@@ -683,7 +860,7 @@ static inline u32 zynqmp_qspi_selectspimode(struct zynqmp_qspi *xqspi,
}
/**
- * zynq_qspi_setuprxdma: This function sets up the RX DMA operation
+ * zynq_qspi_setuprxdma - This function sets up the RX DMA operation
* @xqspi: xqspi is a pointer to the GQSPI instance.
*/
static void zynq_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
@@ -692,8 +869,9 @@ static void zynq_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
dma_addr_t addr;
u64 dma_align = (u64)(uintptr_t)xqspi->rxbuf;
- if ((xqspi->bytes_to_receive < 8) ||
- ((dma_align & GQSPI_DMA_UNALIGN) != 0x0)) {
+ if ((xqspi->bytes_to_receive < 8 || xqspi->io_mode) ||
+ ((dma_align & GQSPI_DMA_UNALIGN) != 0x0) ||
+ is_vmalloc_addr(xqspi->rxbuf)) {
/* Setting to IO mode */
config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
@@ -733,7 +911,7 @@ static void zynq_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
}
/**
- * zynqmp_qspi_txrxsetup: This function checks the TX/RX buffers in
+ * zynqmp_qspi_txrxsetup - This function checks the TX/RX buffers in
* the transfer and sets up the GENFIFO entries,
* TX FIFO as required.
* @xqspi: xqspi is a pointer to the GQSPI instance.
@@ -755,7 +933,8 @@ static void zynqmp_qspi_txrxsetup(struct zynqmp_qspi *xqspi,
*genfifoentry |= GQSPI_GENFIFO_TX;
*genfifoentry |=
zynqmp_qspi_selectspimode(xqspi, transfer->tx_nbits);
- xqspi->bytes_to_transfer = transfer->len;
+ xqspi->bytes_to_transfer = transfer->len -
+ (transfer->dummy / 8);
if (xqspi->mode == GQSPI_MODE_DMA) {
config_reg = zynqmp_gqspi_read(xqspi,
GQSPI_CONFIG_OFST);
@@ -784,7 +963,7 @@ static void zynqmp_qspi_txrxsetup(struct zynqmp_qspi *xqspi,
}
/**
- * zynqmp_qspi_start_transfer: Initiates the QSPI transfer
+ * zynqmp_qspi_start_transfer - Initiates the QSPI transfer
* @master: Pointer to the spi_master structure which provides
* information about the controller.
* @qspi: Pointer to the spi_device structure
@@ -811,18 +990,28 @@ static int zynqmp_qspi_start_transfer(struct spi_master *master,
genfifoentry |= xqspi->genfifocs;
genfifoentry |= xqspi->genfifobus;
+ if (!xqspi->isinstr && (master->flags & SPI_MASTER_DATA_STRIPE)) {
+ if (transfer->stripe)
+ genfifoentry |= GQSPI_GENFIFO_STRIPE;
+ }
zynqmp_qspi_txrxsetup(xqspi, transfer, &genfifoentry);
if (xqspi->mode == GQSPI_MODE_DMA)
transfer_len = xqspi->dma_rx_bytes;
else
- transfer_len = transfer->len;
+ transfer_len = transfer->len - (transfer->dummy / 8);
xqspi->genfifoentry = genfifoentry;
if ((transfer_len) < GQSPI_GENFIFO_IMM_DATA_MASK) {
genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK;
genfifoentry |= transfer_len;
zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry);
+ if (transfer->dummy || transfer->tx_nbits >= 1) {
+ zynqmp_qspi_preparedummy(xqspi, transfer,
+ &genfifoentry);
+ zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST,
+ genfifoentry);
+ }
} else {
int tempcount = transfer_len;
u32 exponent = 8; /* 2^8 = 256 */
@@ -849,6 +1038,14 @@ static int zynqmp_qspi_start_transfer(struct spi_master *master,
if (imm_data != 0) {
genfifoentry &= ~GQSPI_GENFIFO_EXP;
genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK;
+ if (imm_data % 4 != 0) {
+ if (((imm_data + 4 -
+ (imm_data % 4)) & 0xFF) == 0x00)
+ imm_data = 0xFF;
+ else
+ imm_data = imm_data + 4 - (imm_data
+ % 4);
+ }
genfifoentry |= (u8) (imm_data & 0xFF);
zynqmp_gqspi_write(xqspi,
GQSPI_GEN_FIFO_OFST, genfifoentry);
@@ -869,7 +1066,6 @@ static int zynqmp_qspi_start_transfer(struct spi_master *master,
if (xqspi->txbuf != NULL)
/* Enable interrupts for TX */
zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
- GQSPI_IER_TXEMPTY_MASK |
GQSPI_IER_GENFIFOEMPTY_MASK |
GQSPI_IER_TXNOT_FULL_MASK);
@@ -883,8 +1079,7 @@ static int zynqmp_qspi_start_transfer(struct spi_master *master,
} else {
zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
GQSPI_IER_GENFIFOEMPTY_MASK |
- GQSPI_IER_RXNEMPTY_MASK |
- GQSPI_IER_RXEMPTY_MASK);
+ GQSPI_IER_RXNEMPTY_MASK);
}
}
@@ -892,8 +1087,8 @@ static int zynqmp_qspi_start_transfer(struct spi_master *master,
}
/**
- * zynqmp_qspi_suspend: Suspend method for the QSPI driver
- * @_dev: Address of the platform_device structure
+ * zynqmp_qspi_suspend - Suspend method for the QSPI driver
+ * @dev: Address of the device structure
*
* This function stops the QSPI driver queue and disables the QSPI controller
*
@@ -911,7 +1106,7 @@ static int __maybe_unused zynqmp_qspi_suspend(struct device *dev)
}
/**
- * zynqmp_qspi_resume: Resume method for the QSPI driver
+ * zynqmp_qspi_resume - Resume method for the QSPI driver
+ * @dev: Address of the device structure
*
* The function starts the QSPI driver queue and initializes the QSPI
@@ -938,6 +1133,7 @@ static int __maybe_unused zynqmp_qspi_resume(struct device *dev)
return ret;
}
+ zynqmp_qspi_init_hw(xqspi);
spi_master_resume(master);
clk_disable(xqspi->refclk);
@@ -994,14 +1190,37 @@ static int __maybe_unused zynqmp_runtime_resume(struct device *dev)
return 0;
}
+static int __maybe_unused zynqmp_runtime_idle(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct zynqmp_qspi *xqspi = spi_master_get_devdata(master);
+ u32 value;
+
+ value = zynqmp_gqspi_read(xqspi, GQSPI_EN_OFST);
+ if (value)
+ return -EBUSY;
+
+ return 0;
+}
+
static const struct dev_pm_ops zynqmp_qspi_dev_pm_ops = {
SET_RUNTIME_PM_OPS(zynqmp_runtime_suspend,
- zynqmp_runtime_resume, NULL)
+ zynqmp_runtime_resume, zynqmp_runtime_idle)
SET_SYSTEM_SLEEP_PM_OPS(zynqmp_qspi_suspend, zynqmp_qspi_resume)
};
+static const struct qspi_platform_data versal_qspi_def = {
+ .quirks = QSPI_QUIRK_HAS_TAPDELAY,
+};
+
+static const struct of_device_id zynqmp_qspi_of_match[] = {
+ { .compatible = "xlnx,zynqmp-qspi-1.0"},
+ { .compatible = "xlnx,versal-qspi-1.0", .data = &versal_qspi_def },
+ { /* End of table */ }
+};
+
/**
- * zynqmp_qspi_probe: Probe method for the QSPI driver
+ * zynqmp_qspi_probe - Probe method for the QSPI driver
* @pdev: Pointer to the platform_device structure
*
* This function initializes the driver data structures and the hardware.
@@ -1014,6 +1233,11 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
struct spi_master *master;
struct zynqmp_qspi *xqspi;
struct device *dev = &pdev->dev;
+ struct device_node *nc;
+ const struct of_device_id *match;
+ u32 num_cs;
+ u32 rx_bus_width;
+ u32 tx_bus_width;
eemi_ops = zynqmp_pm_get_eemi_ops();
if (IS_ERR(eemi_ops))
@@ -1027,6 +1251,13 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
master->dev.of_node = pdev->dev.of_node;
platform_set_drvdata(pdev, master);
+ match = of_match_node(zynqmp_qspi_of_match, pdev->dev.of_node);
+ if (match) {
+ const struct qspi_platform_data *p_data = match->data;
+
+ if (p_data && (p_data->quirks & QSPI_QUIRK_HAS_TAPDELAY))
+ xqspi->has_tapdelay = true;
+ }
xqspi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xqspi->regs)) {
ret = PTR_ERR(xqspi->regs);
@@ -1064,6 +1295,10 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
+
+ if (of_property_read_bool(pdev->dev.of_node, "has-io-mode"))
+ xqspi->io_mode = true;
+
/* QSPI controller initializations */
zynqmp_qspi_init_hw(xqspi);
@@ -1072,6 +1307,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
xqspi->irq = platform_get_irq(pdev, 0);
if (xqspi->irq <= 0) {
ret = -ENXIO;
+ dev_err(dev, "irq resource not found\n");
goto clk_dis_all;
}
ret = devm_request_irq(&pdev->dev, xqspi->irq, zynqmp_qspi_irq,
@@ -1082,8 +1318,37 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
goto clk_dis_all;
}
- master->num_chipselect = GQSPI_DEFAULT_NUM_CS;
+ xqspi->rx_bus_width = GQSPI_RX_BUS_WIDTH_SINGLE;
+ for_each_available_child_of_node(pdev->dev.of_node, nc) {
+ ret = of_property_read_u32(nc, "spi-rx-bus-width",
+ &rx_bus_width);
+ if (!ret) {
+ xqspi->rx_bus_width = rx_bus_width;
+ break;
+ }
+ }
+ if (ret)
+ dev_err(dev, "rx bus width not found\n");
+
+ xqspi->tx_bus_width = GQSPI_TX_BUS_WIDTH_SINGLE;
+ for_each_available_child_of_node(pdev->dev.of_node, nc) {
+ ret = of_property_read_u32(nc, "spi-tx-bus-width",
+ &tx_bus_width);
+ if (!ret) {
+ xqspi->tx_bus_width = tx_bus_width;
+ break;
+ }
+ }
+ if (ret)
+ dev_err(dev, "tx bus width not found\n");
+ ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
+ if (ret < 0)
+ master->num_chipselect = GQSPI_DEFAULT_NUM_CS;
+ else
+ master->num_chipselect = num_cs;
+
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
master->setup = zynqmp_qspi_setup;
master->set_cs = zynqmp_qspi_chipselect;
master->transfer_one = zynqmp_qspi_start_transfer;
@@ -1094,6 +1359,8 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
SPI_TX_DUAL | SPI_TX_QUAD;
+ xqspi->speed_hz = master->max_speed_hz;
+ master->auto_runtime_pm = true;
if (master->dev.parent == NULL)
master->dev.parent = &master->dev;
@@ -1117,7 +1384,7 @@ remove_master:
}
/**
- * zynqmp_qspi_remove: Remove method for the QSPI driver
+ * zynqmp_qspi_remove - Remove method for the QSPI driver
* @pdev: Pointer to the platform_device structure
*
* This function is called if a device is physically removed from the system or
@@ -1142,11 +1409,6 @@ static int zynqmp_qspi_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id zynqmp_qspi_of_match[] = {
- { .compatible = "xlnx,zynqmp-qspi-1.0", },
- { /* End of table */ }
-};
-
MODULE_DEVICE_TABLE(of, zynqmp_qspi_of_match);
static struct platform_driver zynqmp_qspi_driver = {
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index a6e16c138845..304c3a3fa27b 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1966,6 +1966,9 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
if (!of_property_read_u32(nc, "spi-max-frequency", &value))
spi->max_speed_hz = value;
+ /* Multi die flash */
+ if (of_property_read_bool(nc, "multi-die"))
+ spi->multi_die = true;
return 0;
}
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index a9939ff9490e..a20e603bae5d 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -78,7 +78,7 @@ source "drivers/staging/gs_fpgaboot/Kconfig"
source "drivers/staging/unisys/Kconfig"
-source "drivers/staging/clocking-wizard/Kconfig"
+source "drivers/staging/apf/Kconfig"
source "drivers/staging/fbtft/Kconfig"
@@ -120,4 +120,14 @@ source "drivers/staging/qlge/Kconfig"
source "drivers/staging/wfx/Kconfig"
+source "drivers/staging/fclk/Kconfig"
+
+source "drivers/staging/xlnxsync/Kconfig"
+
+source "drivers/staging/xlnx_tsmux/Kconfig"
+
+source "drivers/staging/xroeframer/Kconfig"
+
+source "drivers/staging/xroetrafficgen/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 4d34198151b3..3d7114df5b7a 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -29,7 +29,7 @@ obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/
obj-$(CONFIG_GOLDFISH) += goldfish/
obj-$(CONFIG_GS_FPGABOOT) += gs_fpgaboot/
obj-$(CONFIG_UNISYSSPAR) += unisys/
-obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clocking-wizard/
+obj-$(CONFIG_XILINX_APF) += apf/
obj-$(CONFIG_FB_TFT) += fbtft/
obj-$(CONFIG_FSL_DPAA2) += fsl-dpaa2/
obj-$(CONFIG_WILC1000) += wilc1000/
@@ -50,3 +50,7 @@ obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/
obj-$(CONFIG_KPC2000) += kpc2000/
obj-$(CONFIG_QLGE) += qlge/
obj-$(CONFIG_WFX) += wfx/
+obj-$(CONFIG_XILINX_FCLK) += fclk/
+obj-$(CONFIG_XLNX_SYNC) += xlnxsync/
+obj-$(CONFIG_XLNX_TSMUX) += xlnx_tsmux/
+obj-$(CONFIG_XROE_FRAMER) += xroeframer/
diff --git a/drivers/staging/apf/Kconfig b/drivers/staging/apf/Kconfig
new file mode 100644
index 000000000000..33cf32d43d02
--- /dev/null
+++ b/drivers/staging/apf/Kconfig
@@ -0,0 +1,20 @@
+#
+# APF driver configuration
+#
+
+menuconfig XILINX_APF
+ bool "Xilinx APF Accelerator driver"
+ depends on ARCH_ZYNQ || ARCH_ZYNQMP
+ default n
+ select UIO
+ select DMA_SHARED_BUFFER
+ help
+ Select if you want to include APF accelerator driver
+
+config XILINX_DMA_APF
+ bool "Xilinx APF DMA engines support"
+ depends on XILINX_APF
+ select DMA_ENGINE
+ select DMADEVICES
+ help
+ Enable support for the Xilinx APF DMA controllers.
diff --git a/drivers/staging/apf/Makefile b/drivers/staging/apf/Makefile
new file mode 100644
index 000000000000..bf281a2c16df
--- /dev/null
+++ b/drivers/staging/apf/Makefile
@@ -0,0 +1,9 @@
+# gpio support: dedicated expander chips, etc
+
+ccflags-$(CONFIG_DEBUG_XILINX_APF) += -DDEBUG
+ccflags-$(CONFIG_XILINX_APF) += -Idrivers/dma
+
+obj-$(CONFIG_XILINX_APF) += xlnk.o
+obj-$(CONFIG_XILINX_APF) += xlnk-eng.o
+obj-$(CONFIG_XILINX_DMA_APF) += xilinx-dma-apf.o
+
diff --git a/drivers/staging/apf/dt-binding.txt b/drivers/staging/apf/dt-binding.txt
new file mode 100644
index 000000000000..fd73725fa589
--- /dev/null
+++ b/drivers/staging/apf/dt-binding.txt
@@ -0,0 +1,17 @@
+* Xilinx APF xlnk driver
+
+Required properties:
+- compatible: Should be "xlnx,xlnk"
+- clock-names: List of clock names
+- clocks: List of clock sources corresponding to the clock names
+
+The number of elements on the clock-names and clocks lists should be the same.
+If there are no controllable clocks, the xlnk node should be omitted from the
+devicetree.
+
+Example:
+ xlnk {
+ compatible = "xlnx,xlnk-1.0";
+ clock-names = "clk166", "clk150", "clk100", "clk200";
+ clocks = <&clkc 15>, <&clkc 16>, <&clkc 17>, <&clkc 18>;
+ };
diff --git a/drivers/staging/apf/xilinx-dma-apf.c b/drivers/staging/apf/xilinx-dma-apf.c
new file mode 100644
index 000000000000..55913130eafc
--- /dev/null
+++ b/drivers/staging/apf/xilinx-dma-apf.c
@@ -0,0 +1,1232 @@
+/*
+ * Xilinx AXI DMA Engine support
+ *
+ * Copyright (C) 2010 Xilinx, Inc. All rights reserved.
+ *
+ * Description:
+ * This driver supports Xilinx AXI DMA engine:
+ * . Axi DMA engine, it does transfers between memory and device. It can be
+ * configured to have one channel or two channels. If configured as two
+ * channels, one is for transmit to device and another is for receive from
+ * device.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/dmapool.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/pagemap.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/pm.h>
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <linux/sched.h>
+#include <linux/dma-buf.h>
+
+#include <linux/of.h>
+#include <linux/irq.h>
+#include <linux/of_irq.h>
+
+#include "xilinx-dma-apf.h"
+
+#include "xlnk.h"
+
+static DEFINE_MUTEX(dma_list_mutex);
+static LIST_HEAD(dma_device_list);
+/* IO accessors */
+#define DMA_OUT_64(addr, val) (writeq(val, addr))
+#define DMA_OUT(addr, val) (iowrite32(val, addr))
+#define DMA_IN(addr) (ioread32(addr))
+
+#define GET_LOW(x) ((u32)((x) & 0xFFFFFFFF))
+#define GET_HI(x) ((u32)((x) / 0x100000000))
+
+static int unpin_user_pages(struct scatterlist *sglist, unsigned int cnt);
+/* Driver functions */
+static void xdma_clean_bd(struct xdma_desc_hw *bd)
+{
+ bd->src_addr = 0x0;
+ bd->control = 0x0;
+ bd->status = 0x0;
+ bd->app[0] = 0x0;
+ bd->app[1] = 0x0;
+ bd->app[2] = 0x0;
+ bd->app[3] = 0x0;
+ bd->app[4] = 0x0;
+ bd->dmahead = 0x0;
+ bd->sw_flag = 0x0;
+}
+
+static int dma_is_running(struct xdma_chan *chan)
+{
+ return !(DMA_IN(&chan->regs->sr) & XDMA_SR_HALTED_MASK) &&
+ (DMA_IN(&chan->regs->cr) & XDMA_CR_RUNSTOP_MASK);
+}
+
+static int dma_is_idle(struct xdma_chan *chan)
+{
+ return DMA_IN(&chan->regs->sr) & XDMA_SR_IDLE_MASK;
+}
+
+static void dma_halt(struct xdma_chan *chan)
+{
+ DMA_OUT(&chan->regs->cr,
+ (DMA_IN(&chan->regs->cr) & ~XDMA_CR_RUNSTOP_MASK));
+}
+
+static void dma_start(struct xdma_chan *chan)
+{
+ DMA_OUT(&chan->regs->cr,
+ (DMA_IN(&chan->regs->cr) | XDMA_CR_RUNSTOP_MASK));
+}
+
+static int dma_init(struct xdma_chan *chan)
+{
+ int loop = XDMA_RESET_LOOP;
+
+ DMA_OUT(&chan->regs->cr,
+ (DMA_IN(&chan->regs->cr) | XDMA_CR_RESET_MASK));
+
+ /* Wait for the hardware to finish reset
+ */
+ while (loop) {
+ if (!(DMA_IN(&chan->regs->cr) & XDMA_CR_RESET_MASK))
+ break;
+
+ loop -= 1;
+ }
+
+ if (!loop)
+ return 1;
+
+ return 0;
+}
+
+static int xdma_alloc_chan_descriptors(struct xdma_chan *chan)
+{
+ int i;
+ u8 *ptr;
+
+ /*
+ * We need the descriptor to be aligned to 64bytes
+ * for meeting Xilinx DMA specification requirement.
+ */
+ ptr = (u8 *)dma_alloc_coherent(chan->dev,
+ (sizeof(struct xdma_desc_hw) * XDMA_MAX_BD_CNT),
+ &chan->bd_phys_addr,
+ GFP_KERNEL);
+
+ if (!ptr) {
+ dev_err(chan->dev,
+ "unable to allocate channel %d descriptor pool\n",
+ chan->id);
+ return -ENOMEM;
+ }
+
+ memset(ptr, 0, (sizeof(struct xdma_desc_hw) * XDMA_MAX_BD_CNT));
+ chan->bd_cur = 0;
+ chan->bd_tail = 0;
+ chan->bd_used = 0;
+ chan->bd_chain_size = sizeof(struct xdma_desc_hw) * XDMA_MAX_BD_CNT;
+
+ /*
+ * Pre allocate all the channels.
+ */
+ for (i = 0; i < XDMA_MAX_BD_CNT; i++) {
+ chan->bds[i] = (struct xdma_desc_hw *)
+ (ptr + (sizeof(struct xdma_desc_hw) * i));
+ chan->bds[i]->next_desc = chan->bd_phys_addr +
+ (sizeof(struct xdma_desc_hw) *
+ ((i + 1) % XDMA_MAX_BD_CNT));
+ }
+
+ /* there is at least one descriptor free to be allocated */
+ return 0;
+}
+
+static void xdma_free_chan_resources(struct xdma_chan *chan)
+{
+ dev_dbg(chan->dev, "Free all channel resources.\n");
+ dma_free_coherent(chan->dev, (sizeof(struct xdma_desc_hw) *
+ XDMA_MAX_BD_CNT), chan->bds[0], chan->bd_phys_addr);
+}
+
+static void xilinx_chan_desc_reinit(struct xdma_chan *chan)
+{
+ struct xdma_desc_hw *desc;
+ unsigned int start, end;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ start = 0;
+ end = XDMA_MAX_BD_CNT;
+
+ while (start < end) {
+ desc = chan->bds[start];
+ xdma_clean_bd(desc);
+ start++;
+ }
+ /* Re-initialize bd_cur and bd_tail values */
+ chan->bd_cur = 0;
+ chan->bd_tail = 0;
+ chan->bd_used = 0;
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+static void xilinx_chan_desc_cleanup(struct xdma_chan *chan)
+{
+ struct xdma_head *dmahead;
+ struct xdma_desc_hw *desc;
+ struct completion *cmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+#define XDMA_BD_STS_RXEOF_MASK 0x04000000
+ desc = chan->bds[chan->bd_cur];
+ while (desc->status & XDMA_BD_STS_ALL_MASK) {
+ if ((desc->status & XDMA_BD_STS_RXEOF_MASK) &&
+ !(desc->dmahead)) {
+ pr_info("ERROR: premature EOF on DMA\n");
+ dma_init(chan); /* reset the dma HW */
+ while (!(desc->dmahead)) {
+ xdma_clean_bd(desc);
+ chan->bd_used--;
+ chan->bd_cur++;
+ if (chan->bd_cur >= XDMA_MAX_BD_CNT)
+ chan->bd_cur = 0;
+ desc = chan->bds[chan->bd_cur];
+ }
+ }
+ if (desc->dmahead) {
+ if ((desc->sw_flag & XDMA_BD_SF_POLL_MODE_MASK))
+ if (!(desc->sw_flag & XDMA_BD_SF_SW_DONE_MASK))
+ break;
+
+ dmahead = (struct xdma_head *)desc->dmahead;
+ cmp = (struct completion *)&dmahead->cmp;
+ if (dmahead->nappwords_o)
+ memcpy(dmahead->appwords_o, desc->app,
+ dmahead->nappwords_o * sizeof(u32));
+
+ if (chan->poll_mode)
+ cmp->done = 1;
+ else
+ complete(cmp);
+ }
+ xdma_clean_bd(desc);
+ chan->bd_used--;
+ chan->bd_cur++;
+ if (chan->bd_cur >= XDMA_MAX_BD_CNT)
+ chan->bd_cur = 0;
+ desc = chan->bds[chan->bd_cur];
+ }
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+static void xdma_err_tasklet(unsigned long data)
+{
+ struct xdma_chan *chan = (struct xdma_chan *)data;
+
+ if (chan->err) {
+ /* If reset failed, need to hard reset
+ * Channel is no longer functional
+ */
+ if (!dma_init(chan))
+ chan->err = 0;
+ else
+ dev_err(chan->dev, "DMA channel reset failed, please reset system\n");
+ }
+
+ /* Barrier to assert descriptor init is reaches memory */
+ rmb();
+ xilinx_chan_desc_cleanup(chan);
+
+ xilinx_chan_desc_reinit(chan);
+}
+
+static void xdma_tasklet(unsigned long data)
+{
+ struct xdma_chan *chan = (struct xdma_chan *)data;
+
+ xilinx_chan_desc_cleanup(chan);
+}
+
+static void dump_cur_bd(struct xdma_chan *chan)
+{
+ u32 index;
+
+ index = (((u32)DMA_IN(&chan->regs->cdr)) - chan->bd_phys_addr) /
+ sizeof(struct xdma_desc_hw);
+
+ dev_err(chan->dev, "cur bd @ %08x\n", (u32)DMA_IN(&chan->regs->cdr));
+ dev_err(chan->dev, " buf = %p\n",
+ (void *)chan->bds[index]->src_addr);
+ dev_err(chan->dev, " ctrl = 0x%08x\n", chan->bds[index]->control);
+ dev_err(chan->dev, " sts = 0x%08x\n", chan->bds[index]->status);
+ dev_err(chan->dev, " next = %p\n",
+ (void *)chan->bds[index]->next_desc);
+}
+
+static irqreturn_t xdma_rx_intr_handler(int irq, void *data)
+{
+ struct xdma_chan *chan = data;
+ u32 stat;
+
+ stat = DMA_IN(&chan->regs->sr);
+
+ if (!(stat & XDMA_XR_IRQ_ALL_MASK))
+ return IRQ_NONE;
+
+ /* Ack the interrupts */
+ DMA_OUT(&chan->regs->sr, (stat & XDMA_XR_IRQ_ALL_MASK));
+
+ if (stat & XDMA_XR_IRQ_ERROR_MASK) {
+ dev_err(chan->dev, "Channel %s has errors %x, cdr %x tdr %x\n",
+ chan->name, (unsigned int)stat,
+ (unsigned int)DMA_IN(&chan->regs->cdr),
+ (unsigned int)DMA_IN(&chan->regs->tdr));
+
+ dump_cur_bd(chan);
+
+ chan->err = 1;
+ tasklet_schedule(&chan->dma_err_tasklet);
+ }
+
+ if (!(chan->poll_mode) && ((stat & XDMA_XR_IRQ_DELAY_MASK) ||
+ (stat & XDMA_XR_IRQ_IOC_MASK)))
+ tasklet_schedule(&chan->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xdma_tx_intr_handler(int irq, void *data)
+{
+ struct xdma_chan *chan = data;
+ u32 stat;
+
+ stat = DMA_IN(&chan->regs->sr);
+
+ if (!(stat & XDMA_XR_IRQ_ALL_MASK))
+ return IRQ_NONE;
+
+ /* Ack the interrupts */
+ DMA_OUT(&chan->regs->sr, (stat & XDMA_XR_IRQ_ALL_MASK));
+
+ if (stat & XDMA_XR_IRQ_ERROR_MASK) {
+ dev_err(chan->dev, "Channel %s has errors %x, cdr %x tdr %x\n",
+ chan->name, (unsigned int)stat,
+ (unsigned int)DMA_IN(&chan->regs->cdr),
+ (unsigned int)DMA_IN(&chan->regs->tdr));
+
+ dump_cur_bd(chan);
+
+ chan->err = 1;
+ tasklet_schedule(&chan->dma_err_tasklet);
+ }
+
+ if (!(chan->poll_mode) && ((stat & XDMA_XR_IRQ_DELAY_MASK) ||
+ (stat & XDMA_XR_IRQ_IOC_MASK)))
+ tasklet_schedule(&chan->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static void xdma_start_transfer(struct xdma_chan *chan,
+ int start_index,
+ int end_index)
+{
+ xlnk_intptr_type cur_phys;
+ xlnk_intptr_type tail_phys;
+ u32 regval;
+
+ if (chan->err)
+ return;
+
+ cur_phys = chan->bd_phys_addr + (start_index *
+ sizeof(struct xdma_desc_hw));
+ tail_phys = chan->bd_phys_addr + (end_index *
+ sizeof(struct xdma_desc_hw));
+ /* If hardware is busy, move the tail & return */
+ if (dma_is_running(chan) || dma_is_idle(chan)) {
+#if XLNK_SYS_BIT_WIDTH == 32
+ DMA_OUT(&chan->regs->tdr, tail_phys);
+#else
+ DMA_OUT_64(&chan->regs->tdr, tail_phys);
+#endif
+ return;
+ }
+
+#if XLNK_SYS_BIT_WIDTH == 32
+ DMA_OUT(&chan->regs->cdr, cur_phys);
+#else
+ DMA_OUT_64(&chan->regs->cdr, cur_phys);
+#endif
+
+ dma_start(chan);
+
+ /* Enable interrupts */
+ regval = DMA_IN(&chan->regs->cr);
+ regval |= (chan->poll_mode ? XDMA_XR_IRQ_ERROR_MASK
+ : XDMA_XR_IRQ_ALL_MASK);
+ DMA_OUT(&chan->regs->cr, regval);
+
+ /* Update tail ptr register and start the transfer */
+#if XLNK_SYS_BIT_WIDTH == 32
+ DMA_OUT(&chan->regs->tdr, tail_phys);
+#else
+ DMA_OUT_64(&chan->regs->tdr, tail_phys);
+#endif
+}
+
+static int xdma_setup_hw_desc(struct xdma_chan *chan,
+ struct xdma_head *dmahead,
+ struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_data_direction direction,
+ unsigned int nappwords_i,
+ u32 *appwords_i)
+{
+ struct xdma_desc_hw *bd = NULL;
+ size_t copy;
+ struct scatterlist *sg;
+ size_t sg_used;
+ dma_addr_t dma_src;
+ int i, start_index = -1, end_index1 = 0, end_index2 = -1;
+ int status;
+ unsigned long flags;
+ unsigned int bd_used_saved;
+
+ if (!chan) {
+ pr_err("Requested transfer on invalid channel\n");
+ return -ENODEV;
+ }
+
+ /* if we almost run out of bd, try to recycle some */
+ if ((chan->poll_mode) && (chan->bd_used >= XDMA_BD_CLEANUP_THRESHOLD))
+ xilinx_chan_desc_cleanup(chan);
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ bd_used_saved = chan->bd_used;
+ /*
+ * Build transactions using information in the scatter gather list
+ */
+ for_each_sg(sgl, sg, sg_len, i) {
+ sg_used = 0;
+
+ /* Loop until the entire scatterlist entry is used */
+ while (sg_used < sg_dma_len(sg)) {
+ /* Allocate the link descriptor from DMA pool */
+ bd = chan->bds[chan->bd_tail];
+ if ((bd->control) & (XDMA_BD_STS_ACTUAL_LEN_MASK)) {
+ end_index2 = chan->bd_tail;
+ status = -ENOMEM;
+ /* If first was not set, then we failed to
+ * allocate the very first descriptor,
+ * and we're done
+ */
+ if (start_index == -1)
+ goto out_unlock;
+ else
+ goto out_clean;
+ }
+ /*
+ * Calculate the maximum number of bytes to transfer,
+ * making sure it is less than the DMA controller limit
+ */
+ copy = min((size_t)(sg_dma_len(sg) - sg_used),
+ (size_t)chan->max_len);
+ /*
+ * Only the src address for DMA
+ */
+ dma_src = sg_dma_address(sg) + sg_used;
+ bd->src_addr = dma_src;
+
+ /* Fill in the descriptor */
+ bd->control = copy;
+
+ /*
+ * If this is not the first descriptor, chain the
+ * current descriptor after the previous descriptor
+ *
+ * For the first DMA_TO_DEVICE transfer, set SOP
+ */
+ if (start_index == -1) {
+ start_index = chan->bd_tail;
+
+ if (nappwords_i)
+ memcpy(bd->app, appwords_i,
+ nappwords_i * sizeof(u32));
+
+ if (direction == DMA_TO_DEVICE)
+ bd->control |= XDMA_BD_SOP;
+ }
+
+ sg_used += copy;
+ end_index2 = chan->bd_tail;
+ chan->bd_tail++;
+ chan->bd_used++;
+ if (chan->bd_tail >= XDMA_MAX_BD_CNT) {
+ end_index1 = XDMA_MAX_BD_CNT;
+ chan->bd_tail = 0;
+ }
+ }
+ }
+
+ if (start_index == -1) {
+ status = -EINVAL;
+ goto out_unlock;
+ }
+
+ bd->dmahead = (xlnk_intptr_type)dmahead;
+ bd->sw_flag = chan->poll_mode ? XDMA_BD_SF_POLL_MODE_MASK : 0;
+ dmahead->last_bd_index = end_index2;
+
+ if (direction == DMA_TO_DEVICE)
+ bd->control |= XDMA_BD_EOP;
+
+ /* Barrier to assert control word write commits */
+ wmb();
+
+ xdma_start_transfer(chan, start_index, end_index2);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+
+out_clean:
+ if (!end_index1) {
+ for (i = start_index; i < end_index2; i++)
+ xdma_clean_bd(chan->bds[i]);
+ } else {
+ /* clean till the end of bd list first, and then 2nd end */
+ for (i = start_index; i < end_index1; i++)
+ xdma_clean_bd(chan->bds[i]);
+
+ end_index1 = 0;
+ for (i = end_index1; i < end_index2; i++)
+ xdma_clean_bd(chan->bds[i]);
+ }
+ /* Move the bd_tail back */
+ chan->bd_tail = start_index;
+ chan->bd_used = bd_used_saved;
+
+out_unlock:
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return status;
+}
+
+/*
+ * create minimal length scatter gather list for physically contiguous buffer
+ * that starts at phy_buf and has length phy_buf_len bytes
+ */
+static unsigned int phy_buf_to_sgl(xlnk_intptr_type phy_buf,
+ unsigned int phy_buf_len,
+ struct scatterlist *sgl)
+{
+ unsigned int sgl_cnt = 0;
+ struct scatterlist *sgl_head;
+ unsigned int dma_len;
+ unsigned int num_bd;
+
+ if (!phy_buf || !phy_buf_len) {
+ pr_err("phy_buf is NULL or phy_buf_len = 0\n");
+ return sgl_cnt;
+ }
+
+ num_bd = (phy_buf_len + (XDMA_MAX_TRANS_LEN - 1))
+ / XDMA_MAX_TRANS_LEN;
+ sgl_head = sgl;
+ sg_init_table(sgl, num_bd);
+
+ while (phy_buf_len > 0) {
+ xlnk_intptr_type page_id = phy_buf >> PAGE_SHIFT;
+ unsigned int offset = phy_buf - (page_id << PAGE_SHIFT);
+
+ sgl_cnt++;
+ if (sgl_cnt > XDMA_MAX_BD_CNT)
+ return 0;
+
+ dma_len = (phy_buf_len > XDMA_MAX_TRANS_LEN) ?
+ XDMA_MAX_TRANS_LEN : phy_buf_len;
+
+ sg_set_page(sgl_head, pfn_to_page(page_id), dma_len, offset);
+ sg_dma_address(sgl_head) = (dma_addr_t)phy_buf;
+ sg_dma_len(sgl_head) = dma_len;
+ sgl_head = sg_next(sgl_head);
+
+ phy_buf += dma_len;
+ phy_buf_len -= dma_len;
+ }
+
+ return sgl_cnt;
+}
+
+/* merge sg list, sgl, with length sgl_len, to sgl_merged, to save dma bds */
+static unsigned int sgl_merge(struct scatterlist *sgl,
+ unsigned int sgl_len,
+ struct scatterlist *sgl_merged)
+{
+ struct scatterlist *sghead, *sgend, *sgnext, *sg_merged_head;
+ unsigned int sg_visited_cnt = 0, sg_merged_num = 0;
+ unsigned int dma_len = 0;
+
+ sg_init_table(sgl_merged, sgl_len);
+ sg_merged_head = sgl_merged;
+ sghead = sgl;
+
+ while (sghead && (sg_visited_cnt < sgl_len)) {
+ dma_len = sg_dma_len(sghead);
+ sgend = sghead;
+ sg_visited_cnt++;
+ sgnext = sg_next(sgend);
+
+ while (sgnext && (sg_visited_cnt < sgl_len)) {
+ if ((sg_dma_address(sgend) + sg_dma_len(sgend)) !=
+ sg_dma_address(sgnext))
+ break;
+
+ if (dma_len + sg_dma_len(sgnext) >= XDMA_MAX_TRANS_LEN)
+ break;
+
+ sgend = sgnext;
+ dma_len += sg_dma_len(sgend);
+ sg_visited_cnt++;
+ sgnext = sg_next(sgnext);
+ }
+
+ sg_merged_num++;
+ if (sg_merged_num > XDMA_MAX_BD_CNT)
+ return 0;
+
+ memcpy(sg_merged_head, sghead, sizeof(struct scatterlist));
+
+ sg_dma_len(sg_merged_head) = dma_len;
+
+ sg_merged_head = sg_next(sg_merged_head);
+ sghead = sg_next(sgend);
+ }
+
+ return sg_merged_num;
+}
+
+static int pin_user_pages(xlnk_intptr_type uaddr,
+ unsigned int ulen,
+ int write,
+ struct scatterlist **scatterpp,
+ unsigned int *cntp,
+ unsigned int user_flags)
+{
+ int status;
+ struct mm_struct *mm = current->mm;
+ unsigned int first_page;
+ unsigned int last_page;
+ unsigned int num_pages;
+ struct scatterlist *sglist;
+ struct page **mapped_pages;
+
+ unsigned int pgidx;
+ unsigned int pglen;
+ unsigned int pgoff;
+ unsigned int sublen;
+
+ first_page = uaddr / PAGE_SIZE;
+ last_page = (uaddr + ulen - 1) / PAGE_SIZE;
+ num_pages = last_page - first_page + 1;
+ mapped_pages = vmalloc(sizeof(*mapped_pages) * num_pages);
+ if (!mapped_pages)
+ return -ENOMEM;
+
+ down_read(&mm->mmap_sem);
+ status = get_user_pages(uaddr, num_pages,
+ (write ? FOLL_WRITE : 0) | FOLL_FORCE,
+ mapped_pages, NULL);
+ up_read(&mm->mmap_sem);
+
+ if (status == num_pages) {
+ sglist = kcalloc(num_pages,
+ sizeof(struct scatterlist),
+ GFP_KERNEL);
+ if (!sglist) {
+ pr_err("%s: kcalloc failed to create sg list\n",
+ __func__);
+ vfree(mapped_pages);
+ return -ENOMEM;
+ }
+ sg_init_table(sglist, num_pages);
+ sublen = 0;
+ for (pgidx = 0; pgidx < status; pgidx++) {
+ if (pgidx == 0 && num_pages != 1) {
+ pgoff = uaddr & (~PAGE_MASK);
+ pglen = PAGE_SIZE - pgoff;
+ } else if (pgidx == 0 && num_pages == 1) {
+ pgoff = uaddr & (~PAGE_MASK);
+ pglen = ulen;
+ } else if (pgidx == num_pages - 1) {
+ pgoff = 0;
+ pglen = ulen - sublen;
+ } else {
+ pgoff = 0;
+ pglen = PAGE_SIZE;
+ }
+
+ sublen += pglen;
+
+ sg_set_page(&sglist[pgidx],
+ mapped_pages[pgidx],
+ pglen, pgoff);
+
+ sg_dma_len(&sglist[pgidx]) = pglen;
+ }
+
+ *scatterpp = sglist;
+ *cntp = num_pages;
+
+ vfree(mapped_pages);
+ return 0;
+ }
+ pr_err("Failed to pin user pages\n");
+ for (pgidx = 0; pgidx < status; pgidx++)
+ put_page(mapped_pages[pgidx]);
+ vfree(mapped_pages);
+ return -ENOMEM;
+}
+
+static int unpin_user_pages(struct scatterlist *sglist, unsigned int cnt)
+{
+ struct page *pg;
+ unsigned int i;
+
+ if (!sglist)
+ return 0;
+
+ for (i = 0; i < cnt; i++) {
+ pg = sg_page(sglist + i);
+ if (pg)
+ put_page(pg);
+ }
+
+ kfree(sglist);
+ return 0;
+}
+
+struct xdma_chan *xdma_request_channel(char *name)
+{
+ int i;
+ struct xdma_device *device, *tmp;
+
+ list_for_each_entry_safe(device, tmp, &dma_device_list, node) {
+ for (i = 0; i < device->channel_count; i++) {
+ if (!strcmp(device->chan[i]->name, name))
+ return device->chan[i];
+ }
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(xdma_request_channel);
+
+void xdma_release_channel(struct xdma_chan *chan)
+{ }
+EXPORT_SYMBOL(xdma_release_channel);
+
+void xdma_release_all_channels(void)
+{
+ int i;
+ struct xdma_device *device, *tmp;
+
+ list_for_each_entry_safe(device, tmp, &dma_device_list, node) {
+ for (i = 0; i < device->channel_count; i++) {
+ if (device->chan[i]->client_count) {
+ dma_halt(device->chan[i]);
+ xilinx_chan_desc_reinit(device->chan[i]);
+ pr_info("%s: chan %s freed\n",
+ __func__,
+ device->chan[i]->name);
+ }
+ }
+ }
+}
+EXPORT_SYMBOL(xdma_release_all_channels);
+
+static void xdma_release(struct device *dev)
+{
+}
+
+int xdma_submit(struct xdma_chan *chan,
+ xlnk_intptr_type userbuf,
+ void *kaddr,
+ unsigned int size,
+ unsigned int nappwords_i,
+ u32 *appwords_i,
+ unsigned int nappwords_o,
+ unsigned int user_flags,
+ struct xdma_head **dmaheadpp,
+ struct xlnk_dmabuf_reg *dp)
+{
+ struct xdma_head *dmahead;
+ struct scatterlist *pagelist = NULL;
+ struct scatterlist *sglist = NULL;
+ unsigned int pagecnt = 0;
+ unsigned int sgcnt = 0;
+ enum dma_data_direction dmadir;
+ int status;
+ unsigned long attrs = 0;
+
+ dmahead = kzalloc(sizeof(*dmahead), GFP_KERNEL);
+ if (!dmahead)
+ return -ENOMEM;
+
+ dmahead->chan = chan;
+ dmahead->userbuf = userbuf;
+ dmahead->size = size;
+ dmahead->dmadir = chan->direction;
+ dmahead->userflag = user_flags;
+ dmahead->dmabuf = dp;
+ dmadir = chan->direction;
+
+ if (!(user_flags & CF_FLAG_CACHE_FLUSH_INVALIDATE))
+ attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+
+ if (dp) {
+ int i;
+ struct scatterlist *sg;
+ unsigned int remaining_size = size;
+
+ if (IS_ERR_OR_NULL(dp->dbuf_sg_table)) {
+ pr_err("%s dmabuf not mapped: %p\n",
+ __func__, dp->dbuf_sg_table);
+ return -EINVAL;
+ }
+ if (dp->dbuf_sg_table->nents == 0) {
+ pr_err("%s: cannot map a scatterlist with 0 entries\n",
+ __func__);
+ return -EINVAL;
+ }
+ sglist = kmalloc_array(dp->dbuf_sg_table->nents,
+ sizeof(*sglist),
+ GFP_KERNEL);
+ if (!sglist)
+ return -ENOMEM;
+
+ sg_init_table(sglist, dp->dbuf_sg_table->nents);
+ sgcnt = 0;
+ for_each_sg(dp->dbuf_sg_table->sgl,
+ sg,
+ dp->dbuf_sg_table->nents,
+ i) {
+ sg_set_page(sglist + i,
+ sg_page(sg),
+ sg_dma_len(sg),
+ sg->offset);
+ sg_dma_address(sglist + i) = sg_dma_address(sg);
+ if (remaining_size == 0) {
+ sg_dma_len(sglist + i) = 0;
+ } else if (sg_dma_len(sg) > remaining_size) {
+ sg_dma_len(sglist + i) = remaining_size;
+ sgcnt++;
+ } else {
+ sg_dma_len(sglist + i) = sg_dma_len(sg);
+ remaining_size -= sg_dma_len(sg);
+ sgcnt++;
+ }
+ }
+ dmahead->userbuf = (xlnk_intptr_type)sglist->dma_address;
+ pagelist = NULL;
+ pagecnt = 0;
+ } else if (user_flags & CF_FLAG_PHYSICALLY_CONTIGUOUS) {
+ size_t elem_cnt;
+
+ elem_cnt = DIV_ROUND_UP(size, XDMA_MAX_TRANS_LEN);
+ sglist = kmalloc_array(elem_cnt, sizeof(*sglist), GFP_KERNEL);
+ sgcnt = phy_buf_to_sgl(userbuf, size, sglist);
+ if (!sgcnt)
+ return -ENOMEM;
+
+ status = get_dma_ops(chan->dev)->map_sg(chan->dev,
+ sglist,
+ sgcnt,
+ dmadir,
+ attrs);
+
+ if (!status) {
+ pr_err("sg contiguous mapping failed\n");
+ return -ENOMEM;
+ }
+ pagelist = NULL;
+ pagecnt = 0;
+ } else {
+ status = pin_user_pages(userbuf,
+ size,
+ dmadir != DMA_TO_DEVICE,
+ &pagelist,
+ &pagecnt,
+ user_flags);
+ if (status < 0) {
+ pr_err("pin_user_pages failed\n");
+ return status;
+ }
+
+ status = get_dma_ops(chan->dev)->map_sg(chan->dev,
+ pagelist,
+ pagecnt,
+ dmadir,
+ attrs);
+ if (!status) {
+ pr_err("dma_map_sg failed\n");
+ unpin_user_pages(pagelist, pagecnt);
+ return -ENOMEM;
+ }
+
+ sglist = kmalloc_array(pagecnt, sizeof(*sglist), GFP_KERNEL);
+ if (sglist)
+ sgcnt = sgl_merge(pagelist, pagecnt, sglist);
+ if (!sgcnt) {
+ get_dma_ops(chan->dev)->unmap_sg(chan->dev,
+ pagelist,
+ pagecnt,
+ dmadir,
+ attrs);
+ unpin_user_pages(pagelist, pagecnt);
+ kfree(sglist);
+ return -ENOMEM;
+ }
+ }
+ dmahead->sglist = sglist;
+ dmahead->sgcnt = sgcnt;
+ dmahead->pagelist = pagelist;
+ dmahead->pagecnt = pagecnt;
+
+ /* skipping config */
+ init_completion(&dmahead->cmp);
+
+ if (nappwords_i > XDMA_MAX_APPWORDS)
+ nappwords_i = XDMA_MAX_APPWORDS;
+
+ if (nappwords_o > XDMA_MAX_APPWORDS)
+ nappwords_o = XDMA_MAX_APPWORDS;
+
+ dmahead->nappwords_o = nappwords_o;
+
+ status = xdma_setup_hw_desc(chan, dmahead, sglist, sgcnt,
+ dmadir, nappwords_i, appwords_i);
+ if (status) {
+ pr_err("setup hw desc failed\n");
+ if (dmahead->pagelist) {
+ get_dma_ops(chan->dev)->unmap_sg(chan->dev,
+ pagelist,
+ pagecnt,
+ dmadir,
+ attrs);
+ unpin_user_pages(pagelist, pagecnt);
+ } else if (!dp) {
+ get_dma_ops(chan->dev)->unmap_sg(chan->dev,
+ sglist,
+ sgcnt,
+ dmadir,
+ attrs);
+ }
+ kfree(dmahead->sglist);
+ return -ENOMEM;
+ }
+
+ *dmaheadpp = dmahead;
+ return 0;
+}
+EXPORT_SYMBOL(xdma_submit);
+
+/*
+ * xdma_wait - wait for a transfer previously queued by xdma_submit()
+ * @dmahead: transfer state returned by xdma_submit()
+ * @user_flags: caller CF_FLAG_* hints (cache handling, contiguity)
+ * @operating_flags: in/out; XDMA_FLAGS_TRYWAIT requests a non-blocking
+ *                   poll, XDMA_FLAGS_WAIT_COMPLETE is set on completion
+ *
+ * In poll mode the channel descriptors are reaped synchronously;
+ * otherwise we (try-)wait on the completion posted by the IRQ path.
+ * A TRYWAIT that finds the transfer still in flight returns 0 without
+ * setting XDMA_FLAGS_WAIT_COMPLETE.  Once complete, the scatterlist
+ * (or the pinned user pages) is unmapped and the sglist freed.
+ * Returns 0.
+ */
+int xdma_wait(struct xdma_head *dmahead,
+	      unsigned int user_flags,
+	      unsigned int *operating_flags)
+{
+	struct xdma_chan *chan = dmahead->chan;
+	unsigned long attrs = 0;
+
+	if (chan->poll_mode) {
+		xilinx_chan_desc_cleanup(chan);
+		*operating_flags |= XDMA_FLAGS_WAIT_COMPLETE;
+	} else {
+		if (*operating_flags & XDMA_FLAGS_TRYWAIT) {
+			/* Non-blocking: report "not done yet" via flags. */
+			if (!try_wait_for_completion(&dmahead->cmp))
+				return 0;
+			*operating_flags |= XDMA_FLAGS_WAIT_COMPLETE;
+		} else {
+			wait_for_completion(&dmahead->cmp);
+			*operating_flags |= XDMA_FLAGS_WAIT_COMPLETE;
+		}
+	}
+
+	/*
+	 * dma-buf-backed transfers are unmapped elsewhere; only unmap
+	 * what this driver mapped itself in xdma_submit().
+	 */
+	if (!dmahead->dmabuf) {
+		/* Skip the CPU sync unless the caller asked for it. */
+		if (!(user_flags & CF_FLAG_CACHE_FLUSH_INVALIDATE))
+			attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+
+		if (user_flags & CF_FLAG_PHYSICALLY_CONTIGUOUS) {
+			get_dma_ops(chan->dev)->unmap_sg(chan->dev,
+							 dmahead->sglist,
+							 dmahead->sgcnt,
+							 dmahead->dmadir,
+							 attrs);
+		} else {
+			/* Non-contiguous path pinned user pages; unpin them. */
+			get_dma_ops(chan->dev)->unmap_sg(chan->dev,
+							 dmahead->pagelist,
+							 dmahead->pagecnt,
+							 dmahead->dmadir,
+							 attrs);
+			unpin_user_pages(dmahead->pagelist, dmahead->pagecnt);
+		}
+	}
+	kfree(dmahead->sglist);
+
+	return 0;
+}
+EXPORT_SYMBOL(xdma_wait);
+
+/*
+ * xdma_getconfig - report the channel's interrupt coalescing settings
+ * @chan: DMA channel
+ * @irq_thresh: out, coalescing threshold (completed-BD count per IRQ)
+ * @irq_delay: out, delay-interrupt timeout counter
+ *
+ * Returns 0.
+ */
+int xdma_getconfig(struct xdma_chan *chan,
+		   unsigned char *irq_thresh,
+		   unsigned char *irq_delay)
+{
+	/*
+	 * Read the control register once: both fields live in CR, and a
+	 * single MMIO read avoids returning values from two different
+	 * snapshots of the register.
+	 */
+	unsigned long val = DMA_IN(&chan->regs->cr);
+
+	*irq_thresh = (val >> XDMA_COALESCE_SHIFT) & 0xff;
+	*irq_delay = (val >> XDMA_DELAY_SHIFT) & 0xff;
+	return 0;
+}
+EXPORT_SYMBOL(xdma_getconfig);
+
+/*
+ * xdma_setconfig - program the channel's interrupt coalescing settings
+ * @chan: DMA channel
+ * @irq_thresh: coalescing threshold (completed-BD count per IRQ)
+ * @irq_delay: delay-interrupt timeout counter
+ *
+ * Returns 0 on success, -EBUSY if the channel is currently running.
+ */
+int xdma_setconfig(struct xdma_chan *chan,
+		   unsigned char irq_thresh,
+		   unsigned char irq_delay)
+{
+	unsigned long val;
+
+	if (dma_is_running(chan))
+		return -EBUSY;
+
+	val = DMA_IN(&chan->regs->cr);
+	/*
+	 * Use the register mask macros instead of shifting 0xff by up to
+	 * 24 bits: "0xff << 24" overflows a signed int, which is
+	 * undefined behaviour in C.
+	 */
+	val &= ~((unsigned long)(XDMA_XR_DELAY_MASK | XDMA_XR_COALESCE_MASK));
+	val |= (((unsigned long)irq_thresh << XDMA_COALESCE_SHIFT) |
+		((unsigned long)irq_delay << XDMA_DELAY_SHIFT));
+
+	DMA_OUT(&chan->regs->cr, val);
+	return 0;
+}
+EXPORT_SYMBOL(xdma_setconfig);
+
+static const struct of_device_id gic_match[] = {
+ { .compatible = "arm,cortex-a9-gic", },
+ { .compatible = "arm,cortex-a15-gic", },
+ { },
+};
+
+static struct device_node *gic_node;
+
+/*
+ * xlate_irq - translate a hardware IRQ number to a Linux virq
+ * @hwirq: hardware interrupt number from platform data
+ *
+ * Creates (or reuses) a mapping on the GIC interrupt domain for @hwirq
+ * and returns the Linux interrupt number.  Falls back to returning
+ * @hwirq itself when no GIC node is found or the mapping fails.
+ */
+unsigned int xlate_irq(unsigned int hwirq)
+{
+	struct of_phandle_args irq_data;
+	unsigned int irq;
+
+	/* Lazily find and cache the GIC device-tree node. */
+	if (!gic_node)
+		gic_node = of_find_matching_node(NULL, gic_match);
+
+	if (WARN_ON(!gic_node))
+		return hwirq;
+
+	irq_data.np = gic_node;
+	irq_data.args_count = 3;
+	irq_data.args[0] = 0;	/* first GIC cell: 0 selects an SPI */
+#if XLNK_SYS_BIT_WIDTH == 32
+	irq_data.args[1] = hwirq - 32; /* GIC SPI offset */
+#else
+	irq_data.args[1] = hwirq;
+#endif
+	irq_data.args[2] = IRQ_TYPE_LEVEL_HIGH;
+
+	irq = irq_create_of_mapping(&irq_data);
+	if (WARN_ON(!irq))
+		irq = hwirq;
+
+	pr_info("%s: hwirq %d, irq %d\n", __func__, hwirq, irq);
+
+	return irq;
+}
+
+/*
+ * Brute-force probing for xilinx DMA: channel layout and IRQs come
+ * from platform data rather than the device tree.  Maps the register
+ * window, sets up each channel (IRQ handler, tasklets, BD ring) and
+ * adds the device to the global DMA device list.
+ */
+static int xdma_probe(struct platform_device *pdev)
+{
+	struct xdma_device *xdev;
+	struct resource *res;
+	int err, i, j;
+	struct xdma_chan *chan;
+	struct xdma_device_config *dma_config;
+	int dma_chan_dir;
+	int dma_chan_reg_offset;
+
+	pr_info("%s: probe dma %p, nres %d, id %d\n", __func__,
+		&pdev->dev, pdev->num_resources, pdev->id);
+
+	xdev = devm_kzalloc(&pdev->dev, sizeof(struct xdma_device), GFP_KERNEL);
+	if (!xdev)
+		return -ENOMEM;
+	xdev->dev = &pdev->dev;
+
+	/* Set this as configurable once HPC works */
+	arch_setup_dma_ops(&pdev->dev, 0, 0, NULL, false);
+	dma_set_mask(&pdev->dev, 0xFFFFFFFFFFFFFFFFull);
+
+	dma_config = (struct xdma_device_config *)xdev->dev->platform_data;
+	/* Platform data is mandatory; don't dereference a NULL config. */
+	if (!dma_config)
+		return -EINVAL;
+	if (dma_config->channel_count < 1 || dma_config->channel_count > 2)
+		return -EFAULT;
+
+	/* Get the memory resource */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	xdev->regs = devm_ioremap_resource(&pdev->dev, res);
+	/*
+	 * devm_ioremap_resource() returns an ERR_PTR() on failure, never
+	 * NULL, so the old "!xdev->regs" check could never trigger.
+	 */
+	if (IS_ERR(xdev->regs)) {
+		dev_err(&pdev->dev, "unable to iomap registers\n");
+		return PTR_ERR(xdev->regs);
+	}
+
+	dev_info(&pdev->dev, "AXIDMA device %d physical base address=%pa\n",
+		 pdev->id, &res->start);
+	dev_info(&pdev->dev, "AXIDMA device %d remapped to %pa\n",
+		 pdev->id, &xdev->regs);
+
+	/* Allocate the channels */
+
+	dev_info(&pdev->dev, "has %d channel(s)\n", dma_config->channel_count);
+	for (i = 0; i < dma_config->channel_count; i++) {
+		chan = devm_kzalloc(&pdev->dev, sizeof(*chan), GFP_KERNEL);
+		if (!chan)
+			return -ENOMEM;
+
+		/* Channel type string decides the transfer direction. */
+		dma_chan_dir = strcmp(dma_config->channel_config[i].type,
+				      "axi-dma-mm2s-channel") ?
+					DMA_FROM_DEVICE :
+					DMA_TO_DEVICE;
+		/* S2MM registers sit 0x30 above the MM2S block. */
+		dma_chan_reg_offset = (dma_chan_dir == DMA_TO_DEVICE) ?
+					0 :
+					0x30;
+
+		/* Initialize channel parameters */
+		chan->id = i;
+		chan->regs = xdev->regs + dma_chan_reg_offset;
+		chan->dev = xdev->dev;
+		chan->max_len = XDMA_MAX_TRANS_LEN;
+		chan->direction = dma_chan_dir;
+		/* Bounded print; sprintf() could overrun chan->name[64]. */
+		snprintf(chan->name, sizeof(chan->name), "%s:%d",
+			 dma_config->name, chan->id);
+		pr_info("  chan %d name: %s\n", chan->id, chan->name);
+		pr_info("  chan %d direction: %s\n", chan->id,
+			dma_chan_dir == DMA_FROM_DEVICE ?
+				"FROM_DEVICE" : "TO_DEVICE");
+
+		spin_lock_init(&chan->lock);
+		tasklet_init(&chan->tasklet,
+			     xdma_tasklet,
+			     (unsigned long)chan);
+		tasklet_init(&chan->dma_err_tasklet,
+			     xdma_err_tasklet,
+			     (unsigned long)chan);
+
+		xdev->chan[chan->id] = chan;
+
+		/* The IRQ resource */
+		chan->irq = xlate_irq(dma_config->channel_config[i].irq);
+		if (chan->irq <= 0) {
+			pr_err("get_resource for IRQ for dev %d failed\n",
+			       pdev->id);
+			return -ENODEV;
+		}
+
+		err = devm_request_irq(&pdev->dev,
+				       chan->irq,
+				       dma_chan_dir == DMA_TO_DEVICE ?
+					xdma_tx_intr_handler :
+					xdma_rx_intr_handler,
+				       IRQF_SHARED,
+				       pdev->name,
+				       chan);
+		if (err) {
+			dev_err(&pdev->dev, "unable to request IRQ\n");
+			return err;
+		}
+		pr_info("  chan%d irq: %d\n", chan->id, chan->irq);
+
+		chan->poll_mode = dma_config->channel_config[i].poll_mode;
+		pr_info("  chan%d poll mode: %s\n",
+			chan->id,
+			chan->poll_mode ? "on" : "off");
+
+		/* Allocate channel BD's */
+		err = xdma_alloc_chan_descriptors(xdev->chan[chan->id]);
+		if (err) {
+			dev_err(&pdev->dev, "unable to allocate BD's\n");
+			return -ENOMEM;
+		}
+		pr_info("  chan%d bd ring @ 0x%p (size: 0x%x bytes)\n",
+			chan->id,
+			(void *)chan->bd_phys_addr,
+			chan->bd_chain_size);
+
+		err = dma_init(xdev->chan[chan->id]);
+		if (err) {
+			dev_err(&pdev->dev, "DMA init failed\n");
+			/* FIXME Check this - unregister all chan resources */
+			for (j = 0; j <= i; j++)
+				xdma_free_chan_resources(xdev->chan[j]);
+			return -EIO;
+		}
+	}
+	xdev->channel_count = dma_config->channel_count;
+	pdev->dev.release = xdma_release;
+	/* Add the DMA device to the global list */
+	mutex_lock(&dma_list_mutex);
+	list_add_tail(&xdev->node, &dma_device_list);
+	mutex_unlock(&dma_list_mutex);
+
+	platform_set_drvdata(pdev, xdev);
+
+	return 0;
+}
+
+/* Detach the device from the global DMA list and release every channel. */
+static int xdma_remove(struct platform_device *pdev)
+{
+	int i;
+	struct xdma_device *xdev = platform_get_drvdata(pdev);
+
+	/* Remove the DMA device from the global list */
+	mutex_lock(&dma_list_mutex);
+	list_del(&xdev->node);
+	mutex_unlock(&dma_list_mutex);
+
+	/* Free per-channel resources (see xdma_free_chan_resources()). */
+	for (i = 0; i < XDMA_MAX_CHANS_PER_DEVICE; i++) {
+		if (xdev->chan[i])
+			xdma_free_chan_resources(xdev->chan[i]);
+	}
+
+	return 0;
+}
+
+static struct platform_driver xdma_driver = {
+ .probe = xdma_probe,
+ .remove = xdma_remove,
+ .driver = {
+ .name = "xilinx-axidma",
+ },
+};
+
+module_platform_driver(xdma_driver);
+
+MODULE_DESCRIPTION("Xilinx DMA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/apf/xilinx-dma-apf.h b/drivers/staging/apf/xilinx-dma-apf.h
new file mode 100644
index 000000000000..8837fec01779
--- /dev/null
+++ b/drivers/staging/apf/xilinx-dma-apf.h
@@ -0,0 +1,234 @@
+/*
+ * Xilinx AXI DMA Engine support
+ *
+ * Copyright (C) 2010 Xilinx, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#ifndef __XILINX_DMA_APF_H
+#define __XILINX_DMA_APF_H
+
+/* ioctls */
+#include <linux/ioctl.h>
+
+/* tasklet */
+#include <linux/interrupt.h>
+
+/* dma stuff */
+#include <linux/dma-mapping.h>
+
+/* xlnk structures */
+#include "xlnk.h"
+#include "xlnk-sysdef.h"
+
+#define XDMA_IOC_MAGIC 'X'
+#define XDMA_IOCRESET _IO(XDMA_IOC_MAGIC, 0)
+#define XDMA_IOCREQUEST _IOWR(XDMA_IOC_MAGIC, 1, unsigned long)
+#define XDMA_IOCRELEASE _IOWR(XDMA_IOC_MAGIC, 2, unsigned long)
+#define XDMA_IOCSUBMIT _IOWR(XDMA_IOC_MAGIC, 3, unsigned long)
+#define XDMA_IOCWAIT _IOWR(XDMA_IOC_MAGIC, 4, unsigned long)
+#define XDMA_IOCGETCONFIG _IOWR(XDMA_IOC_MAGIC, 5, unsigned long)
+#define XDMA_IOCSETCONFIG _IOWR(XDMA_IOC_MAGIC, 6, unsigned long)
+#define XDMA_IOC_MAXNR 6
+
+/* Specific hardware configuration-related constants
+ */
+#define XDMA_RESET_LOOP 1000000
+#define XDMA_HALT_LOOP 1000000
+#define XDMA_NO_CHANGE 0xFFFF
+
+/* General register bits definitions
+ */
+#define XDMA_CR_RESET_MASK 0x00000004 /* Reset DMA engine */
+#define XDMA_CR_RUNSTOP_MASK 0x00000001 /* Start/stop DMA engine */
+
+#define XDMA_SR_HALTED_MASK 0x00000001 /* DMA channel halted */
+#define XDMA_SR_IDLE_MASK 0x00000002 /* DMA channel idle */
+
+#define XDMA_SR_ERR_INTERNAL_MASK 0x00000010/* Datamover internal err */
+#define XDMA_SR_ERR_SLAVE_MASK 0x00000020 /* Datamover slave err */
+#define XDMA_SR_ERR_DECODE_MASK 0x00000040 /* Datamover decode err */
+#define XDMA_SR_ERR_SG_INT_MASK 0x00000100 /* SG internal err */
+#define XDMA_SR_ERR_SG_SLV_MASK 0x00000200 /* SG slave err */
+#define XDMA_SR_ERR_SG_DEC_MASK 0x00000400 /* SG decode err */
+#define XDMA_SR_ERR_ALL_MASK 0x00000770 /* All errors */
+
+#define XDMA_XR_IRQ_IOC_MASK 0x00001000 /* Completion interrupt */
+#define XDMA_XR_IRQ_DELAY_MASK 0x00002000 /* Delay interrupt */
+#define XDMA_XR_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */
+#define XDMA_XR_IRQ_ALL_MASK 0x00007000 /* All interrupts */
+
+#define XDMA_XR_DELAY_MASK 0xFF000000 /* Delay timeout counter */
+#define XDMA_XR_COALESCE_MASK 0x00FF0000 /* Coalesce counter */
+
+#define XDMA_DELAY_SHIFT 24
+#define XDMA_COALESCE_SHIFT 16
+
+#define XDMA_DELAY_MAX 0xFF /**< Maximum delay counter value */
+#define XDMA_COALESCE_MAX 0xFF /**< Maximum coalescing counter value */
+
+/* BD definitions for Axi DMA
+ */
+#define XDMA_BD_STS_ACTUAL_LEN_MASK 0x007FFFFF
+#define XDMA_BD_STS_COMPL_MASK 0x80000000
+#define XDMA_BD_STS_ERR_MASK 0x70000000
+#define XDMA_BD_STS_ALL_MASK 0xF0000000
+
+/* DMA BD special bits definitions
+ */
+#define XDMA_BD_SOP 0x08000000 /* Start of packet bit */
+#define XDMA_BD_EOP 0x04000000 /* End of packet bit */
+
+/* BD Software Flag definitions for Axi DMA
+ */
+#define XDMA_BD_SF_POLL_MODE_MASK 0x00000002
+#define XDMA_BD_SF_SW_DONE_MASK 0x00000001
+
+/* driver defines */
+#define XDMA_MAX_BD_CNT 16384
+#define XDMA_MAX_CHANS_PER_DEVICE 2
+#define XDMA_MAX_TRANS_LEN 0x7FF000
+#define XDMA_MAX_APPWORDS 5
+#define XDMA_BD_CLEANUP_THRESHOLD ((XDMA_MAX_BD_CNT * 8) / 10)
+
+#define XDMA_FLAGS_WAIT_COMPLETE 1
+#define XDMA_FLAGS_TRYWAIT 2
+
+/* Platform data definition until ARM supports device tree */
+struct xdma_channel_config {
+ char *type;
+ unsigned int include_dre;
+ unsigned int datawidth;
+ unsigned int max_burst_len;
+ unsigned int irq;
+ unsigned int poll_mode;
+ unsigned int lite_mode;
+};
+
+struct xdma_device_config {
+ char *type;
+ char *name;
+ unsigned int include_sg;
+ unsigned int sg_include_stscntrl_strm; /* dma only */
+ unsigned int channel_count;
+ struct xdma_channel_config *channel_config;
+};
+
+struct xdma_desc_hw {
+ xlnk_intptr_type next_desc; /* 0x00 */
+#if XLNK_SYS_BIT_WIDTH == 32
+ u32 pad1; /* 0x04 */
+#endif
+ xlnk_intptr_type src_addr; /* 0x08 */
+#if XLNK_SYS_BIT_WIDTH == 32
+ u32 pad2; /* 0x0c */
+#endif
+ u32 addr_vsize; /* 0x10 */
+ u32 hsize; /* 0x14 */
+ u32 control; /* 0x18 */
+ u32 status; /* 0x1c */
+ u32 app[5]; /* 0x20 */
+ xlnk_intptr_type dmahead;
+#if XLNK_SYS_BIT_WIDTH == 32
+ u32 Reserved0;
+#endif
+ u32 sw_flag; /* 0x3C */
+} __aligned(64);
+
+/* shared by all Xilinx DMA engines */
+struct xdma_regs {
+ u32 cr; /* 0x00 Control Register */
+ u32 sr; /* 0x04 Status Register */
+ u32 cdr; /* 0x08 Current Descriptor Register */
+ u32 cdr_hi;
+ u32 tdr; /* 0x10 Tail Descriptor Register */
+ u32 tdr_hi;
+ u32 src; /* 0x18 Source Address Register (cdma) */
+ u32 src_hi;
+ u32 dst; /* 0x20 Destination Address Register (cdma) */
+ u32 dst_hi;
+ u32 btt_ref; /* 0x28 Bytes To Transfer (cdma) or
+ * park_ref (vdma)
+ */
+ u32 version; /* 0x2c version (vdma) */
+};
+
+/* Per DMA specific operations should be embedded in the channel structure */
+struct xdma_chan {
+ char name[64];
+ struct xdma_regs __iomem *regs;
+ struct device *dev; /* The dma device */
+ struct xdma_desc_hw *bds[XDMA_MAX_BD_CNT];
+ dma_addr_t bd_phys_addr;
+ u32 bd_chain_size;
+ int bd_cur;
+ int bd_tail;
+ unsigned int bd_used; /* # of BDs passed to hw chan */
+ enum dma_data_direction direction; /* Transfer direction */
+ int id; /* Channel ID */
+ int irq; /* Channel IRQ */
+ int poll_mode; /* Poll mode turned on? */
+ spinlock_t lock; /* Descriptor operation lock */
+ struct tasklet_struct tasklet; /* Cleanup work after irq */
+ struct tasklet_struct dma_err_tasklet; /* Cleanup work after irq */
+ int max_len; /* Maximum len per transfer */
+ int err; /* Channel has errors */
+ int client_count;
+};
+
+struct xdma_device {
+ void __iomem *regs;
+ struct device *dev;
+ struct list_head node;
+ struct xdma_chan *chan[XDMA_MAX_CHANS_PER_DEVICE];
+ u8 channel_count;
+};
+
+/*
+ * State for one outstanding transfer: created by xdma_submit(),
+ * completed and torn down by xdma_wait().
+ */
+struct xdma_head {
+	xlnk_intptr_type userbuf;	/* user-space buffer address */
+	unsigned int size;		/* transfer length in bytes */
+	unsigned int dmaflag;
+	enum dma_data_direction dmadir;	/* DMA_TO_DEVICE / DMA_FROM_DEVICE */
+	struct scatterlist *sglist;	/* merged sglist handed to the HW */
+	unsigned int sgcnt;		/* entries in @sglist */
+	struct scatterlist *pagelist;	/* per-page list for pinned user pages */
+	unsigned int pagecnt;		/* entries in @pagelist */
+	struct completion cmp;		/* completed when the transfer is done */
+	struct xdma_chan *chan;		/* channel the transfer was queued on */
+	unsigned int nappwords_o;	/* number of output app words requested */
+	u32 appwords_o[XDMA_MAX_APPWORDS];	/* output app words */
+	unsigned int userflag;
+	u32 last_bd_index;	/* presumably the transfer's final BD index — set by the submit path */
+	struct xlnk_dmabuf_reg *dmabuf;	/* non-NULL for dma-buf transfers */
+};
+
+struct xdma_chan *xdma_request_channel(char *name);
+void xdma_release_channel(struct xdma_chan *chan);
+void xdma_release_all_channels(void);
+int xdma_submit(struct xdma_chan *chan,
+ xlnk_intptr_type userbuf,
+ void *kaddr,
+ unsigned int size,
+ unsigned int nappwords_i,
+ u32 *appwords_i,
+ unsigned int nappwords_o,
+ unsigned int user_flags,
+ struct xdma_head **dmaheadpp,
+ struct xlnk_dmabuf_reg *dp);
+int xdma_wait(struct xdma_head *dmahead,
+ unsigned int user_flags,
+ unsigned int *operating_flags);
+int xdma_getconfig(struct xdma_chan *chan,
+ unsigned char *irq_thresh,
+ unsigned char *irq_delay);
+int xdma_setconfig(struct xdma_chan *chan,
+ unsigned char irq_thresh,
+ unsigned char irq_delay);
+unsigned int xlate_irq(unsigned int hwirq);
+
+#endif
diff --git a/drivers/staging/apf/xlnk-eng.c b/drivers/staging/apf/xlnk-eng.c
new file mode 100644
index 000000000000..bc40128e93cf
--- /dev/null
+++ b/drivers/staging/apf/xlnk-eng.c
@@ -0,0 +1,242 @@
+/*
+ * Xilinx XLNK Engine Driver
+ *
+ * Copyright (C) 2010 Xilinx, Inc. All rights reserved.
+ *
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/spinlock_types.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+#include <linux/uio_driver.h>
+
+
+#include "xlnk-eng.h"
+
+static DEFINE_MUTEX(xlnk_eng_list_mutex);
+static LIST_HEAD(xlnk_eng_list);
+
+/* Publish @xlnk_dev on the global engine list.  Always returns 0. */
+int xlnk_eng_register_device(struct xlnk_eng_device *xlnk_dev)
+{
+	mutex_lock(&xlnk_eng_list_mutex);
+	list_add_tail(&xlnk_dev->global_node, &xlnk_eng_list);
+	mutex_unlock(&xlnk_eng_list_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL(xlnk_eng_register_device);
+
+
+/* Remove @xlnk_dev from the global engine list. */
+void xlnk_eng_unregister_device(struct xlnk_eng_device *xlnk_dev)
+{
+	mutex_lock(&xlnk_eng_list_mutex);
+	list_del(&xlnk_dev->global_node);
+	mutex_unlock(&xlnk_eng_list_mutex);
+}
+EXPORT_SYMBOL(xlnk_eng_unregister_device);
+
+/*
+ * Find a registered engine whose device name equals @name and claim it
+ * through its alloc() hook.  Returns the claimed engine, or NULL when
+ * no engine matches or the matching engine is already in use.  The
+ * list mutex is held across both the lookup and the alloc() call, so
+ * concurrent claims are serialized.
+ */
+struct xlnk_eng_device *xlnk_eng_request_by_name(char *name)
+{
+	struct xlnk_eng_device *device, *_d;
+	int found = 0;
+
+	mutex_lock(&xlnk_eng_list_mutex);
+
+	list_for_each_entry_safe(device, _d, &xlnk_eng_list, global_node) {
+		if (!strcmp(dev_name(device->dev), name)) {
+			found = 1;
+			break;
+		}
+	}
+	/* alloc() returns NULL if the engine is already claimed. */
+	if (found)
+		device = device->alloc(device);
+	else
+		device = NULL;
+
+	mutex_unlock(&xlnk_eng_list_mutex);
+
+	return device;
+}
+EXPORT_SYMBOL(xlnk_eng_request_by_name);
+
+/**
+ * struct xilinx_xlnk_eng_device - device structure for xilinx_xlnk_eng
+ * @common: common device info
+ * @base: base address for device
+ * @lock: lock used by device
+ * @cnt: usage count
+ * @info: info for registering and unregistering uio device
+ */
+struct xilinx_xlnk_eng_device {
+ struct xlnk_eng_device common;
+ void __iomem *base;
+ spinlock_t lock;
+ int cnt;
+ struct uio_info *info;
+};
+
+/*
+ * Device release callback: hand the engine back via its free() hook.
+ *
+ * The original code checked "!xlnk_dev" where xlnk_dev was
+ * &xdev->common; that only caught a NULL xdev by accident (common is
+ * the first member).  Check the drvdata pointer itself instead.
+ */
+static void xlnk_eng_release(struct device *dev)
+{
+	struct xilinx_xlnk_eng_device *xdev;
+
+	xdev = dev_get_drvdata(dev);
+	if (!xdev)
+		return;
+
+	xdev->common.free(&xdev->common);
+}
+
+#define DRIVER_NAME "xilinx-xlnk-eng"
+
+#define to_xilinx_xlnk(dev) container_of(dev, \
+ struct xilinx_xlnk_eng_device, common)
+
+/*
+ * Claim exclusive use of the engine: the first caller gets the device
+ * back, later callers get NULL until xilinx_xlnk_free() clears the
+ * count.
+ * NOTE(review): xdev->lock is not taken here; exclusivity relies on
+ * callers serializing via xlnk_eng_list_mutex (as
+ * xlnk_eng_request_by_name() does) — confirm there are no other
+ * call sites.
+ */
+static struct xlnk_eng_device *xilinx_xlnk_alloc(
+	struct xlnk_eng_device *xlnkdev)
+{
+	struct xilinx_xlnk_eng_device *xdev;
+	struct xlnk_eng_device *retdev;
+
+	xdev = to_xilinx_xlnk(xlnkdev);
+
+	if (xdev->cnt == 0) {
+		xdev->cnt++;
+		retdev = xlnkdev;
+	} else
+		retdev = NULL;
+
+	return retdev;
+}
+
+/* Release the single-user claim taken by xilinx_xlnk_alloc(). */
+static void xilinx_xlnk_free(struct xlnk_eng_device *xlnkdev)
+{
+	to_xilinx_xlnk(xlnkdev)->cnt = 0;
+}
+
+/*
+ * Probe: map the engine's register window, register it on the global
+ * engine list and expose it to user space as a UIO device.
+ */
+static int xlnk_eng_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct xilinx_xlnk_eng_device *xdev;
+	struct uio_info *info;
+	char *devname;
+
+	pr_info("xlnk_eng_probe ...\n");
+	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+	if (!xdev) {
+		dev_err(&pdev->dev, "Not enough memory for device\n");
+		return -ENOMEM;
+	}
+
+	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+	if (!info) {
+		dev_err(&pdev->dev, "Not enough memory for device\n");
+		return -ENOMEM;
+	}
+	xdev->info = info;
+	devname = devm_kzalloc(&pdev->dev, 64, GFP_KERNEL);
+	if (!devname) {
+		dev_err(&pdev->dev, "Not enough memory for device\n");
+		return -ENOMEM;
+	}
+	/* Bounded print; the original sprintf() could overrun 64 bytes. */
+	snprintf(devname, 64, "%s.%d", DRIVER_NAME, pdev->id);
+	pr_info("uio name %s\n", devname);
+
+	/* Map the device registers. */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	xdev->base = devm_ioremap_resource(&pdev->dev, res);
+	/*
+	 * devm_ioremap_resource() returns an ERR_PTR() on failure, never
+	 * NULL; check with IS_ERR() before using the mapping.
+	 */
+	if (IS_ERR(xdev->base)) {
+		dev_err(&pdev->dev, "unable to iomap registers\n");
+		return PTR_ERR(xdev->base);
+	}
+
+	/* %pa types should be used here */
+	dev_info(&pdev->dev, "physical base : 0x%lx\n",
+		 (unsigned long)res->start);
+	dev_info(&pdev->dev, "register range : 0x%lx\n",
+		 (unsigned long)resource_size(res));
+	dev_info(&pdev->dev, "base remapped to: 0x%lx\n",
+		 (unsigned long)xdev->base);
+
+	info->mem[0].addr = res->start;
+	info->mem[0].size = resource_size(res);
+	info->mem[0].memtype = UIO_MEM_PHYS;
+	info->mem[0].internal_addr = xdev->base;
+
+	info->name = devname;
+	info->version = "0.0.1";
+	info->irq = -1;
+
+	xdev->common.dev = &pdev->dev;
+	xdev->common.alloc = xilinx_xlnk_alloc;
+	xdev->common.free = xilinx_xlnk_free;
+	xdev->common.dev->release = xlnk_eng_release;
+
+	dev_set_drvdata(&pdev->dev, xdev);
+
+	spin_lock_init(&xdev->lock);
+	xdev->cnt = 0;
+
+	xlnk_eng_register_device(&xdev->common);
+
+	if (uio_register_device(&pdev->dev, info)) {
+		dev_err(&pdev->dev, "uio_register_device failed\n");
+		/* Don't leave a stale entry on the global engine list. */
+		xlnk_eng_unregister_device(&xdev->common);
+		return -ENODEV;
+	}
+	dev_info(&pdev->dev, "xilinx-xlnk-eng uio registered\n");
+
+	return 0;
+}
+
+/* Tear down the UIO export and drop the engine from the global list. */
+static int xlnk_eng_remove(struct platform_device *pdev)
+{
+	struct uio_info *info;
+	struct xilinx_xlnk_eng_device *xdev;
+
+	xdev = dev_get_drvdata(&pdev->dev);
+	info = xdev->info;
+
+	uio_unregister_device(info);
+	dev_info(&pdev->dev, "xilinx-xlnk-eng uio unregistered\n");
+	xlnk_eng_unregister_device(&xdev->common);
+
+	return 0;
+}
+
+static struct platform_driver xlnk_eng_driver = {
+ .probe = xlnk_eng_probe,
+ .remove = xlnk_eng_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ },
+};
+
+module_platform_driver(xlnk_eng_driver);
+
+MODULE_DESCRIPTION("Xilinx xlnk engine generic driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/apf/xlnk-eng.h b/drivers/staging/apf/xlnk-eng.h
new file mode 100644
index 000000000000..9f9519664705
--- /dev/null
+++ b/drivers/staging/apf/xlnk-eng.h
@@ -0,0 +1,33 @@
+/*
+ * Xilinx XLNK Engine Driver
+ *
+ * Copyright (C) 2010 Xilinx, Inc. All rights reserved.
+ *
+ *
+ */
+
+#ifndef XLNK_ENG_H
+#define XLNK_ENG_H
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/spinlock_types.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+
+struct xlnk_eng_device {
+ struct list_head global_node;
+ struct xlnk_eng_device * (*alloc)(struct xlnk_eng_device *xdev);
+ void (*free)(struct xlnk_eng_device *xdev);
+ struct device *dev;
+};
+extern int xlnk_eng_register_device(struct xlnk_eng_device *xlnk_dev);
+extern void xlnk_eng_unregister_device(struct xlnk_eng_device *xlnk_dev);
+extern struct xlnk_eng_device *xlnk_eng_request_by_name(char *name);
+
+#endif
+
diff --git a/drivers/staging/apf/xlnk-ioctl.h b/drivers/staging/apf/xlnk-ioctl.h
new file mode 100644
index 000000000000..d909fa65459f
--- /dev/null
+++ b/drivers/staging/apf/xlnk-ioctl.h
@@ -0,0 +1,37 @@
+#ifndef _XLNK_IOCTL_H
+#define _XLNK_IOCTL_H
+
+#include <linux/ioctl.h>
+
+#define XLNK_IOC_MAGIC 'X'
+
+#define XLNK_IOCRESET _IO(XLNK_IOC_MAGIC, 0)
+
+#define XLNK_IOCALLOCBUF _IOWR(XLNK_IOC_MAGIC, 2, unsigned long)
+#define XLNK_IOCFREEBUF _IOWR(XLNK_IOC_MAGIC, 3, unsigned long)
+#define XLNK_IOCADDDMABUF _IOWR(XLNK_IOC_MAGIC, 4, unsigned long)
+#define XLNK_IOCCLEARDMABUF _IOWR(XLNK_IOC_MAGIC, 5, unsigned long)
+
+#define XLNK_IOCDMAREQUEST _IOWR(XLNK_IOC_MAGIC, 7, unsigned long)
+#define XLNK_IOCDMASUBMIT _IOWR(XLNK_IOC_MAGIC, 8, unsigned long)
+#define XLNK_IOCDMAWAIT _IOWR(XLNK_IOC_MAGIC, 9, unsigned long)
+#define XLNK_IOCDMARELEASE _IOWR(XLNK_IOC_MAGIC, 10, unsigned long)
+
+#define XLNK_IOCMEMOP _IOWR(XLNK_IOC_MAGIC, 25, unsigned long)
+#define XLNK_IOCDEVREGISTER _IOWR(XLNK_IOC_MAGIC, 16, unsigned long)
+#define XLNK_IOCDMAREGISTER _IOWR(XLNK_IOC_MAGIC, 17, unsigned long)
+#define XLNK_IOCDEVUNREGISTER _IOWR(XLNK_IOC_MAGIC, 18, unsigned long)
+#define XLNK_IOCCDMAREQUEST _IOWR(XLNK_IOC_MAGIC, 19, unsigned long)
+#define XLNK_IOCCDMASUBMIT _IOWR(XLNK_IOC_MAGIC, 20, unsigned long)
+#define XLNK_IOCMCDMAREGISTER _IOWR(XLNK_IOC_MAGIC, 23, unsigned long)
+#define XLNK_IOCCACHECTRL _IOWR(XLNK_IOC_MAGIC, 24, unsigned long)
+
+#define XLNK_IOCIRQREGISTER _IOWR(XLNK_IOC_MAGIC, 35, unsigned long)
+#define XLNK_IOCIRQUNREGISTER _IOWR(XLNK_IOC_MAGIC, 36, unsigned long)
+#define XLNK_IOCIRQWAIT _IOWR(XLNK_IOC_MAGIC, 37, unsigned long)
+
+#define XLNK_IOCSHUTDOWN _IOWR(XLNK_IOC_MAGIC, 100, unsigned long)
+#define XLNK_IOCRECRES _IOWR(XLNK_IOC_MAGIC, 101, unsigned long)
+#define XLNK_IOC_MAXNR 101
+
+#endif
diff --git a/drivers/staging/apf/xlnk-sysdef.h b/drivers/staging/apf/xlnk-sysdef.h
new file mode 100644
index 000000000000..b6334be3b9c4
--- /dev/null
+++ b/drivers/staging/apf/xlnk-sysdef.h
@@ -0,0 +1,34 @@
+#ifndef XLNK_SYSDEF_H
+#define XLNK_SYSDEF_H
+
+#if __SIZEOF_POINTER__ == 4
+ #define XLNK_SYS_BIT_WIDTH 32
+#elif __SIZEOF_POINTER__ == 8
+ #define XLNK_SYS_BIT_WIDTH 64
+#endif
+
+#include <linux/types.h>
+
+#if XLNK_SYS_BIT_WIDTH == 32
+
+ typedef u32 xlnk_intptr_type;
+ typedef s32 xlnk_int_type;
+ typedef u32 xlnk_uint_type;
+ typedef u8 xlnk_byte_type;
+ typedef s8 xlnk_char_type;
+ #define xlnk_enum_type s32
+
+#elif XLNK_SYS_BIT_WIDTH == 64
+
+ typedef u64 xlnk_intptr_type;
+ typedef s32 xlnk_int_type;
+ typedef u32 xlnk_uint_type;
+ typedef u8 xlnk_byte_type;
+ typedef s8 xlnk_char_type;
+ #define xlnk_enum_type s32
+
+#else
+ #error "Please define application bit width and system bit width"
+#endif
+
+#endif
diff --git a/drivers/staging/apf/xlnk.c b/drivers/staging/apf/xlnk.c
new file mode 100644
index 000000000000..4701898cc5ec
--- /dev/null
+++ b/drivers/staging/apf/xlnk.c
@@ -0,0 +1,1580 @@
+/*
+ * xlnk.c
+ *
+ * Xilinx Accelerator driver support.
+ *
+ * Copyright (C) 2010 Xilinx Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/* ----------------------------------- Host OS */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include <linux/io.h>
+#include <linux/dma-buf.h>
+
+#include <linux/string.h>
+
+#include <linux/uaccess.h>
+
+#include <linux/dmaengine.h>
+#include <linux/completion.h>
+#include <linux/wait.h>
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/cdev.h>
+
+#include <linux/sched.h>
+#include <linux/pagemap.h>
+#include <linux/errno.h> /* error codes */
+#include <linux/dma-mapping.h> /* dma */
+#include <linux/of.h>
+#include <linux/list.h>
+#include <linux/dma/xilinx_dma.h>
+#include <linux/uio_driver.h>
+#include <asm/cacheflush.h>
+#include <linux/semaphore.h>
+
+#include "xlnk-ioctl.h"
+#include "xlnk-sysdef.h"
+#include "xlnk.h"
+
+#ifdef CONFIG_XILINX_DMA_APF
+#include "xilinx-dma-apf.h"
+#endif
+
+#define DRIVER_NAME "xlnk"
+#define DRIVER_VERSION "0.2"
+
+static struct platform_device *xlnk_pdev;
+static struct device *xlnk_dev;
+
+static struct cdev xlnk_cdev;
+
+static struct class *xlnk_class;
+
+static s32 driver_major;
+
+static char *driver_name = DRIVER_NAME;
+
+static void *xlnk_dev_buf;
+static ssize_t xlnk_dev_size;
+static int xlnk_dev_vmas;
+
+#define XLNK_BUF_POOL_SIZE 4096
+static unsigned int xlnk_bufpool_size = XLNK_BUF_POOL_SIZE;
+static void *xlnk_bufpool[XLNK_BUF_POOL_SIZE];
+static void *xlnk_bufpool_alloc_point[XLNK_BUF_POOL_SIZE];
+static xlnk_intptr_type xlnk_userbuf[XLNK_BUF_POOL_SIZE];
+static int xlnk_buf_process[XLNK_BUF_POOL_SIZE];
+static dma_addr_t xlnk_phyaddr[XLNK_BUF_POOL_SIZE];
+static size_t xlnk_buflen[XLNK_BUF_POOL_SIZE];
+static unsigned int xlnk_bufcacheable[XLNK_BUF_POOL_SIZE];
+static spinlock_t xlnk_buf_lock;
+
+#define XLNK_IRQ_POOL_SIZE 256
+static struct xlnk_irq_control *xlnk_irq_set[XLNK_IRQ_POOL_SIZE];
+static spinlock_t xlnk_irq_lock;
+
+static int xlnk_open(struct inode *ip, struct file *filp);
+static int xlnk_release(struct inode *ip, struct file *filp);
+static long xlnk_ioctl(struct file *filp, unsigned int code,
+ unsigned long args);
+static ssize_t xlnk_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *offp);
+static ssize_t xlnk_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *offp);
+static int xlnk_mmap(struct file *filp, struct vm_area_struct *vma);
+static void xlnk_vma_open(struct vm_area_struct *vma);
+static void xlnk_vma_close(struct vm_area_struct *vma);
+
+static int xlnk_init_bufpool(void);
+static void xlnk_init_irqpool(void);
+
+LIST_HEAD(xlnk_dmabuf_list);
+
+static int xlnk_shutdown(unsigned long buf);
+static int xlnk_recover_resource(unsigned long buf);
+
+static const struct file_operations xlnk_fops = {
+ .open = xlnk_open,
+ .release = xlnk_release,
+ .read = xlnk_read,
+ .write = xlnk_write,
+ .unlocked_ioctl = xlnk_ioctl,
+ .mmap = xlnk_mmap,
+};
+
+#define MAX_XLNK_DMAS 128
+
+struct xlnk_device_pack {
+ char name[64];
+ struct platform_device pdev;
+ struct resource res[8];
+ struct uio_info *io_ptr;
+ int refs;
+
+#ifdef CONFIG_XILINX_DMA_APF
+ struct xdma_channel_config dma_chan_cfg[4]; /* for xidane dma only */
+ struct xdma_device_config dma_dev_cfg; /* for xidane dma only */
+#endif
+};
+
+static struct semaphore xlnk_devpack_sem;
+static struct xlnk_device_pack *xlnk_devpacks[MAX_XLNK_DMAS];
+/* Reset the device-pack table and its binary-semaphore guard. */
+static void xlnk_devpacks_init(void)
+{
+	sema_init(&xlnk_devpack_sem, 1);
+	memset(xlnk_devpacks, 0, sizeof(xlnk_devpacks));
+}
+
+/*
+ * Allocate a device pack in the first free table slot.  Returns the
+ * new pack (with pdev.id set to its slot index), or NULL when the
+ * table is full or the allocation fails.
+ */
+static struct xlnk_device_pack *xlnk_devpacks_alloc(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < MAX_XLNK_DMAS; i++) {
+		if (!xlnk_devpacks[i]) {
+			struct xlnk_device_pack *ret;
+
+			ret = kzalloc(sizeof(*ret), GFP_KERNEL);
+			/* The original dereferenced a NULL ret on OOM. */
+			if (!ret)
+				return NULL;
+			ret->pdev.id = i;
+			xlnk_devpacks[i] = ret;
+
+			return ret;
+		}
+	}
+
+	return NULL;
+}
+
+/*
+ * Clear every table slot referencing @devpack and kfree() it.  This
+ * function owns the final free: callers must not kfree() the pack
+ * again afterwards.
+ */
+static void xlnk_devpacks_delete(struct xlnk_device_pack *devpack)
+{
+	unsigned int i;
+
+	for (i = 0; i < MAX_XLNK_DMAS; i++)
+		if (xlnk_devpacks[i] == devpack)
+			xlnk_devpacks[i] = NULL;
+	kfree(devpack);
+}
+
+/* Look up a registered pack by the base address of its first resource. */
+static struct xlnk_device_pack *xlnk_devpacks_find(xlnk_intptr_type base)
+{
+	unsigned int idx;
+
+	for (idx = 0; idx < MAX_XLNK_DMAS; idx++) {
+		struct xlnk_device_pack *pack = xlnk_devpacks[idx];
+
+		if (pack && pack->res[0].start == base)
+			return pack;
+	}
+
+	return NULL;
+}
+
+/*
+ * Drop one reference on the pack registered at @base; on the last
+ * reference, unregister its platform device and free the pack.
+ */
+static void xlnk_devpacks_free(xlnk_intptr_type base)
+{
+	struct xlnk_device_pack *devpack;
+
+	down(&xlnk_devpack_sem);
+	devpack = xlnk_devpacks_find(base);
+	if (!devpack) {
+		up(&xlnk_devpack_sem);
+		return;
+	}
+	devpack->refs--;
+	if (devpack->refs) {
+		up(&xlnk_devpack_sem);
+		return;
+	}
+	platform_device_unregister(&devpack->pdev);
+	/*
+	 * xlnk_devpacks_delete() already kfree()s the pack; the extra
+	 * kfree() that used to follow was a double free.
+	 */
+	xlnk_devpacks_delete(devpack);
+	up(&xlnk_devpack_sem);
+}
+
+/*
+ * Free every registered device pack: unregister its UIO or platform
+ * device, then delete it from the table.
+ */
+static void xlnk_devpacks_free_all(void)
+{
+	struct xlnk_device_pack *devpack;
+	unsigned int i;
+
+	for (i = 0; i < MAX_XLNK_DMAS; i++) {
+		devpack = xlnk_devpacks[i];
+		if (devpack) {
+			if (devpack->io_ptr) {
+				uio_unregister_device(devpack->io_ptr);
+				kfree(devpack->io_ptr);
+			} else {
+				platform_device_unregister(&devpack->pdev);
+			}
+			/*
+			 * xlnk_devpacks_delete() kfree()s the pack; the
+			 * extra kfree() that followed was a double free.
+			 */
+			xlnk_devpacks_delete(devpack);
+		}
+	}
+}
+
+/*
+ * Probe: register the xlnk character device, create its class/device
+ * node, and initialize the buffer and IRQ pools.
+ */
+static int xlnk_probe(struct platform_device *pdev)
+{
+	int err;
+	dev_t dev = 0;
+
+	xlnk_dev_buf = NULL;
+	xlnk_dev_size = 0;
+	xlnk_dev_vmas = 0;
+
+	/* use 2.6 device model */
+	err = alloc_chrdev_region(&dev, 0, 1, driver_name);
+	if (err) {
+		dev_err(&pdev->dev, "%s: Can't get major %d\n",
+			__func__, driver_major);
+		goto err1;
+	}
+
+	cdev_init(&xlnk_cdev, &xlnk_fops);
+
+	xlnk_cdev.owner = THIS_MODULE;
+
+	err = cdev_add(&xlnk_cdev, dev, 1);
+
+	if (err) {
+		dev_err(&pdev->dev, "%s: Failed to add XLNK device\n",
+			__func__);
+		goto err3;
+	}
+
+	/* udev support */
+	xlnk_class = class_create(THIS_MODULE, "xlnk");
+	if (IS_ERR(xlnk_class)) {
+		/* Use &pdev->dev: the xlnk_dev global is still NULL here. */
+		dev_err(&pdev->dev, "%s: Error creating xlnk class\n",
+			__func__);
+		/* Previously err stayed 0 here and probe "succeeded". */
+		err = PTR_ERR(xlnk_class);
+		goto err3;
+	}
+
+	driver_major = MAJOR(dev);
+
+	dev_info(&pdev->dev, "Major %d\n", driver_major);
+
+	device_create(xlnk_class, NULL, MKDEV(driver_major, 0),
+		      NULL, "xlnk");
+
+	err = xlnk_init_bufpool();
+	if (err) {
+		dev_err(&pdev->dev, "%s: Failed to allocate buffer pool\n",
+			__func__);
+		goto err3;
+	}
+
+	xlnk_init_irqpool();
+
+	dev_info(&pdev->dev, "%s driver loaded\n", DRIVER_NAME);
+
+	xlnk_pdev = pdev;
+	xlnk_dev = &pdev->dev;
+
+	if (xlnk_pdev)
+		dev_info(&pdev->dev, "xlnk_pdev is not null\n");
+	else
+		dev_info(&pdev->dev, "xlnk_pdev is null\n");
+
+	xlnk_devpacks_init();
+
+	return 0;
+err3:
+	cdev_del(&xlnk_cdev);
+	unregister_chrdev_region(dev, 1);
+err1:
+	return err;
+}
+
+/* Return the first free pool slot (>= 1), or 0 if the pool is full. */
+static int xlnk_buf_findnull(void)
+{
+	int idx = 1;
+
+	while (idx < xlnk_bufpool_size) {
+		if (!xlnk_bufpool[idx])
+			return idx;
+		idx++;
+	}
+
+	return 0;
+}
+
+/*
+ * Return the pool id of the buffer whose physical range contains
+ * @addr, or 0 if none does.
+ * NOTE(review): iterates the pool without xlnk_buf_lock — presumably
+ * callers hold it; confirm at the call sites.
+ */
+static int xlnk_buf_find_by_phys_addr(xlnk_intptr_type addr)
+{
+	int i;
+
+	for (i = 1; i < xlnk_bufpool_size; i++) {
+		if (xlnk_bufpool[i] &&
+		    xlnk_phyaddr[i] <= addr &&
+		    xlnk_phyaddr[i] + xlnk_buflen[i] > addr)
+			return i;
+	}
+
+	return 0;
+}
+
+/*
+ * Return the pool id of the buffer whose user-space mapping (for
+ * process @pid) contains @addr, or 0 if none does.
+ * NOTE(review): like the phys-addr finder, runs without
+ * xlnk_buf_lock — confirm callers hold it.
+ */
+static int xlnk_buf_find_by_user_addr(xlnk_intptr_type addr, int pid)
+{
+	int i;
+
+	for (i = 1; i < xlnk_bufpool_size; i++) {
+		if (xlnk_bufpool[i] &&
+		    xlnk_buf_process[i] == pid &&
+		    xlnk_userbuf[i] <= addr &&
+		    xlnk_userbuf[i] + xlnk_buflen[i] > addr)
+			return i;
+	}
+
+	return 0;
+}
+
+/*
+ * Allocate a DMA buffer and return its pool id.
+ * The id is always a positive number.
+ */
+static int xlnk_allocbuf(unsigned int len, unsigned int cacheable)
+{
+	int id;
+	void *kaddr;
+	dma_addr_t phys_addr_anchor;
+	unsigned long attrs;
+
+	/* Cacheable buffers get non-consistent (streaming-style) memory. */
+	attrs = cacheable ? DMA_ATTR_NON_CONSISTENT : 0;
+
+	kaddr = dma_alloc_attrs(xlnk_dev,
+				len,
+				&phys_addr_anchor,
+				GFP_KERNEL | GFP_DMA,
+				attrs);
+	if (!kaddr)
+		return -ENOMEM;
+
+	spin_lock(&xlnk_buf_lock);
+	id = xlnk_buf_findnull();
+	if (id > 0 && id < XLNK_BUF_POOL_SIZE) {
+		xlnk_bufpool_alloc_point[id] = kaddr;
+		xlnk_bufpool[id] = kaddr;
+		xlnk_buflen[id] = len;
+		xlnk_bufcacheable[id] = cacheable;
+		xlnk_phyaddr[id] = phys_addr_anchor;
+	}
+	spin_unlock(&xlnk_buf_lock);
+
+	if (id <= 0 || id >= XLNK_BUF_POOL_SIZE) {
+		/*
+		 * Pool exhausted: release the DMA memory we just
+		 * allocated (it used to leak here).
+		 */
+		dma_free_attrs(xlnk_dev, len, kaddr, phys_addr_anchor, attrs);
+		return -ENOMEM;
+	}
+
+	return id;
+}
+
+/*
+ * Initialise the buffer pool: allocate the 8 KiB scratch device
+ * buffer used by xlnk_read()/xlnk_write(), pin it in slot 0, and
+ * clear the remaining slots.  Returns 0 or -ENOMEM.
+ */
+static int xlnk_init_bufpool(void)
+{
+	unsigned int i;
+
+	spin_lock_init(&xlnk_buf_lock);
+	xlnk_dev_buf = kmalloc(8192, GFP_KERNEL | GFP_DMA);
+	if (!xlnk_dev_buf) {
+		dev_err(xlnk_dev, "%s: malloc failed\n", __func__);
+		return -ENOMEM;
+	}
+	/*
+	 * NULL check must precede this write; the previous code
+	 * dereferenced xlnk_dev_buf before testing it, oopsing on OOM.
+	 */
+	*((char *)xlnk_dev_buf) = '\0';
+
+	xlnk_bufpool[0] = xlnk_dev_buf;
+	for (i = 1; i < xlnk_bufpool_size; i++)
+		xlnk_bufpool[i] = NULL;
+
+	return 0;
+}
+
+/* Initialise the accelerator-IRQ table and its lock; all slots empty. */
+static void xlnk_init_irqpool(void)
+{
+	int i;
+
+	spin_lock_init(&xlnk_irq_lock);
+	for (i = 0; i < XLNK_IRQ_POOL_SIZE; i++)
+		xlnk_irq_set[i] = NULL;
+}
+
+#define XLNK_SUSPEND NULL
+#define XLNK_RESUME NULL
+
+/*
+ * Platform-driver remove: tear down the char device, sysfs node,
+ * class, and all registered device packs.  Mirrors xlnk_probe().
+ */
+static int xlnk_remove(struct platform_device *pdev)
+{
+	dev_t devno;
+
+	kfree(xlnk_dev_buf);
+	xlnk_dev_buf = NULL;
+
+	devno = MKDEV(driver_major, 0);
+	cdev_del(&xlnk_cdev);
+	unregister_chrdev_region(devno, 1);
+	if (xlnk_class) {
+		/* remove the device from sysfs */
+		device_destroy(xlnk_class, MKDEV(driver_major, 0));
+		class_destroy(xlnk_class);
+	}
+
+	/* Unregister every platform device created via the ioctls. */
+	xlnk_devpacks_free_all();
+
+	return 0;
+}
+
+static const struct of_device_id xlnk_match[] = {
+ { .compatible = "xlnx,xlnk-1.0", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, xlnk_match);
+
+static struct platform_driver xlnk_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = xlnk_match,
+ },
+ .probe = xlnk_probe,
+ .remove = xlnk_remove,
+ .suspend = XLNK_SUSPEND,
+ .resume = XLNK_RESUME,
+};
+
+static u64 dma_mask = 0xFFFFFFFFFFFFFFFFull;
+
+/*
+ * This function is called when an application opens handle to the
+ * bridge driver.
+ */
+/*
+ * This function is called when an application opens handle to the
+ * bridge driver.  Opening write-only truncates the scratch device
+ * buffer (resets its logical size to zero).
+ */
+static int xlnk_open(struct inode *ip, struct file *filp)
+{
+	if ((filp->f_flags & O_ACCMODE) == O_WRONLY)
+		xlnk_dev_size = 0;
+
+	return 0;
+}
+
+/*
+ * Read from the driver's scratch buffer (xlnk_dev_buf).  Bounded by
+ * the current logical size xlnk_dev_size; advances *offp.
+ * NOTE(review): no locking — concurrent readers/writers race on
+ * xlnk_dev_size/xlnk_dev_buf; confirm single-user access model.
+ */
+static ssize_t xlnk_read(struct file *filp,
+			 char __user *buf,
+			 size_t count,
+			 loff_t *offp)
+{
+	ssize_t retval = 0;
+
+	if (*offp >= xlnk_dev_size)
+		goto out;
+
+	if (*offp + count > xlnk_dev_size)
+		count = xlnk_dev_size - *offp;
+
+	if (copy_to_user(buf, xlnk_dev_buf + *offp, count)) {
+		retval = -EFAULT;
+		goto out;
+	}
+	*offp += count;
+	retval = count;
+
+ out:
+	return retval;
+}
+
+/*
+ * Write into the driver's scratch buffer and grow its logical size.
+ * xlnk_dev_buf is a fixed 8192-byte allocation (see
+ * xlnk_init_bufpool()), so the write must be clamped: the previous
+ * code copied an arbitrary user-controlled count past the end of the
+ * buffer — a kernel heap overflow reachable from userspace.
+ */
+static ssize_t xlnk_write(struct file *filp, const char __user *buf,
+			  size_t count, loff_t *offp)
+{
+	ssize_t retval = 0;
+
+	if (*offp < 0 || *offp >= 8192)
+		return -ENOSPC;
+	if (count > 8192 - *offp)
+		count = 8192 - *offp;
+
+	if (copy_from_user(xlnk_dev_buf + *offp, buf, count)) {
+		retval = -EFAULT;
+		goto out;
+	}
+	*offp += count;
+	retval = count;
+
+	if (xlnk_dev_size < *offp)
+		xlnk_dev_size = *offp;
+
+ out:
+	return retval;
+}
+
+/*
+ * This function is called when an application closes handle to the bridge
+ * driver.  No per-handle state is kept, so nothing to release.
+ */
+static int xlnk_release(struct inode *ip, struct file *filp)
+{
+	return 0;
+}
+
+/*
+ * Register (or re-reference) a generic accelerator as a platform
+ * device.  If a devpack already exists at @base, just bump its
+ * refcount and hand back its handle; otherwise build a resource list
+ * (one MEM region plus up to 7 IRQs from the zero-terminated @irqs
+ * array) and register a new platform device.  Serialised by
+ * xlnk_devpack_sem.  On success *handle holds the devpack pointer.
+ */
+static int xlnk_devregister(char *name,
+			    unsigned int id,
+			    xlnk_intptr_type base,
+			    unsigned int size,
+			    unsigned int *irqs,
+			    xlnk_intptr_type *handle)
+{
+	unsigned int nres;
+	unsigned int nirq;
+	unsigned int *irqptr;
+	struct xlnk_device_pack *devpack;
+	unsigned int i;
+	int status;
+
+	down(&xlnk_devpack_sem);
+	devpack = xlnk_devpacks_find(base);
+	if (devpack) {
+		/* Already registered: share it. */
+		*handle = (xlnk_intptr_type)devpack;
+		devpack->refs++;
+		status = 0;
+	} else {
+		/* Count IRQs: the list is zero-terminated. */
+		nirq = 0;
+		irqptr = irqs;
+
+		while (*irqptr) {
+			nirq++;
+			irqptr++;
+		}
+
+		/* res[] has room for 1 MEM + 7 IRQ entries. */
+		if (nirq > 7) {
+			up(&xlnk_devpack_sem);
+			return -ENOMEM;
+		}
+
+		nres = nirq + 1;
+
+		devpack = xlnk_devpacks_alloc();
+		if (!devpack) {
+			up(&xlnk_devpack_sem);
+			pr_err("Failed to allocate device %s\n", name);
+			return -ENOMEM;
+		}
+		devpack->io_ptr = NULL;
+		strcpy(devpack->name, name);
+		devpack->pdev.name = devpack->name;
+
+		devpack->pdev.dev.dma_mask = &dma_mask;
+		devpack->pdev.dev.coherent_dma_mask = dma_mask;
+
+		devpack->res[0].start = base;
+		devpack->res[0].end = base + size - 1;
+		devpack->res[0].flags = IORESOURCE_MEM;
+
+		for (i = 0; i < nirq; i++) {
+			devpack->res[i + 1].start = irqs[i];
+			devpack->res[i + 1].end = irqs[i];
+			devpack->res[i + 1].flags = IORESOURCE_IRQ;
+		}
+
+		devpack->pdev.resource = devpack->res;
+		devpack->pdev.num_resources = nres;
+
+		status = platform_device_register(&devpack->pdev);
+		if (status) {
+			xlnk_devpacks_delete(devpack);
+			*handle = 0;
+		} else {
+			*handle = (xlnk_intptr_type)devpack;
+		}
+	}
+	up(&xlnk_devpack_sem);
+
+	return status;
+}
+
+/*
+ * Register an AXI DMA engine (1 or 2 channels) as a "xilinx-axidma"
+ * platform device, or re-reference an existing devpack at @base.
+ * Channel parameters are passed through platform_data
+ * (dma_dev_cfg/dma_chan_cfg).  Compiled out (returns 0, *handle
+ * untouched) when CONFIG_XILINX_DMA_APF is not set.  Serialised by
+ * xlnk_devpack_sem.
+ */
+static int xlnk_dmaregister(char *name,
+			    unsigned int id,
+			    xlnk_intptr_type base,
+			    unsigned int size,
+			    unsigned int chan_num,
+			    unsigned int chan0_dir,
+			    unsigned int chan0_irq,
+			    unsigned int chan0_poll_mode,
+			    unsigned int chan0_include_dre,
+			    unsigned int chan0_data_width,
+			    unsigned int chan1_dir,
+			    unsigned int chan1_irq,
+			    unsigned int chan1_poll_mode,
+			    unsigned int chan1_include_dre,
+			    unsigned int chan1_data_width,
+			    xlnk_intptr_type *handle)
+{
+	int status = 0;
+
+#ifdef CONFIG_XILINX_DMA_APF
+
+	struct xlnk_device_pack *devpack;
+
+	if (chan_num < 1 || chan_num > 2) {
+		pr_err("%s: Expected either 1 or 2 channels, got %d\n",
+		       __func__, chan_num);
+		return -EINVAL;
+	}
+
+	down(&xlnk_devpack_sem);
+	devpack = xlnk_devpacks_find(base);
+	if (devpack) {
+		/* Already registered: share it. */
+		*handle = (xlnk_intptr_type)devpack;
+		devpack->refs++;
+		status = 0;
+	} else {
+		devpack = xlnk_devpacks_alloc();
+		if (!devpack) {
+			up(&xlnk_devpack_sem);
+			return -ENOMEM;
+		}
+		strcpy(devpack->name, name);
+		devpack->pdev.name = "xilinx-axidma";
+
+		devpack->io_ptr = NULL;
+
+		/* Channel 0 config; direction selects s2mm vs mm2s. */
+		devpack->dma_chan_cfg[0].include_dre = chan0_include_dre;
+		devpack->dma_chan_cfg[0].datawidth = chan0_data_width;
+		devpack->dma_chan_cfg[0].irq = chan0_irq;
+		devpack->dma_chan_cfg[0].poll_mode = chan0_poll_mode;
+		devpack->dma_chan_cfg[0].type =
+			(chan0_dir == XLNK_DMA_FROM_DEVICE) ?
+				"axi-dma-s2mm-channel" :
+				"axi-dma-mm2s-channel";
+
+		if (chan_num > 1) {
+			devpack->dma_chan_cfg[1].include_dre =
+				chan1_include_dre;
+			devpack->dma_chan_cfg[1].datawidth = chan1_data_width;
+			devpack->dma_chan_cfg[1].irq = chan1_irq;
+			devpack->dma_chan_cfg[1].poll_mode = chan1_poll_mode;
+			devpack->dma_chan_cfg[1].type =
+				(chan1_dir == XLNK_DMA_FROM_DEVICE) ?
+					"axi-dma-s2mm-channel" :
+					"axi-dma-mm2s-channel";
+		}
+
+		devpack->dma_dev_cfg.name = devpack->name;
+		devpack->dma_dev_cfg.type = "axi-dma";
+		devpack->dma_dev_cfg.include_sg = 1;
+		devpack->dma_dev_cfg.sg_include_stscntrl_strm = 1;
+		devpack->dma_dev_cfg.channel_count = chan_num;
+		devpack->dma_dev_cfg.channel_config = &devpack->dma_chan_cfg[0];
+
+		devpack->pdev.dev.platform_data = &devpack->dma_dev_cfg;
+
+		devpack->pdev.dev.dma_mask = &dma_mask;
+		devpack->pdev.dev.coherent_dma_mask = dma_mask;
+
+		devpack->res[0].start = base;
+		devpack->res[0].end = base + size - 1;
+		devpack->res[0].flags = IORESOURCE_MEM;
+
+		devpack->pdev.resource = devpack->res;
+		devpack->pdev.num_resources = 1;
+		status = platform_device_register(&devpack->pdev);
+		if (status) {
+			xlnk_devpacks_delete(devpack);
+			*handle = 0;
+		} else {
+			*handle = (xlnk_intptr_type)devpack;
+		}
+	}
+	up(&xlnk_devpack_sem);
+
+#endif
+	return status;
+}
+
+/*
+ * XLNK_IOCALLOCBUF: allocate a DMA buffer and return its pool id and
+ * physical address to userspace.
+ * NOTE(review): copy_from_user failure conventionally maps to
+ * -EFAULT, not -ENOMEM — consider fixing driver-wide.
+ */
+static int xlnk_allocbuf_ioctl(struct file *filp,
+			       unsigned int code,
+			       unsigned long args)
+{
+	union xlnk_args temp_args;
+	int status;
+	xlnk_int_type id;
+
+	status = copy_from_user(&temp_args, (void __user *)args,
+				sizeof(union xlnk_args));
+
+	if (status)
+		return -ENOMEM;
+
+	id = xlnk_allocbuf(temp_args.allocbuf.len,
+			   temp_args.allocbuf.cacheable);
+
+	if (id <= 0)
+		return -ENOMEM;
+
+	/* Report id + physical address back to the caller. */
+	temp_args.allocbuf.id = id;
+	temp_args.allocbuf.phyaddr = (xlnk_intptr_type)(xlnk_phyaddr[id]);
+	status = copy_to_user((void __user *)args,
+			      &temp_args,
+			      sizeof(union xlnk_args));
+
+	return status;
+}
+
+/*
+ * Free pool buffer @id: snapshot its bookkeeping under xlnk_buf_lock,
+ * clear the slot, then release the DMA memory outside the lock.
+ * Returns 0 or -ENOMEM for an invalid/empty id.
+ */
+static int xlnk_freebuf(int id)
+{
+	void *alloc_point;
+	dma_addr_t p_addr;
+	size_t buf_len;
+	int cacheable;
+	unsigned long attrs;
+
+	if (id <= 0 || id >= xlnk_bufpool_size)
+		return -ENOMEM;
+
+	spin_lock(&xlnk_buf_lock);
+	/*
+	 * Occupancy must be checked under the lock: the old unlocked
+	 * check let two concurrent frees of the same id both proceed,
+	 * double-freeing the DMA allocation.
+	 */
+	if (!xlnk_bufpool[id]) {
+		spin_unlock(&xlnk_buf_lock);
+		return -ENOMEM;
+	}
+	alloc_point = xlnk_bufpool_alloc_point[id];
+	p_addr = xlnk_phyaddr[id];
+	buf_len = xlnk_buflen[id];
+	xlnk_bufpool[id] = NULL;
+	xlnk_phyaddr[id] = (dma_addr_t)NULL;
+	xlnk_buflen[id] = 0;
+	cacheable = xlnk_bufcacheable[id];
+	xlnk_bufcacheable[id] = 0;
+	spin_unlock(&xlnk_buf_lock);
+
+	/* Must match the attrs used in xlnk_allocbuf(). */
+	attrs = cacheable ? DMA_ATTR_NON_CONSISTENT : 0;
+
+	dma_free_attrs(xlnk_dev,
+		       buf_len,
+		       alloc_point,
+		       p_addr,
+		       attrs);
+
+	return 0;
+}
+
+/* Free every pool buffer except reserved slot 0 (xlnk_dev_buf). */
+static void xlnk_free_all_buf(void)
+{
+	int i;
+
+	for (i = 1; i < xlnk_bufpool_size; i++)
+		xlnk_freebuf(i);
+}
+
+/* XLNK_IOCFREEBUF: free the pool buffer named by userspace. */
+static int xlnk_freebuf_ioctl(struct file *filp,
+			      unsigned int code,
+			      unsigned long args)
+{
+	union xlnk_args temp_args;
+	int status;
+	int id;
+
+	status = copy_from_user(&temp_args, (void __user *)args,
+				sizeof(union xlnk_args));
+
+	if (status)
+		return -ENOMEM;
+
+	id = temp_args.freebuf.id;
+	return xlnk_freebuf(id);
+}
+
+/*
+ * XLNK_IOCADDDMABUF: import an externally-allocated DMA-BUF (by fd),
+ * attach and map it for this device, and track it on xlnk_dmabuf_list
+ * keyed by the caller's user virtual address.  Rejects duplicate
+ * registrations for the same address.
+ */
+static int xlnk_adddmabuf_ioctl(struct file *filp,
+				unsigned int code,
+				unsigned long args)
+{
+	union xlnk_args temp_args;
+	struct xlnk_dmabuf_reg *db;
+	int status;
+
+	status = copy_from_user(&temp_args, (void __user *)args,
+				sizeof(union xlnk_args));
+
+	if (status)
+		return -ENOMEM;
+
+	spin_lock(&xlnk_buf_lock);
+	list_for_each_entry(db, &xlnk_dmabuf_list, list) {
+		if (db->user_vaddr == temp_args.dmasubmit.buf) {
+			pr_err("Attempting to register DMA-BUF for addr %llx that is already registered\n",
+			       (unsigned long long)temp_args.dmabuf.user_addr);
+			spin_unlock(&xlnk_buf_lock);
+			return -EINVAL;
+		}
+	}
+	spin_unlock(&xlnk_buf_lock);
+
+	db = kzalloc(sizeof(*db), GFP_KERNEL);
+	if (!db)
+		return -ENOMEM;
+
+	db->dmabuf_fd = temp_args.dmabuf.dmabuf_fd;
+	db->user_vaddr = temp_args.dmabuf.user_addr;
+	/*
+	 * dma_buf_get() returns ERR_PTR on a bad fd; the old code
+	 * passed that straight into dma_buf_attach().
+	 */
+	db->dbuf = dma_buf_get(db->dmabuf_fd);
+	if (IS_ERR(db->dbuf)) {
+		pr_err("Invalid DMA-BUF fd\n");
+		kfree(db);
+		return -EINVAL;
+	}
+	db->dbuf_attach = dma_buf_attach(db->dbuf, xlnk_dev);
+	if (IS_ERR(db->dbuf_attach)) {
+		dma_buf_put(db->dbuf);
+		pr_err("Failed DMA-BUF attach\n");
+		kfree(db);
+		return -EINVAL;
+	}
+
+	db->dbuf_sg_table = dma_buf_map_attachment(db->dbuf_attach,
+						   DMA_BIDIRECTIONAL);
+
+	/* dma_buf_map_attachment() reports failure via ERR_PTR. */
+	if (IS_ERR_OR_NULL(db->dbuf_sg_table)) {
+		pr_err("Failed DMA-BUF map_attachment\n");
+		dma_buf_detach(db->dbuf, db->dbuf_attach);
+		dma_buf_put(db->dbuf);
+		kfree(db);
+		return -EINVAL;
+	}
+
+	spin_lock(&xlnk_buf_lock);
+	INIT_LIST_HEAD(&db->list);
+	list_add_tail(&db->list, &xlnk_dmabuf_list);
+	spin_unlock(&xlnk_buf_lock);
+
+	return 0;
+}
+
+/*
+ * XLNK_IOCCLEARDMABUF: unmap, detach, and drop a previously
+ * registered DMA-BUF identified by its user virtual address.
+ * Returns 0 on success, 1 if the address was never registered.
+ */
+static int xlnk_cleardmabuf_ioctl(struct file *filp,
+				  unsigned int code,
+				  unsigned long args)
+{
+	union xlnk_args temp_args;
+	struct xlnk_dmabuf_reg *dp, *dp_temp;
+	int status;
+
+	status = copy_from_user(&temp_args, (void __user *)args,
+				sizeof(union xlnk_args));
+
+	if (status)
+		return -ENOMEM;
+
+	spin_lock(&xlnk_buf_lock);
+	list_for_each_entry_safe(dp, dp_temp, &xlnk_dmabuf_list, list) {
+		if (dp->user_vaddr == temp_args.dmabuf.user_addr) {
+			dma_buf_unmap_attachment(dp->dbuf_attach,
+						 dp->dbuf_sg_table,
+						 DMA_BIDIRECTIONAL);
+			dma_buf_detach(dp->dbuf, dp->dbuf_attach);
+			dma_buf_put(dp->dbuf);
+			list_del(&dp->list);
+			spin_unlock(&xlnk_buf_lock);
+			kfree(dp);
+			return 0;
+		}
+	}
+	spin_unlock(&xlnk_buf_lock);
+	pr_err("Attempting to unregister a DMA-BUF that was not registered at addr %llx\n",
+	       (unsigned long long)temp_args.dmabuf.user_addr);
+
+	return 1;
+}
+
+/*
+ * XLNK_IOCDMAREQUEST: acquire a named APF DMA channel and hand the
+ * channel pointer plus its BD ring location/size back to userspace.
+ * Returns -1 when the driver is built without CONFIG_XILINX_DMA_APF.
+ */
+static int xlnk_dmarequest_ioctl(struct file *filp, unsigned int code,
+				 unsigned long args)
+{
+#ifdef CONFIG_XILINX_DMA_APF
+	union xlnk_args temp_args;
+	int status;
+	struct xdma_chan *chan;
+
+	status = copy_from_user(&temp_args, (void __user *)args,
+				sizeof(union xlnk_args));
+
+	if (status)
+		return -ENOMEM;
+
+	/* An empty name is treated as a no-op, not an error. */
+	if (!temp_args.dmarequest.name[0])
+		return 0;
+
+	down(&xlnk_devpack_sem);
+	chan = xdma_request_channel(temp_args.dmarequest.name);
+	up(&xlnk_devpack_sem);
+	if (!chan)
+		return -ENOMEM;
+	temp_args.dmarequest.dmachan = (xlnk_intptr_type)chan;
+	temp_args.dmarequest.bd_space_phys_addr = chan->bd_phys_addr;
+	temp_args.dmarequest.bd_space_size = chan->bd_chain_size;
+
+	if (copy_to_user((void __user *)args,
+			 &temp_args,
+			 sizeof(union xlnk_args)))
+		return -EFAULT;
+
+	return 0;
+#else
+	return -1;
+#endif
+}
+
+/*
+ * XLNK_IOCDMASUBMIT: submit a transfer on a previously requested
+ * channel.  The buffer is resolved either to a pool buffer (by
+ * physical address) or to a registered DMA-BUF (by user address).
+ * On success the transfer handle and last BD index are returned to
+ * userspace for a later XLNK_IOCDMAWAIT.
+ */
+static int xlnk_dmasubmit_ioctl(struct file *filp, unsigned int code,
+				unsigned long args)
+{
+#ifdef CONFIG_XILINX_DMA_APF
+	union xlnk_args temp_args;
+	struct xdma_head *dmahead;
+	struct xlnk_dmabuf_reg *dp, *cp = NULL;
+	int buf_id;
+	void *kaddr = NULL;
+	int status;
+
+	status = copy_from_user(&temp_args, (void __user *)args,
+				sizeof(union xlnk_args));
+
+	if (status)
+		return -ENOMEM;
+
+	if (!temp_args.dmasubmit.dmachan)
+		return -ENODEV;
+
+	spin_lock(&xlnk_buf_lock);
+	buf_id = xlnk_buf_find_by_phys_addr(temp_args.dmasubmit.buf);
+	if (buf_id) {
+		/* Pool buffer: compute the kernel address of the offset. */
+		xlnk_intptr_type addr_delta =
+			temp_args.dmasubmit.buf -
+			xlnk_phyaddr[buf_id];
+		kaddr = (u8 *)(xlnk_bufpool[buf_id]) + addr_delta;
+	} else {
+		/* Fall back to a registered DMA-BUF keyed by user addr. */
+		list_for_each_entry(dp, &xlnk_dmabuf_list, list) {
+			if (dp->user_vaddr == temp_args.dmasubmit.buf) {
+				cp = dp;
+				break;
+			}
+		}
+	}
+	spin_unlock(&xlnk_buf_lock);
+
+	status = xdma_submit((struct xdma_chan *)
+					(temp_args.dmasubmit.dmachan),
+					temp_args.dmasubmit.buf,
+					kaddr,
+					temp_args.dmasubmit.len,
+					temp_args.dmasubmit.nappwords_i,
+					temp_args.dmasubmit.appwords_i,
+					temp_args.dmasubmit.nappwords_o,
+					temp_args.dmasubmit.flag,
+					&dmahead,
+					cp);
+
+	if (!status) {
+		/*
+		 * Only touch dmahead on success: the old code
+		 * dereferenced it unconditionally, reading an
+		 * uninitialized pointer when xdma_submit() failed.
+		 */
+		temp_args.dmasubmit.dmahandle = (xlnk_intptr_type)dmahead;
+		temp_args.dmasubmit.last_bd_index =
+			(xlnk_intptr_type)dmahead->last_bd_index;
+
+		if (copy_to_user((void __user *)args,
+				 &temp_args,
+				 sizeof(union xlnk_args)))
+			return -EFAULT;
+	}
+	return status;
+#endif
+	return -ENOMEM;
+}
+
+/*
+ * XLNK_IOCDMAWAIT: wait (or poll, per the handle's userflag) for a
+ * submitted transfer.  On completion, copy back the output appwords
+ * and free the transfer head.
+ * NOTE(review): dmahandle comes straight from userspace and is
+ * dereferenced without validation — confirm trust model.
+ */
+static int xlnk_dmawait_ioctl(struct file *filp,
+			      unsigned int code,
+			      unsigned long args)
+{
+	int status = -1;
+#ifdef CONFIG_XILINX_DMA_APF
+	union xlnk_args temp_args;
+	struct xdma_head *dmahead;
+
+	status = copy_from_user(&temp_args, (void __user *)args,
+				sizeof(union xlnk_args));
+
+	if (status)
+		return -ENOMEM;
+
+	dmahead = (struct xdma_head *)temp_args.dmawait.dmahandle;
+	status = xdma_wait(dmahead,
+			   dmahead->userflag,
+			   &temp_args.dmawait.flags);
+	if (temp_args.dmawait.flags & XDMA_FLAGS_WAIT_COMPLETE) {
+		if (temp_args.dmawait.nappwords) {
+			memcpy(temp_args.dmawait.appwords,
+			       dmahead->appwords_o,
+			       dmahead->nappwords_o * sizeof(u32));
+		}
+		/* Transfer finished: the head is no longer needed. */
+		kfree(dmahead);
+	}
+	if (copy_to_user((void __user *)args,
+			 &temp_args,
+			 sizeof(union xlnk_args)))
+		return -EFAULT;
+#endif
+
+	return status;
+}
+
+/*
+ * XLNK_IOCDMARELEASE: give back a channel obtained via
+ * XLNK_IOCDMAREQUEST.  Serialised by xlnk_devpack_sem.
+ */
+static int xlnk_dmarelease_ioctl(struct file *filp, unsigned int code,
+				 unsigned long args)
+{
+	int status = -1;
+#ifdef CONFIG_XILINX_DMA_APF
+	union xlnk_args temp_args;
+
+	status = copy_from_user(&temp_args, (void __user *)args,
+				sizeof(union xlnk_args));
+
+	if (status)
+		return -ENOMEM;
+	down(&xlnk_devpack_sem);
+	xdma_release_channel((struct xdma_chan *)
+			     (temp_args.dmarelease.dmachan));
+	up(&xlnk_devpack_sem);
+#endif
+
+	return status;
+}
+
+/*
+ * XLNK_IOCDEVREGISTER: thin wrapper that unpacks the user args and
+ * calls xlnk_devregister().  The returned handle is not copied back.
+ */
+static int xlnk_devregister_ioctl(struct file *filp, unsigned int code,
+				  unsigned long args)
+{
+	union xlnk_args temp_args;
+	int status;
+	xlnk_intptr_type handle;
+
+	status = copy_from_user(&temp_args, (void __user *)args,
+				sizeof(union xlnk_args));
+
+	if (status)
+		return -ENOMEM;
+
+	status = xlnk_devregister(temp_args.devregister.name,
+				  temp_args.devregister.id,
+				  temp_args.devregister.base,
+				  temp_args.devregister.size,
+				  temp_args.devregister.irqs,
+				  &handle);
+
+	return status;
+}
+
+/*
+ * XLNK_IOCDMAREGISTER: thin wrapper that unpacks the user args and
+ * calls xlnk_dmaregister().  The returned handle is not copied back.
+ */
+static int xlnk_dmaregister_ioctl(struct file *filp, unsigned int code,
+				  unsigned long args)
+{
+	union xlnk_args temp_args;
+	int status;
+	xlnk_intptr_type handle;
+
+	status = copy_from_user(&temp_args, (void __user *)args,
+				sizeof(union xlnk_args));
+
+	if (status)
+		return -ENOMEM;
+
+	status = xlnk_dmaregister(temp_args.dmaregister.name,
+				  temp_args.dmaregister.id,
+				  temp_args.dmaregister.base,
+				  temp_args.dmaregister.size,
+				  temp_args.dmaregister.chan_num,
+				  temp_args.dmaregister.chan0_dir,
+				  temp_args.dmaregister.chan0_irq,
+				  temp_args.dmaregister.chan0_poll_mode,
+				  temp_args.dmaregister.chan0_include_dre,
+				  temp_args.dmaregister.chan0_data_width,
+				  temp_args.dmaregister.chan1_dir,
+				  temp_args.dmaregister.chan1_irq,
+				  temp_args.dmaregister.chan1_poll_mode,
+				  temp_args.dmaregister.chan1_include_dre,
+				  temp_args.dmaregister.chan1_data_width,
+				  &handle);
+
+	return status;
+}
+
+/* XLNK_IOCDEVUNREGISTER: drop the devpack registered at this base. */
+static int xlnk_devunregister_ioctl(struct file *filp,
+				    unsigned int code,
+				    unsigned long args)
+{
+	union xlnk_args temp_args;
+	int status;
+
+	status = copy_from_user(&temp_args, (void __user *)args,
+				sizeof(union xlnk_args));
+
+	if (status)
+		return -ENOMEM;
+
+	xlnk_devpacks_free(temp_args.devunregister.base);
+
+	return 0;
+}
+
+/*
+ * Accelerator interrupt handler: mask the (level-triggered) IRQ so it
+ * does not refire, and wake any waiter in xlnk_irq_wait_ioctl().
+ * The waiter re-enables the line on its next wait.
+ */
+static irqreturn_t xlnk_accel_isr(int irq, void *arg)
+{
+	struct xlnk_irq_control *irq_control = (struct xlnk_irq_control *)arg;
+
+	disable_irq_nosync(irq);
+	complete(&irq_control->cmp);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * XLNK_IOCIRQREGISTER: claim a slot in the IRQ pool for a
+ * level/active-high accelerator interrupt.  If the IRQ is already in
+ * the pool its slot id is reused; otherwise request_irq() is called
+ * with the line initially disabled.  The slot id is returned to
+ * userspace for later wait/unregister calls.
+ */
+static int xlnk_irq_register_ioctl(struct file *filp, unsigned int code,
+				   unsigned long args)
+{
+	union xlnk_args temp_args;
+	int status;
+	int i;
+	struct xlnk_irq_control *ctrl;
+	int irq_id = -1;
+	int irq_entry_new = 0;
+
+	status = copy_from_user(&temp_args,
+				(void __user *)args,
+				sizeof(temp_args.irqregister));
+	if (status)
+		return -ENOMEM;
+
+	/* Only level-triggered, active-high interrupts are supported. */
+	if (temp_args.irqregister.type !=
+	    (XLNK_IRQ_LEVEL | XLNK_IRQ_ACTIVE_HIGH)) {
+		dev_err(xlnk_dev, "Unsupported interrupt type %x\n",
+			temp_args.irqregister.type);
+		return -EINVAL;
+	}
+
+	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+	if (!ctrl)
+		return -ENOMEM;
+
+	ctrl->irq = xlate_irq(temp_args.irqregister.irq);
+	ctrl->enabled = 0;
+	init_completion(&ctrl->cmp);
+
+	spin_lock(&xlnk_irq_lock);
+	for (i = 0; i < XLNK_IRQ_POOL_SIZE; i++) {
+		if (!xlnk_irq_set[i] && irq_id == -1) {
+			/* Remember the first free slot, keep scanning
+			 * in case the IRQ is already registered.
+			 */
+			irq_entry_new = 1;
+			irq_id = i;
+			xlnk_irq_set[i] = ctrl;
+		} else if (xlnk_irq_set[i] &&
+			   xlnk_irq_set[i]->irq == ctrl->irq) {
+			irq_id = i;
+			break;
+		}
+	}
+	spin_unlock(&xlnk_irq_lock);
+
+	if (irq_id == -1) {
+		kfree(ctrl);
+		return -ENOMEM;
+	}
+
+	if (!irq_entry_new) {
+		/* Existing registration found: discard the new ctrl. */
+		kfree(ctrl);
+	} else {
+		status = request_irq(ctrl->irq,
+				     xlnk_accel_isr,
+				     IRQF_SHARED,
+				     "xlnk",
+				     ctrl);
+		if (status) {
+			/*
+			 * request_irq() failed: just roll back the slot.
+			 * (The old code called enable_irq() here on a
+			 * line it never obtained.)
+			 */
+			spin_lock(&xlnk_irq_lock);
+			xlnk_irq_set[irq_id] = NULL;
+			spin_unlock(&xlnk_irq_lock);
+			kfree(ctrl);
+			return -EINVAL;
+		}
+		/* Start masked; xlnk_irq_wait_ioctl() enables on demand. */
+		disable_irq_nosync(ctrl->irq);
+	}
+
+	temp_args.irqregister.irq_id = irq_id;
+
+	status = copy_to_user((void __user *)args,
+			      &temp_args,
+			      sizeof(temp_args.irqregister));
+
+	return status;
+}
+
+/*
+ * XLNK_IOCIRQUNREGISTER: release an IRQ pool slot.  If a waiter is
+ * outstanding (enabled), mask the line and complete it before
+ * freeing.
+ * NOTE(review): xlnk_irq_set[] is read/cleared without
+ * xlnk_irq_lock here — confirm this cannot race with registration.
+ */
+static int xlnk_irq_unregister_ioctl(struct file *filp, unsigned int code,
+				     unsigned long args)
+{
+	union xlnk_args temp_args;
+	int status;
+	int irq_id;
+	struct xlnk_irq_control *ctrl;
+
+	status = copy_from_user(&temp_args,
+				(void __user *)args,
+				sizeof(union xlnk_args));
+	if (status)
+		return -ENOMEM;
+
+	irq_id = temp_args.irqunregister.irq_id;
+	if (irq_id < 0 || irq_id >= XLNK_IRQ_POOL_SIZE)
+		return -EINVAL;
+
+	ctrl = xlnk_irq_set[irq_id];
+	if (!ctrl)
+		return -EINVAL;
+
+	xlnk_irq_set[irq_id] = NULL;
+
+	if (ctrl->enabled) {
+		disable_irq_nosync(ctrl->irq);
+		/* Release any thread blocked in xlnk_irq_wait_ioctl(). */
+		complete(&ctrl->cmp);
+	}
+	free_irq(ctrl->irq, ctrl);
+	kfree(ctrl);
+
+	return 0;
+}
+
+/*
+ * XLNK_IOCIRQWAIT: wait for (or poll) the accelerator interrupt in
+ * slot irq_id.  The line is enabled on entry if it was masked; on a
+ * successful wait the completion is re-armed and the line left
+ * masked (the ISR disables it).  `success` is reported back to
+ * userspace.
+ */
+static int xlnk_irq_wait_ioctl(struct file *filp, unsigned int code,
+			       unsigned long args)
+{
+	union xlnk_args temp_args;
+	int status;
+	int irq_id;
+	struct xlnk_irq_control *ctrl;
+
+	status = copy_from_user(&temp_args,
+				(void __user *)args,
+				sizeof(temp_args.irqwait));
+	if (status)
+		return -ENOMEM;
+
+	irq_id = temp_args.irqwait.irq_id;
+	if (irq_id < 0 || irq_id >= XLNK_IRQ_POOL_SIZE)
+		return -EINVAL;
+
+	ctrl = xlnk_irq_set[irq_id];
+	if (!ctrl)
+		return -EINVAL;
+
+	if (!ctrl->enabled) {
+		ctrl->enabled = 1;
+		enable_irq(ctrl->irq);
+	}
+
+	if (temp_args.irqwait.polling) {
+		/* Non-blocking: report whether the IRQ already fired. */
+		if (!try_wait_for_completion(&ctrl->cmp))
+			temp_args.irqwait.success = 0;
+		else
+			temp_args.irqwait.success = 1;
+	} else {
+		wait_for_completion(&ctrl->cmp);
+		temp_args.irqwait.success = 1;
+	}
+
+	if (temp_args.irqwait.success) {
+		reinit_completion(&ctrl->cmp);
+		ctrl->enabled = 0;
+	}
+
+	status = copy_to_user((void __user *)args,
+			      &temp_args,
+			      sizeof(temp_args.irqwait));
+
+	return status;
+}
+
+/*
+ * XLNK_IOCCACHECTRL: explicit cache maintenance on a pool buffer,
+ * located by physical address.  action 0 = flush (clean), action 1 =
+ * flush + invalidate.  32-bit ARM uses the cpuc/outer-cache ops;
+ * 64-bit maps through __dma_map_area.
+ */
+static int xlnk_cachecontrol_ioctl(struct file *filp, unsigned int code,
+				   unsigned long args)
+{
+	union xlnk_args temp_args;
+	int status, size;
+	void *kaddr;
+	xlnk_intptr_type paddr;
+	int buf_id;
+
+	status = copy_from_user(&temp_args,
+				(void __user *)args,
+				sizeof(union xlnk_args));
+
+	if (status) {
+		dev_err(xlnk_dev, "Error in copy_from_user. status = %d\n",
+			status);
+		return -ENOMEM;
+	}
+
+	if (!(temp_args.cachecontrol.action == 0 ||
+	      temp_args.cachecontrol.action == 1)) {
+		dev_err(xlnk_dev, "Illegal action specified to cachecontrol_ioctl: %d\n",
+			temp_args.cachecontrol.action);
+		return -EINVAL;
+	}
+
+	size = temp_args.cachecontrol.size;
+	paddr = temp_args.cachecontrol.phys_addr;
+
+	spin_lock(&xlnk_buf_lock);
+	buf_id = xlnk_buf_find_by_phys_addr(paddr);
+	kaddr = xlnk_bufpool[buf_id];
+	spin_unlock(&xlnk_buf_lock);
+
+	/* buf_id == 0 means the address is not an sds_alloc buffer. */
+	if (buf_id == 0) {
+		pr_err("Illegal cachecontrol on non-sds_alloc memory");
+		return -EINVAL;
+	}
+
+#if XLNK_SYS_BIT_WIDTH == 32
+	__cpuc_flush_dcache_area(kaddr, size);
+	outer_flush_range(paddr, paddr + size);
+	if (temp_args.cachecontrol.action == 1)
+		outer_inv_range(paddr, paddr + size);
+#else
+	if (temp_args.cachecontrol.action == 1)
+		__dma_map_area(kaddr, size, DMA_FROM_DEVICE);
+	else
+		__dma_map_area(kaddr, size, DMA_TO_DEVICE);
+#endif
+	return 0;
+}
+
+/*
+ * XLNK_IOCMEMOP: acquire or release a DMA mapping for either a pool
+ * buffer (looked up by the caller's mmap()ed virtual address + pid)
+ * or a registered DMA-BUF.  ACQUIRE maps a one-entry scatterlist and
+ * returns the bus address and an opaque token; RELEASE unmaps using
+ * that token.  Cache sync is skipped for coherent or non-cacheable
+ * buffers via DMA_ATTR_SKIP_CPU_SYNC.
+ */
+static int xlnk_memop_ioctl(struct file *filp, unsigned long arg_addr)
+{
+	union xlnk_args args;
+	xlnk_intptr_type p_addr = 0;
+	int status = 0;
+	int buf_id;
+	struct xlnk_dmabuf_reg *cp = NULL;
+	int cacheable = 1;
+	enum dma_data_direction dmadir;
+	xlnk_intptr_type page_id;
+	unsigned int page_offset;
+	struct scatterlist sg;
+	unsigned long attrs = 0;
+
+	status = copy_from_user(&args,
+				(void __user *)arg_addr,
+				sizeof(union xlnk_args));
+
+	if (status) {
+		pr_err("Error in copy_from_user. status = %d\n", status);
+		return status;
+	}
+
+	/* Exactly one of ACQUIRE / RELEASE must be set. */
+	if (!(args.memop.flags & XLNK_FLAG_MEM_ACQUIRE) &&
+	    !(args.memop.flags & XLNK_FLAG_MEM_RELEASE)) {
+		pr_err("memop lacks acquire or release flag\n");
+		return -EINVAL;
+	}
+
+	if (args.memop.flags & XLNK_FLAG_MEM_ACQUIRE &&
+	    args.memop.flags & XLNK_FLAG_MEM_RELEASE) {
+		pr_err("memop has both acquire and release defined\n");
+		return -EINVAL;
+	}
+
+	spin_lock(&xlnk_buf_lock);
+	buf_id = xlnk_buf_find_by_user_addr(args.memop.virt_addr,
+					    current->pid);
+	if (buf_id > 0) {
+		/* Pool buffer: translate to physical with the offset. */
+		cacheable = xlnk_bufcacheable[buf_id];
+		p_addr = xlnk_phyaddr[buf_id] +
+			(args.memop.virt_addr - xlnk_userbuf[buf_id]);
+	} else {
+		/* Not a pool buffer: try the DMA-BUF registry. */
+		struct xlnk_dmabuf_reg *dp;
+
+		list_for_each_entry(dp, &xlnk_dmabuf_list, list) {
+			if (dp->user_vaddr == args.memop.virt_addr) {
+				cp = dp;
+				break;
+			}
+		}
+	}
+	spin_unlock(&xlnk_buf_lock);
+
+	if (buf_id <= 0 && !cp) {
+		pr_err("Error, buffer not found\n");
+		return -EINVAL;
+	}
+
+	dmadir = (enum dma_data_direction)args.memop.dir;
+
+	if (args.memop.flags & XLNK_FLAG_COHERENT || !cacheable)
+		attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+
+	if (buf_id > 0) {
+		/* Build a single-entry sg covering the requested span. */
+		page_id = p_addr >> PAGE_SHIFT;
+		page_offset = p_addr - (page_id << PAGE_SHIFT);
+		sg_init_table(&sg, 1);
+		sg_set_page(&sg,
+			    pfn_to_page(page_id),
+			    args.memop.size,
+			    page_offset);
+		sg_dma_len(&sg) = args.memop.size;
+	}
+
+	if (args.memop.flags & XLNK_FLAG_MEM_ACQUIRE) {
+		if (buf_id > 0) {
+			status = get_dma_ops(xlnk_dev)->map_sg(xlnk_dev,
+							       &sg,
+							       1,
+							       dmadir,
+							       attrs);
+			if (!status) {
+				pr_err("Failed to map address\n");
+				return -EINVAL;
+			}
+			args.memop.phys_addr = (xlnk_intptr_type)
+				sg_dma_address(&sg);
+			args.memop.token = (xlnk_intptr_type)
+				sg_dma_address(&sg);
+			status = copy_to_user((void __user *)arg_addr,
+					      &args,
+					      sizeof(union xlnk_args));
+			if (status)
+				pr_err("Error in copy_to_user. status = %d\n",
+				       status);
+		} else {
+			/* DMA-BUFs were mapped at registration time. */
+			if (cp->dbuf_sg_table->nents != 1) {
+				pr_err("Non-SG-DMA datamovers require physically contiguous DMABUFs.  DMABUF is not physically contiguous\n");
+				return -EINVAL;
+			}
+			args.memop.phys_addr = (xlnk_intptr_type)
+				sg_dma_address(cp->dbuf_sg_table->sgl);
+			args.memop.token = 0;
+			status = copy_to_user((void __user *)arg_addr,
+					      &args,
+					      sizeof(union xlnk_args));
+			if (status)
+				pr_err("Error in copy_to_user. status = %d\n",
+				       status);
+		}
+	} else {
+		if (buf_id > 0) {
+			/* Token carries the bus address from ACQUIRE. */
+			sg_dma_address(&sg) = (dma_addr_t)args.memop.token;
+			get_dma_ops(xlnk_dev)->unmap_sg(xlnk_dev,
+							&sg,
+							1,
+							dmadir,
+							attrs);
+		}
+	}
+
+	return status;
+}
+
+/*
+ * This function provides IO interface to the bridge driver.
+ * Validates the ioctl magic/number, then dispatches to the
+ * per-command handler.
+ */
+static long xlnk_ioctl(struct file *filp,
+		       unsigned int code,
+		       unsigned long args)
+{
+	if (_IOC_TYPE(code) != XLNK_IOC_MAGIC)
+		return -ENOTTY;
+	if (_IOC_NR(code) > XLNK_IOC_MAXNR)
+		return -ENOTTY;
+
+	/* some sanity check */
+	switch (code) {
+	case XLNK_IOCALLOCBUF:
+		return xlnk_allocbuf_ioctl(filp, code, args);
+	case XLNK_IOCFREEBUF:
+		return xlnk_freebuf_ioctl(filp, code, args);
+	case XLNK_IOCADDDMABUF:
+		return xlnk_adddmabuf_ioctl(filp, code, args);
+	case XLNK_IOCCLEARDMABUF:
+		return xlnk_cleardmabuf_ioctl(filp, code, args);
+	case XLNK_IOCDMAREQUEST:
+		return xlnk_dmarequest_ioctl(filp, code, args);
+	case XLNK_IOCDMASUBMIT:
+		return xlnk_dmasubmit_ioctl(filp, code, args);
+	case XLNK_IOCDMAWAIT:
+		return xlnk_dmawait_ioctl(filp, code, args);
+	case XLNK_IOCDMARELEASE:
+		return xlnk_dmarelease_ioctl(filp, code, args);
+	case XLNK_IOCDEVREGISTER:
+		return xlnk_devregister_ioctl(filp, code, args);
+	case XLNK_IOCDMAREGISTER:
+		return xlnk_dmaregister_ioctl(filp, code, args);
+	case XLNK_IOCDEVUNREGISTER:
+		return xlnk_devunregister_ioctl(filp, code, args);
+	case XLNK_IOCCACHECTRL:
+		return xlnk_cachecontrol_ioctl(filp, code, args);
+	case XLNK_IOCIRQREGISTER:
+		return xlnk_irq_register_ioctl(filp, code, args);
+	case XLNK_IOCIRQUNREGISTER:
+		return xlnk_irq_unregister_ioctl(filp, code, args);
+	case XLNK_IOCIRQWAIT:
+		return xlnk_irq_wait_ioctl(filp, code, args);
+	case XLNK_IOCSHUTDOWN:
+		return xlnk_shutdown(args);
+	case XLNK_IOCRECRES:
+		return xlnk_recover_resource(args);
+	case XLNK_IOCMEMOP:
+		return xlnk_memop_ioctl(filp, args);
+	default:
+		return -EINVAL;
+	}
+}
+
+/* VMA callbacks for mappings created by xlnk_mmap(): count live VMAs. */
+static const struct vm_operations_struct xlnk_vm_ops = {
+	.open = xlnk_vma_open,
+	.close = xlnk_vma_close,
+};
+
+/*
+ * This function maps kernel space memory to user space memory.
+ * The pool id is encoded in the mmap offset (pgoff = bufid << 16 in
+ * bytes).  Id 0 maps the scratch device buffer; other ids map their
+ * DMA buffer, non-cached unless the buffer was allocated cacheable.
+ * Records the user address + pid so memop can find the buffer later.
+ */
+static int xlnk_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	int bufid;
+	int status;
+
+	/* Recover the pool id from the page offset. */
+	bufid = vma->vm_pgoff >> (16 - PAGE_SHIFT);
+
+	if (bufid == 0) {
+		unsigned long paddr = virt_to_phys(xlnk_dev_buf);
+
+		status = remap_pfn_range(vma,
+					 vma->vm_start,
+					 paddr >> PAGE_SHIFT,
+					 vma->vm_end - vma->vm_start,
+					 vma->vm_page_prot);
+	} else {
+		if (xlnk_bufcacheable[bufid] == 0)
+			vma->vm_page_prot =
+				pgprot_noncached(vma->vm_page_prot);
+		status = remap_pfn_range(vma, vma->vm_start,
+					 xlnk_phyaddr[bufid]
+					 >> PAGE_SHIFT,
+					 vma->vm_end - vma->vm_start,
+					 vma->vm_page_prot);
+		/* Remember where (and by whom) this buffer is mapped. */
+		xlnk_userbuf[bufid] = vma->vm_start;
+		xlnk_buf_process[bufid] = current->pid;
+	}
+	if (status) {
+		pr_err("%s failed with code %d\n", __func__, status);
+		return status;
+	}
+
+	xlnk_vma_open(vma);
+	vma->vm_ops = &xlnk_vm_ops;
+	vma->vm_private_data = xlnk_bufpool[bufid];
+
+	return 0;
+}
+
+/* Track the number of live VMAs backed by this driver. */
+static void xlnk_vma_open(struct vm_area_struct *vma)
+{
+	xlnk_dev_vmas++;
+}
+
+static void xlnk_vma_close(struct vm_area_struct *vma)
+{
+	xlnk_dev_vmas--;
+}
+
+/* XLNK_IOCSHUTDOWN: currently a no-op placeholder. */
+static int xlnk_shutdown(unsigned long buf)
+{
+	return 0;
+}
+
+/*
+ * XLNK_IOCRECRES: recover resources after an application crash —
+ * free every pool buffer and release all APF DMA channels.
+ */
+static int xlnk_recover_resource(unsigned long buf)
+{
+	xlnk_free_all_buf();
+#ifdef CONFIG_XILINX_DMA_APF
+	xdma_release_all_channels();
+#endif
+	return 0;
+}
+
+module_platform_driver(xlnk_driver);
+
+MODULE_DESCRIPTION("Xilinx APF driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/apf/xlnk.h b/drivers/staging/apf/xlnk.h
new file mode 100644
index 000000000000..cbc2334c2e82
--- /dev/null
+++ b/drivers/staging/apf/xlnk.h
@@ -0,0 +1,175 @@
+#ifndef _XLNK_OS_H
+#define _XLNK_OS_H
+
+#include <linux/stddef.h>
+#include <linux/dmaengine.h>
+#include "xilinx-dma-apf.h"
+#include "xlnk-sysdef.h"
+
+#define XLNK_FLAG_COHERENT 0x00000001
+#define XLNK_FLAG_KERNEL_BUFFER 0x00000002
+#define XLNK_FLAG_DMAPOLLING 0x00000004
+#define XLNK_FLAG_IOMMU_VALID 0x00000008
+#define XLNK_FLAG_PHYSICAL_ADDR 0x00000100
+#define XLNK_FLAG_VIRTUAL_ADDR 0x00000200
+#define XLNK_FLAG_MEM_ACQUIRE 0x00001000
+#define XLNK_FLAG_MEM_RELEASE 0x00002000
+#define CF_FLAG_CACHE_FLUSH_INVALIDATE 0x00000001
+#define CF_FLAG_PHYSICALLY_CONTIGUOUS 0x00000002
+#define CF_FLAG_DMAPOLLING 0x00000004
+#define XLNK_IRQ_LEVEL 0x00000001
+#define XLNK_IRQ_EDGE 0x00000002
+#define XLNK_IRQ_ACTIVE_HIGH 0x00000004
+#define XLNK_IRQ_ACTIVE_LOW 0x00000008
+#define XLNK_IRQ_RESET_REG_VALID 0x00000010
+
+/* DMA transfer direction as seen by userspace (mirrors dma_data_direction). */
+enum xlnk_dma_direction {
+	XLNK_DMA_BI = 0,
+	XLNK_DMA_TO_DEVICE = 1,
+	XLNK_DMA_FROM_DEVICE = 2,
+	XLNK_DMA_NONE = 3,
+};
+
+/* In-flight dmaengine transfer bookkeeping (kernel-internal). */
+struct xlnk_dma_transfer_handle {
+	dma_addr_t dma_addr;
+	unsigned long transfer_length;
+	void *kern_addr;
+	unsigned long user_addr;
+	enum dma_data_direction transfer_direction;
+	int sg_effective_length;
+	int flags;
+	struct dma_chan *channel;
+	dma_cookie_t dma_cookie;
+	struct dma_async_tx_descriptor *async_desc;
+	struct completion completion_handle;
+};
+
+/* A registered external DMA-BUF, tracked on xlnk_dmabuf_list. */
+struct xlnk_dmabuf_reg {
+	xlnk_int_type dmabuf_fd;
+	xlnk_intptr_type user_vaddr;	/* key used for lookup */
+	struct dma_buf *dbuf;
+	struct dma_buf_attachment *dbuf_attach;
+	struct sg_table *dbuf_sg_table;
+	int is_mapped;
+	int dma_direction;
+	struct list_head list;
+};
+
+/* One accelerator IRQ slot: line number, enable state, wait object. */
+struct xlnk_irq_control {
+	int irq;
+	int enabled;
+	struct completion cmp;
+};
+
+/*
+ * CROSSES KERNEL-USER BOUNDARY
+ * One member per ioctl command; every member is packed so the layout
+ * matches the userspace library byte-for-byte.  Do not reorder or
+ * resize fields without bumping the ABI.
+ */
+union xlnk_args {
+	struct __attribute__ ((__packed__)) {
+		xlnk_uint_type len;
+		xlnk_int_type id;	/* out: pool id */
+		xlnk_intptr_type phyaddr;	/* out: physical address */
+		xlnk_byte_type cacheable;
+	} allocbuf;
+	struct __attribute__ ((__packed__)) {
+		xlnk_uint_type id;
+		xlnk_intptr_type buf;
+	} freebuf;
+	struct __attribute__ ((__packed__)) {
+		xlnk_int_type dmabuf_fd;
+		xlnk_intptr_type user_addr;
+	} dmabuf;
+	struct __attribute__ ((__packed__)) {
+		xlnk_char_type name[64];
+		xlnk_intptr_type dmachan;	/* out: channel handle */
+		xlnk_uint_type bd_space_phys_addr;
+		xlnk_uint_type bd_space_size;
+	} dmarequest;
+#define XLNK_MAX_APPWORDS 5
+	struct __attribute__ ((__packed__)) {
+		xlnk_intptr_type dmachan;
+		xlnk_intptr_type buf;
+		xlnk_intptr_type buf2;
+		xlnk_uint_type buf_offset;
+		xlnk_uint_type len;
+		xlnk_uint_type bufflag;
+		xlnk_intptr_type sglist;
+		xlnk_uint_type sgcnt;
+		xlnk_enum_type dmadir;
+		xlnk_uint_type nappwords_i;
+		xlnk_uint_type appwords_i[XLNK_MAX_APPWORDS];
+		xlnk_uint_type nappwords_o;
+		xlnk_uint_type flag;
+		xlnk_intptr_type dmahandle; /* return value */
+		xlnk_uint_type last_bd_index;
+	} dmasubmit;
+	struct __attribute__ ((__packed__)) {
+		xlnk_intptr_type dmahandle;
+		xlnk_uint_type nappwords;
+		xlnk_uint_type appwords[XLNK_MAX_APPWORDS];
+		/* appwords array we only accept 5 max */
+		xlnk_uint_type flags;
+	} dmawait;
+	struct __attribute__ ((__packed__)) {
+		xlnk_intptr_type dmachan;
+	} dmarelease;
+	struct __attribute__ ((__packed__)) {
+		xlnk_intptr_type base;
+		xlnk_uint_type size;
+		xlnk_uint_type irqs[8];	/* zero-terminated list */
+		xlnk_char_type name[32];
+		xlnk_uint_type id;
+	} devregister;
+	struct __attribute__ ((__packed__)) {
+		xlnk_intptr_type base;
+	} devunregister;
+	struct __attribute__ ((__packed__)) {
+		xlnk_char_type name[32];
+		xlnk_uint_type id;
+		xlnk_intptr_type base;
+		xlnk_uint_type size;
+		xlnk_uint_type chan_num;
+		xlnk_uint_type chan0_dir;
+		xlnk_uint_type chan0_irq;
+		xlnk_uint_type chan0_poll_mode;
+		xlnk_uint_type chan0_include_dre;
+		xlnk_uint_type chan0_data_width;
+		xlnk_uint_type chan1_dir;
+		xlnk_uint_type chan1_irq;
+		xlnk_uint_type chan1_poll_mode;
+		xlnk_uint_type chan1_include_dre;
+		xlnk_uint_type chan1_data_width;
+	} dmaregister;
+	struct __attribute__ ((__packed__)) {
+		xlnk_intptr_type phys_addr;
+		xlnk_uint_type size;
+		xlnk_int_type action;	/* 0 = flush, 1 = flush+invalidate */
+	} cachecontrol;
+	struct __attribute__ ((__packed__)) {
+		xlnk_intptr_type virt_addr;
+		xlnk_int_type size;
+		xlnk_enum_type dir;
+		xlnk_int_type flags;
+		xlnk_intptr_type phys_addr;	/* out */
+		xlnk_intptr_type token;	/* out; pass back on release */
+	} memop;
+	struct __attribute__ ((__packed__)) {
+		xlnk_int_type irq;
+		xlnk_int_type subirq;
+		xlnk_uint_type type;
+		xlnk_intptr_type control_base;
+		xlnk_intptr_type reset_reg_base;
+		xlnk_uint_type reset_offset;
+		xlnk_uint_type reset_valid_high;
+		xlnk_uint_type reset_valid_low;
+		xlnk_int_type irq_id;	/* out: pool slot */
+	} irqregister;
+	struct __attribute__ ((__packed__)) {
+		xlnk_int_type irq_id;
+	} irqunregister;
+	struct __attribute__ ((__packed__)) {
+		xlnk_int_type irq_id;
+		xlnk_int_type polling;
+		xlnk_int_type success;	/* out */
+	} irqwait;
+};
+
+#endif
diff --git a/drivers/staging/clocking-wizard/Kconfig b/drivers/staging/clocking-wizard/Kconfig
deleted file mode 100644
index 04be22dca9b6..000000000000
--- a/drivers/staging/clocking-wizard/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Xilinx Clocking Wizard Driver
-#
-
-config COMMON_CLK_XLNX_CLKWZRD
- tristate "Xilinx Clocking Wizard"
- depends on COMMON_CLK && OF
- help
- Support for the Xilinx Clocking Wizard IP core clock generator.
diff --git a/drivers/staging/clocking-wizard/Makefile b/drivers/staging/clocking-wizard/Makefile
deleted file mode 100644
index b1f915224d96..000000000000
--- a/drivers/staging/clocking-wizard/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clk-xlnx-clock-wizard.o
diff --git a/drivers/staging/clocking-wizard/TODO b/drivers/staging/clocking-wizard/TODO
deleted file mode 100644
index ebe99db7d153..000000000000
--- a/drivers/staging/clocking-wizard/TODO
+++ /dev/null
@@ -1,12 +0,0 @@
-TODO:
- - support for fractional multiplier
- - support for fractional divider (output 0 only)
- - support for set_rate() operations (may benefit from Stephen Boyd's
- refactoring of the clk primitives: https://lkml.org/lkml/2014/9/5/766)
- - review arithmetic
- - overflow after multiplication?
- - maximize accuracy before divisions
-
-Patches to:
- Greg Kroah-Hartman <gregkh@linuxfoundation.org>
- Sören Brinkmann <soren.brinkmann@xilinx.com>
diff --git a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
deleted file mode 100644
index e52a64be93f3..000000000000
--- a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
+++ /dev/null
@@ -1,333 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Xilinx 'Clocking Wizard' driver
- *
- * Copyright (C) 2013 - 2014 Xilinx
- *
- * Sören Brinkmann <soren.brinkmann@xilinx.com>
- */
-
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/module.h>
-#include <linux/err.h>
-
-#define WZRD_NUM_OUTPUTS 7
-#define WZRD_ACLK_MAX_FREQ 250000000UL
-
-#define WZRD_CLK_CFG_REG(n) (0x200 + 4 * (n))
-
-#define WZRD_CLKOUT0_FRAC_EN BIT(18)
-#define WZRD_CLKFBOUT_FRAC_EN BIT(26)
-
-#define WZRD_CLKFBOUT_MULT_SHIFT 8
-#define WZRD_CLKFBOUT_MULT_MASK (0xff << WZRD_CLKFBOUT_MULT_SHIFT)
-#define WZRD_DIVCLK_DIVIDE_SHIFT 0
-#define WZRD_DIVCLK_DIVIDE_MASK (0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
-#define WZRD_CLKOUT_DIVIDE_SHIFT 0
-#define WZRD_CLKOUT_DIVIDE_MASK (0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
-
-enum clk_wzrd_int_clks {
- wzrd_clk_mul,
- wzrd_clk_mul_div,
- wzrd_clk_int_max
-};
-
-/**
- * struct clk_wzrd:
- * @clk_data: Clock data
- * @nb: Notifier block
- * @base: Memory base
- * @clk_in1: Handle to input clock 'clk_in1'
- * @axi_clk: Handle to input clock 's_axi_aclk'
- * @clks_internal: Internal clocks
- * @clkout: Output clocks
- * @speed_grade: Speed grade of the device
- * @suspended: Flag indicating power state of the device
- */
-struct clk_wzrd {
- struct clk_onecell_data clk_data;
- struct notifier_block nb;
- void __iomem *base;
- struct clk *clk_in1;
- struct clk *axi_clk;
- struct clk *clks_internal[wzrd_clk_int_max];
- struct clk *clkout[WZRD_NUM_OUTPUTS];
- unsigned int speed_grade;
- bool suspended;
-};
-
-#define to_clk_wzrd(_nb) container_of(_nb, struct clk_wzrd, nb)
-
-/* maximum frequencies for input/output clocks per speed grade */
-static const unsigned long clk_wzrd_max_freq[] = {
- 800000000UL,
- 933000000UL,
- 1066000000UL
-};
-
-static int clk_wzrd_clk_notifier(struct notifier_block *nb, unsigned long event,
- void *data)
-{
- unsigned long max;
- struct clk_notifier_data *ndata = data;
- struct clk_wzrd *clk_wzrd = to_clk_wzrd(nb);
-
- if (clk_wzrd->suspended)
- return NOTIFY_OK;
-
- if (ndata->clk == clk_wzrd->clk_in1)
- max = clk_wzrd_max_freq[clk_wzrd->speed_grade - 1];
- else if (ndata->clk == clk_wzrd->axi_clk)
- max = WZRD_ACLK_MAX_FREQ;
- else
- return NOTIFY_DONE; /* should never happen */
-
- switch (event) {
- case PRE_RATE_CHANGE:
- if (ndata->new_rate > max)
- return NOTIFY_BAD;
- return NOTIFY_OK;
- case POST_RATE_CHANGE:
- case ABORT_RATE_CHANGE:
- default:
- return NOTIFY_DONE;
- }
-}
-
-static int __maybe_unused clk_wzrd_suspend(struct device *dev)
-{
- struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);
-
- clk_disable_unprepare(clk_wzrd->axi_clk);
- clk_wzrd->suspended = true;
-
- return 0;
-}
-
-static int __maybe_unused clk_wzrd_resume(struct device *dev)
-{
- int ret;
- struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);
-
- ret = clk_prepare_enable(clk_wzrd->axi_clk);
- if (ret) {
- dev_err(dev, "unable to enable s_axi_aclk\n");
- return ret;
- }
-
- clk_wzrd->suspended = false;
-
- return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(clk_wzrd_dev_pm_ops, clk_wzrd_suspend,
- clk_wzrd_resume);
-
-static int clk_wzrd_probe(struct platform_device *pdev)
-{
- int i, ret;
- u32 reg;
- unsigned long rate;
- const char *clk_name;
- struct clk_wzrd *clk_wzrd;
- struct device_node *np = pdev->dev.of_node;
-
- clk_wzrd = devm_kzalloc(&pdev->dev, sizeof(*clk_wzrd), GFP_KERNEL);
- if (!clk_wzrd)
- return -ENOMEM;
- platform_set_drvdata(pdev, clk_wzrd);
-
- clk_wzrd->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(clk_wzrd->base))
- return PTR_ERR(clk_wzrd->base);
-
- ret = of_property_read_u32(np, "speed-grade", &clk_wzrd->speed_grade);
- if (!ret) {
- if (clk_wzrd->speed_grade < 1 || clk_wzrd->speed_grade > 3) {
- dev_warn(&pdev->dev, "invalid speed grade '%d'\n",
- clk_wzrd->speed_grade);
- clk_wzrd->speed_grade = 0;
- }
- }
-
- clk_wzrd->clk_in1 = devm_clk_get(&pdev->dev, "clk_in1");
- if (IS_ERR(clk_wzrd->clk_in1)) {
- if (clk_wzrd->clk_in1 != ERR_PTR(-EPROBE_DEFER))
- dev_err(&pdev->dev, "clk_in1 not found\n");
- return PTR_ERR(clk_wzrd->clk_in1);
- }
-
- clk_wzrd->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
- if (IS_ERR(clk_wzrd->axi_clk)) {
- if (clk_wzrd->axi_clk != ERR_PTR(-EPROBE_DEFER))
- dev_err(&pdev->dev, "s_axi_aclk not found\n");
- return PTR_ERR(clk_wzrd->axi_clk);
- }
- ret = clk_prepare_enable(clk_wzrd->axi_clk);
- if (ret) {
- dev_err(&pdev->dev, "enabling s_axi_aclk failed\n");
- return ret;
- }
- rate = clk_get_rate(clk_wzrd->axi_clk);
- if (rate > WZRD_ACLK_MAX_FREQ) {
- dev_err(&pdev->dev, "s_axi_aclk frequency (%lu) too high\n",
- rate);
- ret = -EINVAL;
- goto err_disable_clk;
- }
-
- /* we don't support fractional div/mul yet */
- reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) &
- WZRD_CLKFBOUT_FRAC_EN;
- reg |= readl(clk_wzrd->base + WZRD_CLK_CFG_REG(2)) &
- WZRD_CLKOUT0_FRAC_EN;
- if (reg)
- dev_warn(&pdev->dev, "fractional div/mul not supported\n");
-
- /* register multiplier */
- reg = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) &
- WZRD_CLKFBOUT_MULT_MASK) >> WZRD_CLKFBOUT_MULT_SHIFT;
- clk_name = kasprintf(GFP_KERNEL, "%s_mul", dev_name(&pdev->dev));
- if (!clk_name) {
- ret = -ENOMEM;
- goto err_disable_clk;
- }
- clk_wzrd->clks_internal[wzrd_clk_mul] = clk_register_fixed_factor
- (&pdev->dev, clk_name,
- __clk_get_name(clk_wzrd->clk_in1),
- 0, reg, 1);
- kfree(clk_name);
- if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul])) {
- dev_err(&pdev->dev, "unable to register fixed-factor clock\n");
- ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul]);
- goto err_disable_clk;
- }
-
- /* register div */
- reg = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) &
- WZRD_DIVCLK_DIVIDE_MASK) >> WZRD_DIVCLK_DIVIDE_SHIFT;
- clk_name = kasprintf(GFP_KERNEL, "%s_mul_div", dev_name(&pdev->dev));
- if (!clk_name) {
- ret = -ENOMEM;
- goto err_rm_int_clk;
- }
-
- clk_wzrd->clks_internal[wzrd_clk_mul_div] = clk_register_fixed_factor
- (&pdev->dev, clk_name,
- __clk_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]),
- 0, 1, reg);
- if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div])) {
- dev_err(&pdev->dev, "unable to register divider clock\n");
- ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div]);
- goto err_rm_int_clk;
- }
-
- /* register div per output */
- for (i = WZRD_NUM_OUTPUTS - 1; i >= 0 ; i--) {
- const char *clkout_name;
-
- if (of_property_read_string_index(np, "clock-output-names", i,
- &clkout_name)) {
- dev_err(&pdev->dev,
- "clock output name not specified\n");
- ret = -EINVAL;
- goto err_rm_int_clks;
- }
- reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(2) + i * 12);
- reg &= WZRD_CLKOUT_DIVIDE_MASK;
- reg >>= WZRD_CLKOUT_DIVIDE_SHIFT;
- clk_wzrd->clkout[i] = clk_register_fixed_factor
- (&pdev->dev, clkout_name, clk_name, 0, 1, reg);
- if (IS_ERR(clk_wzrd->clkout[i])) {
- int j;
-
- for (j = i + 1; j < WZRD_NUM_OUTPUTS; j++)
- clk_unregister(clk_wzrd->clkout[j]);
- dev_err(&pdev->dev,
- "unable to register divider clock\n");
- ret = PTR_ERR(clk_wzrd->clkout[i]);
- goto err_rm_int_clks;
- }
- }
-
- kfree(clk_name);
-
- clk_wzrd->clk_data.clks = clk_wzrd->clkout;
- clk_wzrd->clk_data.clk_num = ARRAY_SIZE(clk_wzrd->clkout);
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_wzrd->clk_data);
-
- if (clk_wzrd->speed_grade) {
- clk_wzrd->nb.notifier_call = clk_wzrd_clk_notifier;
-
- ret = clk_notifier_register(clk_wzrd->clk_in1,
- &clk_wzrd->nb);
- if (ret)
- dev_warn(&pdev->dev,
- "unable to register clock notifier\n");
-
- ret = clk_notifier_register(clk_wzrd->axi_clk, &clk_wzrd->nb);
- if (ret)
- dev_warn(&pdev->dev,
- "unable to register clock notifier\n");
- }
-
- return 0;
-
-err_rm_int_clks:
- clk_unregister(clk_wzrd->clks_internal[1]);
-err_rm_int_clk:
- kfree(clk_name);
- clk_unregister(clk_wzrd->clks_internal[0]);
-err_disable_clk:
- clk_disable_unprepare(clk_wzrd->axi_clk);
-
- return ret;
-}
-
-static int clk_wzrd_remove(struct platform_device *pdev)
-{
- int i;
- struct clk_wzrd *clk_wzrd = platform_get_drvdata(pdev);
-
- of_clk_del_provider(pdev->dev.of_node);
-
- for (i = 0; i < WZRD_NUM_OUTPUTS; i++)
- clk_unregister(clk_wzrd->clkout[i]);
- for (i = 0; i < wzrd_clk_int_max; i++)
- clk_unregister(clk_wzrd->clks_internal[i]);
-
- if (clk_wzrd->speed_grade) {
- clk_notifier_unregister(clk_wzrd->axi_clk, &clk_wzrd->nb);
- clk_notifier_unregister(clk_wzrd->clk_in1, &clk_wzrd->nb);
- }
-
- clk_disable_unprepare(clk_wzrd->axi_clk);
-
- return 0;
-}
-
-static const struct of_device_id clk_wzrd_ids[] = {
- { .compatible = "xlnx,clocking-wizard" },
- { },
-};
-MODULE_DEVICE_TABLE(of, clk_wzrd_ids);
-
-static struct platform_driver clk_wzrd_driver = {
- .driver = {
- .name = "clk-wizard",
- .of_match_table = clk_wzrd_ids,
- .pm = &clk_wzrd_dev_pm_ops,
- },
- .probe = clk_wzrd_probe,
- .remove = clk_wzrd_remove,
-};
-module_platform_driver(clk_wzrd_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Soeren Brinkmann <soren.brinkmann@xilinx.com");
-MODULE_DESCRIPTION("Driver for the Xilinx Clocking Wizard IP core");
diff --git a/drivers/staging/fclk/Kconfig b/drivers/staging/fclk/Kconfig
new file mode 100644
index 000000000000..5f68261a206d
--- /dev/null
+++ b/drivers/staging/fclk/Kconfig
@@ -0,0 +1,9 @@
+#
+# Xilinx PL clk enabler
+#
+
+config XILINX_FCLK
+ tristate "Xilinx PL clock enabler"
+ depends on COMMON_CLK && OF
+	help
+ Support for the Xilinx fclk clock enabler.
diff --git a/drivers/staging/fclk/Makefile b/drivers/staging/fclk/Makefile
new file mode 100644
index 000000000000..71723036c94e
--- /dev/null
+++ b/drivers/staging/fclk/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_XILINX_FCLK) += xilinx_fclk.o
diff --git a/drivers/staging/fclk/TODO b/drivers/staging/fclk/TODO
new file mode 100644
index 000000000000..912325fe5f4d
--- /dev/null
+++ b/drivers/staging/fclk/TODO
@@ -0,0 +1,2 @@
+TODO:
+ - Remove this hack and clock adapt all the drivers.
diff --git a/drivers/staging/fclk/dt-binding.txt b/drivers/staging/fclk/dt-binding.txt
new file mode 100644
index 000000000000..23521608b4a8
--- /dev/null
+++ b/drivers/staging/fclk/dt-binding.txt
@@ -0,0 +1,16 @@
+Binding for Xilinx pl clocks
+
+This binding uses the common clock binding[1].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+ - compatible: Must be 'xlnx,fclk'
+ - clocks: Handle to input clock
+
+Example:
+ fclk3: fclk3 {
+ status = "disabled";
+ compatible = "xlnx,fclk";
+ clocks = <&clkc 71>;
+ };
diff --git a/drivers/staging/fclk/xilinx_fclk.c b/drivers/staging/fclk/xilinx_fclk.c
new file mode 100644
index 000000000000..0428b7aba946
--- /dev/null
+++ b/drivers/staging/fclk/xilinx_fclk.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx fclk clock driver.
+ * Copyright (c) 2017 - 2020 Xilinx Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+struct fclk_state {
+ struct device *dev;
+ struct clk *pl;
+};
+
+/* Match table for of_platform binding */
+static const struct of_device_id fclk_of_match[] = {
+ { .compatible = "xlnx,fclk",},
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, fclk_of_match);
+
+static ssize_t set_rate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fclk_state *st = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%lu\n", clk_get_rate(st->pl));
+}
+
+static ssize_t set_rate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret = 0;
+ unsigned long rate;
+ struct fclk_state *st = dev_get_drvdata(dev);
+
+ ret = kstrtoul(buf, 0, &rate);
+ if (ret)
+ return -EINVAL;
+
+ rate = clk_round_rate(st->pl, rate);
+ ret = clk_set_rate(st->pl, rate);
+
+ return ret ? ret : count;
+}
+
+static DEVICE_ATTR_RW(set_rate);
+
+static const struct attribute *fclk_ctrl_attrs[] = {
+ &dev_attr_set_rate.attr,
+ NULL,
+};
+
+static const struct attribute_group fclk_ctrl_attr_grp = {
+ .attrs = (struct attribute **)fclk_ctrl_attrs,
+};
+
+static int fclk_probe(struct platform_device *pdev)
+{
+ struct fclk_state *st;
+ int ret;
+ struct device *dev = &pdev->dev;
+
+ st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->dev = dev;
+ platform_set_drvdata(pdev, st);
+
+ st->pl = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(st->pl))
+ return PTR_ERR(st->pl);
+
+ ret = clk_prepare_enable(st->pl);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return ret;
+ }
+
+ ret = sysfs_create_group(&dev->kobj, &fclk_ctrl_attr_grp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int fclk_remove(struct platform_device *pdev)
+{
+ struct fclk_state *st = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(st->pl);
+ return 0;
+}
+
+static struct platform_driver fclk_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = fclk_of_match,
+ },
+ .probe = fclk_probe,
+ .remove = fclk_remove,
+};
+
+module_platform_driver(fclk_driver);
+
+MODULE_AUTHOR("Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>");
+MODULE_DESCRIPTION("fclk enable");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/xlnx_tsmux/Kconfig b/drivers/staging/xlnx_tsmux/Kconfig
new file mode 100644
index 000000000000..0c1d9498e35b
--- /dev/null
+++ b/drivers/staging/xlnx_tsmux/Kconfig
@@ -0,0 +1,11 @@
+config XLNX_TSMUX
+ tristate "Xilinx MPEG2 Transport Stream Muxer"
+ select DMA_SHARED_BUFFER
+ help
+ This driver is developed for mpeg2 transport stream muxer,
+ designed to allow passage of multimedia streams from the source
+ kernel sub-system, prepares mpeg2 transport stream and forward
+ to the sink kernel subsystem.
+
+ To compile this driver as a module, choose M here.
+ If unsure, choose N.
diff --git a/drivers/staging/xlnx_tsmux/MAINTAINERS b/drivers/staging/xlnx_tsmux/MAINTAINERS
new file mode 100644
index 000000000000..cfab4fa55698
--- /dev/null
+++ b/drivers/staging/xlnx_tsmux/MAINTAINERS
@@ -0,0 +1,4 @@
+XILINX MPG2TSMUX DRIVER
+M: Venkateshwar Rao <venkateshwar.rao.gannavarapu@xilinx.com>
+S: Maintained
+F: drivers/staging/xlnx_tsmux
diff --git a/drivers/staging/xlnx_tsmux/Makefile b/drivers/staging/xlnx_tsmux/Makefile
new file mode 100644
index 000000000000..4437068337e7
--- /dev/null
+++ b/drivers/staging/xlnx_tsmux/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_XLNX_TSMUX) += xlnx_mpg2tsmux.o
diff --git a/drivers/staging/xlnx_tsmux/dt-binding.txt b/drivers/staging/xlnx_tsmux/dt-binding.txt
new file mode 100644
index 000000000000..e4a7095d92e1
--- /dev/null
+++ b/drivers/staging/xlnx_tsmux/dt-binding.txt
@@ -0,0 +1,28 @@
+The Xilinx mpegtsmux IP reads the elementary streams from memory and
+writes the MPEG2 TS(transport stream) to memory.
+
+The mpeg2 ts muxer follows the dma descriptor based approach. Each DMA
+descriptor contains information about each of the elementary stream
+buffer properties and buffer address. It reads the descriptors one after
+the other and generates the TS packets with the information in the
+descriptor. The IP writes the generated TS packets at the output buffer
+address.
+
+Required properties:
+
+- compatible: must be "xlnx,tsmux-1.0"
+- interrupts: interrupt number
+- interrupt-parent: phandle for interrupt controller
+- reg: base address and size of the IP core
+- clock-names: must contain "ap_clk"
+- clocks: phandle to AXI Lite
+
+Example:
+	ts2mux: ts2mux@a0200000 {
+ compatible = "xlnx,tsmux-1.0";
+ interrupt-parent = <&gic>;
+ interrupts = <0 90 4>;
+ reg = <0x0 0xa0200000 0x0 0x30000>;
+ clock-names = "ap_clk";
+ clocks = <&misc_clk_0>;
+ };
diff --git a/drivers/staging/xlnx_tsmux/xlnx_mpg2tsmux.c b/drivers/staging/xlnx_tsmux/xlnx_mpg2tsmux.c
new file mode 100644
index 000000000000..60041864de25
--- /dev/null
+++ b/drivers/staging/xlnx_tsmux/xlnx_mpg2tsmux.c
@@ -0,0 +1,1568 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx TS mux driver
+ *
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Author: Venkateshwar Rao G <venkateshwar.rao.gannavarapu@xilinx.com>
+ */
+#include <linux/bitops.h>
+#include <linux/cdev.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmapool.h>
+#include <linux/dma-buf.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <uapi/linux/xlnx_mpg2tsmux_interface.h>
+
+#define DRIVER_NAME "mpegtsmux-1.0"
+#define DRIVER_CLASS "mpg2mux_ts_cls"
+#define DRIVER_MAX_DEV (10)
+
+/* Register offsets and bit masks */
+#define XTSMUX_RST_CTRL 0x00
+#define XTSMUX_GLBL_IER 0x04
+#define XTSMUX_IER_STAT 0x08
+#define XTSMUX_ISR_STAT 0x0c
+#define XTSMUX_ERR_STAT 0x10
+#define XTSMUX_LAST_NODE_PROCESSED 0x14
+#define XTSMUX_MUXCONTEXT_ADDR 0x20
+#define XTSMUX_STREAMCONTEXT_ADDR 0x30
+#define XTSMUX_NUM_STREAM_IDTBL 0x48
+#define XTSMUX_NUM_DESC 0x70
+#define XTSMUX_STREAM_IDTBL_ADDR 0x78
+#define XTSMUX_CONTEXT_DATA_SIZE 64
+
+#define XTSMUX_RST_CTRL_START_MASK BIT(0)
+#define XTSMUX_GLBL_IER_ENABLE_MASK BIT(0)
+#define XTSMUX_IER_ENABLE_MASK BIT(0)
+
+/* Number of input/output streams supported */
+#define XTSMUX_MAXIN_STRM 112
+#define XTSMUX_MAXIN_PLSTRM 16
+#define XTSMUX_MAXIN_TLSTRM (XTSMUX_MAXIN_STRM + XTSMUX_MAXIN_PLSTRM)
+#define XTSMUX_MAXOUT_STRM 112
+#define XTSMUX_MAXOUT_PLSTRM 16
+#define XTSMUX_MAXOUT_TLSTRM (XTSMUX_MAXOUT_STRM + XTSMUX_MAXOUT_PLSTRM)
+#define XTSMUX_POOL_SIZE 128
+/* Initial version is tested with 256 align only */
+#define XTSMUX_POOL_ALIGN 256
+#define XTSMUX_STRMBL_FREE 0
+#define XTSMUX_STRMBL_BUSY 1
+
+/**
+ * struct stream_context - struct to enqueue a stream context descriptor
+ * @command: stream context type
+ * @is_pcr_stream: flag for pcr (program clock reference) stream
+ * @stream_id: stream identification number
+ * @extended_stream_id: extended stream id
+ * @reserved1: reserved for hardware alignment
+ * @pid: packet id number
+ * @dmabuf_id: 0 for buf allocated by driver, nonzero for external buf
+ * @size_data_in: size in bytes of input buffer
+ * @pts: presentation time stamp
+ * @dts: decoding time stamp
+ * @in_buf_pointer: physical address of src buf address
+ * @reserved2: reserved for hardware alignment
+ * @insert_pcr: inserting pcr in stream context
+ * @reserved3: reserved for hardware alignment
+ * @pcr_extension: pcr extension number
+ * @pcr_base: pcr base number
+ */
+struct stream_context {
+ enum ts_mux_command command;
+ bool is_pcr_stream;
+ u8 stream_id;
+ u8 extended_stream_id;
+ u8 reserved1;
+ u16 pid;
+ u16 dmabuf_id;
+ u32 size_data_in;
+ u64 pts;
+ u64 dts;
+ u64 in_buf_pointer;
+ u32 reserved2;
+ bool insert_pcr;
+ bool reserved3;
+ u16 pcr_extension;
+ u64 pcr_base;
+};
+
+/**
+ * enum node_status_info - status of stream context
+ * @NOT_FILLED: node not filled
+ * @UPDATED_BY_DRIVER: updated by driver
+ * @READ_BY_IP: read by IP
+ * @USED_BY_IP: used by IP
+ * @NODE_INVALID: invalid node
+ */
+enum node_status_info {
+ NOT_FILLED = 0,
+ UPDATED_BY_DRIVER,
+ READ_BY_IP,
+ USED_BY_IP,
+ NODE_INVALID
+};
+
+/**
+ * enum stream_errors - stream context error type
+ * @NO_ERROR: no error
+ * @PARTIAL_FRAME_WRITTEN: partial frame written
+ * @DESCRIPTOR_NOT_READABLE: descriptor not readable
+ */
+enum stream_errors {
+ NO_ERROR = 0,
+ PARTIAL_FRAME_WRITTEN,
+ DESCRIPTOR_NOT_READABLE
+};
+
+/**
+ * struct stream_context_node - struct to describe stream node in linked list
+ * @node_number: node number to handle streams
+ * @node_status: status of stream node
+ * @element: stream context info
+ * @error_code: error codes
+ * @reserved1: reserved bits for hardware align
+ * @tail_pointer: physical address of next stream node in linked list
+ * @strm_phy_addr: physical address of stream context
+ * @node: struct of linked list head
+ * @reserved2: reserved for hardware align
+ */
+struct stream_context_node {
+ u32 node_number;
+ enum node_status_info node_status;
+ struct stream_context element;
+ enum stream_errors error_code;
+ u32 reserved1;
+ u64 tail_pointer;
+ u64 strm_phy_addr;
+ struct list_head node;
+ u64 reserved2;
+};
+
+/**
+ * struct stream_info - struct to describe streamid node in streamid table
+ * @pid: identification number of stream
+ * @continuity_counter: counter to maintain packet count for a stream
+ * @usageflag: flag to know free or under use for allocating streamid node
+ * @strmtbl_update: struct to know enqueue or dequeue streamid in table
+ */
+struct stream_info {
+ u16 pid;
+ u8 continuity_counter;
+ bool usageflag;
+ enum strmtbl_cnxt strmtbl_update;
+};
+
+/* Enum for error handling of mux context */
+enum mux_op_errs {
+ MUXER_NO_ERROR = 0,
+ ERROR_OUTPUT_BUFFER_IS_NOT_ACCESIBLE,
+ ERROR_PARTIAL_PACKET_WRITTEN
+};
+
+/**
+ * struct muxer_context - struct to describe mux node in linked list
+ * @node_status: status of mux node
+ * @reserved: reserved for hardware align
+ * @dst_buf_start_addr: physical address of dst buf
+ * @dst_buf_size: size of the output buffer
+ * @dst_buf_written: size of data written in dst buf
+ * @num_of_pkts_written: number of packets in dst buf
+ * @error_code: error status of mux node updated by IP
+ * @mux_phy_addr: physical address of muxer
+ * @node: struct of linked list head
+ */
+struct muxer_context {
+ enum node_status_info node_status;
+ u32 reserved;
+ u64 dst_buf_start_addr;
+ u32 dst_buf_size;
+ u32 dst_buf_written;
+ u32 num_of_pkts_written;
+ enum mux_op_errs error_code;
+ u64 mux_phy_addr;
+ struct list_head node;
+};
+
+/**
+ * struct xlnx_tsmux_dmabufintl - dma buf internal info
+ * @dbuf: reference to a buffer's dmabuf struct
+ * @attach: attachment to the buffer's dmabuf
+ * @sgt: scatterlist info for the buffer's dmabuf
+ * @dmabuf_addr: buffer physical address
+ * @dmabuf_fd: dma buffer fd
+ * @buf_id: dma buffer reference id
+ */
+struct xlnx_tsmux_dmabufintl {
+ struct dma_buf *dbuf;
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ dma_addr_t dmabuf_addr;
+ s32 dmabuf_fd;
+ u16 buf_id;
+};
+
+/**
+ * struct xlnx_tsmux - xilinx mpeg2 TS muxer device
+ * @dev: pointer to struct device instance used by the driver
+ * @iomem: base address of the HW/IP
+ * @chdev: char device handle
+ * @user_count: count of users who have opened the device
+ * @lock: spinlock to protect driver data structures
+ * @waitq: wait queue used by the driver
+ * @irq: irq number
+ * @id: device instance ID
+ * @num_inbuf: number of input buffers allocated using DMA
+ * @num_outbuf: number of output buffers allocated using DMA
+ * @srcbuf_size: size of each source buffer
+ * @dstbuf_size: size of each destination buffer
+ * @strm_node: list containing descriptors of stream context
+ * @mux_node: list containing descriptors of mux context
+ * @stcxt_node_cnt: stream number used for maintaining list
+ * @num_strmnodes: number of stream nodes in the streamid table
+ * @intn_stream_count: internal count of streams added to stream context
+ * @outbuf_idx: index number to maintain output buffers
+ * @srcbuf_addrs: physical address of source buffer
+ * @dstbuf_addrs: physical address of destination buffer
+ * @src_kaddrs: kernel VA for source buffer allocated by the driver
+ * @dst_kaddrs: kernel VA for destination buffer allocated by the driver
+ * @strm_ctx_pool: dma pool to allocate stream context buffers
+ * @mux_ctx_pool: dma pool to allocate mux context buffers
+ * @strmtbl_addrs: physical address of streamid table
+ * @strmtbl_kaddrs: kernel VA for streamid table
+ * @intn_strmtbl_addrs: physical address of streamid table for internal
+ * @intn_strmtbl_kaddrs: kernel VA for streamid table for internal
+ * @ap_clk: interface clock
+ * @src_dmabufintl: array of src DMA buf allocated by user
+ * @dst_dmabufintl: array of dst DMA buf allocated by user
+ * @outbuf_written: size in bytes written in output buffer
+ * @stream_count: stream count
+ */
+struct xlnx_tsmux {
+ struct device *dev;
+ void __iomem *iomem;
+ struct cdev chdev;
+ atomic_t user_count;
+ /* lock is used to protect access to sync_err and wdg_err */
+ spinlock_t lock;
+ wait_queue_head_t waitq;
+ s32 irq;
+ s32 id;
+ u32 num_inbuf;
+ u32 num_outbuf;
+ size_t srcbuf_size;
+ size_t dstbuf_size;
+ struct list_head strm_node;
+ struct list_head mux_node;
+ u32 stcxt_node_cnt;
+ u32 num_strmnodes;
+ atomic_t intn_stream_count;
+ atomic_t outbuf_idx;
+ dma_addr_t srcbuf_addrs[XTSMUX_MAXIN_TLSTRM];
+ dma_addr_t dstbuf_addrs[XTSMUX_MAXOUT_TLSTRM];
+ void *src_kaddrs[XTSMUX_MAXIN_TLSTRM];
+ void *dst_kaddrs[XTSMUX_MAXOUT_TLSTRM];
+ struct dma_pool *strm_ctx_pool;
+ struct dma_pool *mux_ctx_pool;
+ dma_addr_t strmtbl_addrs;
+ void *strmtbl_kaddrs;
+ dma_addr_t intn_strmtbl_addrs;
+ void *intn_strmtbl_kaddrs;
+ struct clk *ap_clk;
+ struct xlnx_tsmux_dmabufintl src_dmabufintl[XTSMUX_MAXIN_STRM];
+ struct xlnx_tsmux_dmabufintl dst_dmabufintl[XTSMUX_MAXOUT_STRM];
+ s32 outbuf_written;
+ atomic_t stream_count;
+};
+
+static inline u32 xlnx_tsmux_read(const struct xlnx_tsmux *mpgmuxts,
+ const u32 reg)
+{
+ return ioread32(mpgmuxts->iomem + reg);
+}
+
+static inline void xlnx_tsmux_write(const struct xlnx_tsmux *mpgmuxts,
+ const u32 reg, const u32 val)
+{
+ iowrite32(val, (void __iomem *)(mpgmuxts->iomem + reg));
+}
+
+/* TODO: Optimize using iowrite64 call */
+static inline void xlnx_tsmux_write64(const struct xlnx_tsmux *mpgmuxts,
+ const u32 reg, const u64 val)
+{
+ iowrite32(lower_32_bits(val), (void __iomem *)(mpgmuxts->iomem + reg));
+ iowrite32(upper_32_bits(val), (void __iomem *)(mpgmuxts->iomem +
+ reg + 4));
+}
+
+static int xlnx_tsmux_start_muxer(struct xlnx_tsmux *mpgmuxts)
+{
+ struct stream_context_node *new_strm_node;
+ struct muxer_context *new_mux_node;
+
+ new_mux_node = list_first_entry_or_null(&mpgmuxts->mux_node,
+ struct muxer_context, node);
+ if (!new_mux_node)
+ return -ENXIO;
+
+ xlnx_tsmux_write64(mpgmuxts, XTSMUX_MUXCONTEXT_ADDR,
+ new_mux_node->mux_phy_addr);
+
+ new_strm_node = list_first_entry_or_null(&mpgmuxts->strm_node,
+ struct stream_context_node,
+ node);
+ if (!new_strm_node)
+ return -ENXIO;
+
+ xlnx_tsmux_write64(mpgmuxts, XTSMUX_STREAMCONTEXT_ADDR,
+ new_strm_node->strm_phy_addr);
+
+ xlnx_tsmux_write(mpgmuxts, XTSMUX_NUM_DESC,
+ atomic_read(&mpgmuxts->intn_stream_count));
+
+ xlnx_tsmux_write64(mpgmuxts, XTSMUX_STREAM_IDTBL_ADDR,
+ (u64)mpgmuxts->intn_strmtbl_addrs);
+ xlnx_tsmux_write(mpgmuxts, XTSMUX_NUM_STREAM_IDTBL, 1);
+ xlnx_tsmux_write(mpgmuxts, XTSMUX_GLBL_IER,
+ XTSMUX_GLBL_IER_ENABLE_MASK);
+ xlnx_tsmux_write(mpgmuxts, XTSMUX_IER_STAT,
+ XTSMUX_IER_ENABLE_MASK);
+
+ xlnx_tsmux_write(mpgmuxts, XTSMUX_RST_CTRL,
+ XTSMUX_RST_CTRL_START_MASK);
+
+ return 0;
+}
+
+static void xlnx_tsmux_stop_muxer(const struct xlnx_tsmux *mpgmuxts)
+{
+ xlnx_tsmux_write(mpgmuxts, XTSMUX_GLBL_IER, 0);
+ xlnx_tsmux_write(mpgmuxts, XTSMUX_IER_STAT, 0);
+ xlnx_tsmux_write(mpgmuxts, XTSMUX_RST_CTRL, 0);
+}
+
+static enum xlnx_tsmux_status xlnx_tsmux_get_status(const struct
+ xlnx_tsmux * mpgmuxts)
+{
+ u32 status;
+
+ status = xlnx_tsmux_read(mpgmuxts, XTSMUX_RST_CTRL);
+
+ if (!status)
+ return MPG2MUX_ERROR;
+
+ if (status & XTSMUX_RST_CTRL_START_MASK)
+ return MPG2MUX_BUSY;
+
+ return MPG2MUX_READY;
+}
+
+static struct class *xlnx_tsmux_class;
+static dev_t xlnx_tsmux_devt;
+static atomic_t xlnx_tsmux_ndevs = ATOMIC_INIT(0);
+
+static int xlnx_tsmux_open(struct inode *pin, struct file *fptr)
+{
+ struct xlnx_tsmux *mpgtsmux;
+
+ mpgtsmux = container_of(pin->i_cdev, struct xlnx_tsmux, chdev);
+
+ fptr->private_data = mpgtsmux;
+ atomic_inc(&mpgtsmux->user_count);
+ atomic_set(&mpgtsmux->outbuf_idx, 0);
+ mpgtsmux->stcxt_node_cnt = 0;
+
+ return 0;
+}
+
+static int xlnx_tsmux_release(struct inode *pin, struct file *fptr)
+{
+ struct xlnx_tsmux *mpgtsmux = (struct xlnx_tsmux *)fptr->private_data;
+
+ if (!mpgtsmux)
+ return -EIO;
+
+ return 0;
+}
+
+/* TODO: Optimize buf alloc, dealloc API's to accommodate src, dst, strmtbl */
+static int xlnx_tsmux_ioctl_srcbuf_dealloc(struct xlnx_tsmux *mpgmuxts)
+{
+ unsigned int i;
+
+ for (i = 0; i < mpgmuxts->num_inbuf; i++) {
+ if (!mpgmuxts->src_kaddrs[i] || !mpgmuxts->srcbuf_addrs[i])
+ break;
+ dma_free_coherent(mpgmuxts->dev, mpgmuxts->srcbuf_size,
+ mpgmuxts->src_kaddrs[i],
+ mpgmuxts->srcbuf_addrs[i]);
+ mpgmuxts->src_kaddrs[i] = NULL;
+ }
+
+ return 0;
+}
+
+static int xlnx_tsmux_ioctl_srcbuf_alloc(struct xlnx_tsmux *mpgmuxts,
+ void __user *arg)
+{
+ int ret;
+ unsigned int i;
+ struct strc_bufs_info buf_data;
+
+ ret = copy_from_user(&buf_data, arg, sizeof(struct strc_bufs_info));
+ if (ret < 0) {
+ dev_dbg(mpgmuxts->dev, "Failed to read input buffer info\n");
+ return ret;
+ }
+
+ if (buf_data.num_buf > XTSMUX_MAXIN_PLSTRM) {
+ dev_dbg(mpgmuxts->dev, "Excessive input payload. supported %d",
+ XTSMUX_MAXIN_PLSTRM);
+ return -EINVAL;
+ }
+
+ mpgmuxts->num_inbuf = buf_data.num_buf;
+ mpgmuxts->srcbuf_size = buf_data.buf_size;
+ /* buf_size & num_buf boundary conditions are handled in application
+ * and initial version of driver tested with 32-bit addressing only
+ */
+ for (i = 0; i < mpgmuxts->num_inbuf; i++) {
+ mpgmuxts->src_kaddrs[i] =
+ dma_alloc_coherent(mpgmuxts->dev,
+ mpgmuxts->srcbuf_size,
+ &mpgmuxts->srcbuf_addrs[i],
+ GFP_KERNEL | GFP_DMA32);
+ if (!mpgmuxts->src_kaddrs[i]) {
+ dev_dbg(mpgmuxts->dev, "dma alloc fail %d buffer", i);
+ goto exit_free;
+ }
+ }
+
+ return 0;
+
+exit_free:
+ xlnx_tsmux_ioctl_srcbuf_dealloc(mpgmuxts);
+
+ return -ENOMEM;
+}
+
+static int xlnx_tsmux_ioctl_dstbuf_dealloc(struct xlnx_tsmux *mpgmuxts)
+{
+ unsigned int i;
+
+ for (i = 0; i < mpgmuxts->num_outbuf; i++) {
+ if (!mpgmuxts->dst_kaddrs[i] || !mpgmuxts->dstbuf_addrs[i])
+ break;
+ dma_free_coherent(mpgmuxts->dev, mpgmuxts->dstbuf_size,
+ mpgmuxts->dst_kaddrs[i],
+ mpgmuxts->dstbuf_addrs[i]);
+ mpgmuxts->dst_kaddrs[i] = NULL;
+ }
+
+ return 0;
+}
+
+/* Allocate coherent DMA destination (output) buffers as described by the
+ * user-supplied struct strc_bufs_info. Partially-allocated buffers are
+ * released on failure.
+ */
+static int xlnx_tsmux_ioctl_dstbuf_alloc(struct xlnx_tsmux *mpgmuxts,
+					 void __user *arg)
+{
+	unsigned int i;
+	struct strc_bufs_info buf_data;
+
+	/*
+	 * Fix: copy_from_user() returns the number of bytes NOT copied
+	 * (always >= 0), never a negative errno, so the old "ret < 0" test
+	 * could never detect a fault. Map any short copy to -EFAULT.
+	 */
+	if (copy_from_user(&buf_data, arg, sizeof(struct strc_bufs_info))) {
+		dev_dbg(mpgmuxts->dev, "%s: Failed to read output buffer info",
+			__func__);
+		return -EFAULT;
+	}
+
+	if (buf_data.num_buf > XTSMUX_MAXOUT_PLSTRM) {
+		dev_dbg(mpgmuxts->dev, "Excessive output payload supported %d",
+			XTSMUX_MAXOUT_PLSTRM);
+		return -EINVAL;
+	}
+
+	mpgmuxts->num_outbuf = buf_data.num_buf;
+	mpgmuxts->dstbuf_size = buf_data.buf_size;
+	/* buf_size & num_buf boundary conditions are handled in application */
+	for (i = 0; i < mpgmuxts->num_outbuf; i++) {
+		mpgmuxts->dst_kaddrs[i] =
+			dma_alloc_coherent(mpgmuxts->dev,
+					   mpgmuxts->dstbuf_size,
+					   &mpgmuxts->dstbuf_addrs[i],
+					   GFP_KERNEL | GFP_DMA32);
+		if (!mpgmuxts->dst_kaddrs[i]) {
+			dev_dbg(mpgmuxts->dev, "dmamem alloc fail for %d", i);
+			goto exit_free;
+		}
+	}
+
+	return 0;
+
+exit_free:
+	/* Undo the buffers already allocated in this call. */
+	xlnx_tsmux_ioctl_dstbuf_dealloc(mpgmuxts);
+
+	return -ENOMEM;
+}
+
+/* Free the application-visible stream-id table and its internal mirror.
+ * Both tables share the same size. Safe to call when nothing was allocated.
+ */
+static int xlnx_tsmux_ioctl_strmtbl_dealloc(struct xlnx_tsmux *mpgmuxts)
+{
+	u32 tbl_size = sizeof(struct stream_info) * mpgmuxts->num_strmnodes;
+
+	/* External table was never allocated: nothing to release. */
+	if (!mpgmuxts->strmtbl_kaddrs || !mpgmuxts->strmtbl_addrs)
+		return 0;
+
+	dma_free_coherent(mpgmuxts->dev, tbl_size, mpgmuxts->strmtbl_kaddrs,
+			  mpgmuxts->strmtbl_addrs);
+	mpgmuxts->strmtbl_kaddrs = NULL;
+
+	if (mpgmuxts->intn_strmtbl_kaddrs && mpgmuxts->intn_strmtbl_addrs) {
+		dma_free_coherent(mpgmuxts->dev, tbl_size,
+				  mpgmuxts->intn_strmtbl_kaddrs,
+				  mpgmuxts->intn_strmtbl_addrs);
+		mpgmuxts->intn_strmtbl_kaddrs = NULL;
+	}
+
+	return 0;
+}
+
+/* Allocate the external and internal stream-id tables, sized by a u16 node
+ * count read from user space.
+ */
+static int xlnx_tsmux_ioctl_strmtbl_alloc(struct xlnx_tsmux *mpgmuxts,
+					  void __user *arg)
+{
+	int buf_size;
+	u16 num_nodes;
+
+	/*
+	 * Fix: copy_from_user() returns bytes-not-copied (>= 0), never a
+	 * negative errno; the old "ret < 0" check could never fire.
+	 */
+	if (copy_from_user(&num_nodes, arg, sizeof(u16))) {
+		dev_dbg(mpgmuxts->dev, "Failed to read streamid table info");
+		return -EFAULT;
+	}
+	mpgmuxts->num_strmnodes = num_nodes;
+	buf_size = sizeof(struct stream_info) * mpgmuxts->num_strmnodes;
+
+	mpgmuxts->strmtbl_kaddrs =
+		dma_alloc_coherent(mpgmuxts->dev,
+				   buf_size, &mpgmuxts->strmtbl_addrs,
+				   GFP_KERNEL | GFP_DMA32);
+	if (!mpgmuxts->strmtbl_kaddrs) {
+		dev_dbg(mpgmuxts->dev, "dmamem alloc fail for strm table");
+		return -ENOMEM;
+	}
+
+	/* Allocating memory for internal streamid table */
+	mpgmuxts->intn_strmtbl_kaddrs =
+		dma_alloc_coherent(mpgmuxts->dev,
+				   buf_size, &mpgmuxts->intn_strmtbl_addrs,
+				   GFP_KERNEL | GFP_DMA32);
+	if (!mpgmuxts->intn_strmtbl_kaddrs) {
+		dev_dbg(mpgmuxts->dev, "dmamem alloc fail for intr strm table");
+		goto exist_free;
+	}
+
+	return 0;
+exist_free:
+	/* Releases the external table allocated above. */
+	xlnx_tsmux_ioctl_strmtbl_dealloc(mpgmuxts);
+
+	return -ENOMEM;
+}
+
+/**
+ * xlnx_tsmux_update_intstrm_tbl - updates stream id table
+ * @mpgmuxts: pointer to the device structure
+ *
+ * Mirrors pending entries from the application-visible stream-id table
+ * (strmtbl_kaddrs) into the internal table (intn_strmtbl_kaddrs). Each
+ * pending entry either adds its PID to the first free internal slot
+ * (ADD_TO_TBL) or clears the matching PID (DEL_FR_TBL). Processed
+ * external entries have their usageflag cleared.
+ *
+ * Return: 0 on success and error value on failure.
+ *
+ */
+static int xlnx_tsmux_update_intstrm_tbl(struct xlnx_tsmux *mpgmuxts)
+{
+ struct stream_info *cptr, *intn_cptr;
+ int i, j;
+
+ cptr = (struct stream_info *)mpgmuxts->strmtbl_kaddrs;
+
+ /* First entry idle means there are no pending updates at all. */
+ if (!cptr->usageflag)
+ return 0;
+
+ /* Walk consecutive pending entries; stop at the first idle one. */
+ for (i = 0; i < mpgmuxts->num_strmnodes && cptr->usageflag;
+ i++, cptr++) {
+ intn_cptr = (struct stream_info *)mpgmuxts->intn_strmtbl_kaddrs;
+ /* Adding to table */
+ if (cptr->strmtbl_update == ADD_TO_TBL) {
+ for (j = 0; j < mpgmuxts->num_strmnodes;
+ j++, intn_cptr++) {
+ if (!intn_cptr->usageflag) {
+ intn_cptr->pid = cptr->pid;
+ intn_cptr->continuity_counter = 0;
+ intn_cptr->usageflag = 1;
+ cptr->usageflag = 0;
+ break;
+ }
+ }
+ /* No free internal slot: the table is full. */
+ if (j == mpgmuxts->num_strmnodes)
+ return -EIO;
+ } else if (cptr->strmtbl_update == DEL_FR_TBL) {
+ /* deleting from table */
+ for (j = 0; j < mpgmuxts->num_strmnodes; j++,
+ intn_cptr++) {
+ if (intn_cptr->usageflag) {
+ if (intn_cptr->pid == cptr->pid) {
+ intn_cptr->pid = 0;
+ intn_cptr->continuity_counter = 0;
+ intn_cptr->usageflag = 0;
+ cptr->usageflag = 0;
+ break;
+ }
+ }
+ }
+ /* PID not found in the internal table. */
+ if (j == mpgmuxts->num_strmnodes)
+ return -EIO;
+ } else {
+ /* Unknown update opcode from the application. */
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/* Claim the first unused slot of the shared stream-id table and record the
+ * new stream's PID and pending update opcode there.
+ */
+static int xlnx_tsmux_update_strminfo_table(struct xlnx_tsmux *mpgmuxts,
+					    struct strc_strminfo new_strm_info)
+{
+	struct stream_info *node;
+	u32 idx;
+
+	node = (struct stream_info *)mpgmuxts->strmtbl_kaddrs;
+
+	for (idx = 0; idx < mpgmuxts->num_strmnodes; idx++, node++) {
+		if (node->usageflag)
+			continue;
+		node->pid = new_strm_info.pid;
+		node->continuity_counter = 0;
+		node->usageflag = XTSMUX_STRMBL_BUSY;
+		node->strmtbl_update = new_strm_info.strmtbl_ctxt;
+		return 0;
+	}
+
+	/* Every slot is busy: the table is full. */
+	return -EIO;
+}
+
+/* Copy a struct strc_strminfo from user space and record it in the shared
+ * stream-id table.
+ */
+static int xlnx_tsmux_ioctl_update_strmtbl(struct xlnx_tsmux *mpgmuxts,
+					   void __user *arg)
+{
+	struct strc_strminfo new_strm_info;
+
+	/*
+	 * Fix: copy_from_user() returns bytes-not-copied (>= 0); the old
+	 * "ret < 0" check could never detect a fault.
+	 */
+	if (copy_from_user(&new_strm_info, arg,
+			   sizeof(struct strc_strminfo))) {
+		dev_dbg(mpgmuxts->dev, "Reading strmInfo failed");
+		return -EFAULT;
+	}
+
+	return xlnx_tsmux_update_strminfo_table(mpgmuxts, new_strm_info);
+}
+
+/* Build a hardware stream-context node from user-supplied data, link it to
+ * the previous node's tail pointer, and append it to the driver's stream
+ * list.
+ */
+static int xlnx_tsmux_enqueue_stream_context(struct xlnx_tsmux *mpgmuxts,
+					     struct
+					     stream_context_in * stream_data)
+{
+	struct stream_context_node *new_strm_node, *prev_strm_node;
+	void *kaddr_strm_node;
+	dma_addr_t strm_phy_addr;
+	unsigned long flags;
+	u32 i;
+
+	kaddr_strm_node = dma_pool_alloc(mpgmuxts->strm_ctx_pool,
+					 GFP_KERNEL | GFP_DMA32,
+					 &strm_phy_addr);
+	if (!kaddr_strm_node)
+		return -ENOMEM;
+
+	new_strm_node = (struct stream_context_node *)kaddr_strm_node;
+
+	/* update the stream context node */
+	wmb();
+	new_strm_node->element.command = stream_data->command;
+	new_strm_node->element.is_pcr_stream = stream_data->is_pcr_stream;
+	new_strm_node->element.stream_id = stream_data->stream_id;
+	new_strm_node->element.extended_stream_id =
+		stream_data->extended_stream_id;
+	new_strm_node->element.pid = stream_data->pid;
+	new_strm_node->element.size_data_in = stream_data->size_data_in;
+	new_strm_node->element.pts = stream_data->pts;
+	new_strm_node->element.dts = stream_data->dts;
+	new_strm_node->element.insert_pcr = stream_data->insert_pcr;
+	new_strm_node->element.pcr_base = stream_data->pcr_base;
+	new_strm_node->element.pcr_extension = stream_data->pcr_extension;
+
+	/* Check for external dma buffer */
+	if (!stream_data->is_dmabuf) {
+		/*
+		 * Fix: srcbuf_id comes from user space and indexes
+		 * srcbuf_addrs[]; reject out-of-range values instead of
+		 * reading past the array.
+		 */
+		if (stream_data->srcbuf_id >= mpgmuxts->num_inbuf) {
+			dev_err(mpgmuxts->dev, "Invalid srcbuf_id %d",
+				stream_data->srcbuf_id);
+			dma_pool_free(mpgmuxts->strm_ctx_pool, new_strm_node,
+				      strm_phy_addr);
+			return -EINVAL;
+		}
+		new_strm_node->element.in_buf_pointer =
+			mpgmuxts->srcbuf_addrs[stream_data->srcbuf_id];
+		new_strm_node->element.dmabuf_id = 0;
+	} else {
+		for (i = 0; i < XTSMUX_MAXIN_STRM; i++) {
+			/* Searching dma buf info based on srcbuf_id */
+			if (stream_data->srcbuf_id ==
+			    mpgmuxts->src_dmabufintl[i].dmabuf_fd) {
+				new_strm_node->element.in_buf_pointer =
+					mpgmuxts->src_dmabufintl[i].dmabuf_addr;
+				new_strm_node->element.dmabuf_id =
+					mpgmuxts->src_dmabufintl[i].buf_id;
+				break;
+			}
+		}
+
+		/* No dma buf found with srcbuf_id */
+		if (i == XTSMUX_MAXIN_STRM) {
+			dev_err(mpgmuxts->dev, "No DMA buffer with %d",
+				stream_data->srcbuf_id);
+			/* Fix: return the pool block; it used to leak here. */
+			dma_pool_free(mpgmuxts->strm_ctx_pool, new_strm_node,
+				      strm_phy_addr);
+			return -ENOMEM;
+		}
+	}
+
+	new_strm_node->strm_phy_addr = (u64)strm_phy_addr;
+	new_strm_node->node_number = mpgmuxts->stcxt_node_cnt + 1;
+	mpgmuxts->stcxt_node_cnt++;
+	new_strm_node->node_status = UPDATED_BY_DRIVER;
+	new_strm_node->error_code = NO_ERROR;
+	new_strm_node->tail_pointer = 0;
+
+	spin_lock_irqsave(&mpgmuxts->lock, flags);
+	/* If it is not first stream in stream node linked list find
+	 * physical address of current node and add to last node in list
+	 */
+	if (!list_empty_careful(&mpgmuxts->strm_node)) {
+		prev_strm_node = list_last_entry(&mpgmuxts->strm_node,
+						 struct stream_context_node,
+						 node);
+		prev_strm_node->tail_pointer = new_strm_node->strm_phy_addr;
+	}
+	/* update the list and stream count */
+	wmb();
+	list_add_tail(&new_strm_node->node, &mpgmuxts->strm_node);
+	atomic_inc(&mpgmuxts->stream_count);
+	spin_unlock_irqrestore(&mpgmuxts->lock, flags);
+
+	return 0;
+}
+
+/* Copy a stream_context_in descriptor from user space and enqueue it as a
+ * stream-context node. The temporary kernel copy is always freed.
+ */
+static int xlnx_tsmux_set_stream_desc(struct xlnx_tsmux *mpgmuxts,
+				      void __user *arg)
+{
+	struct stream_context_in *stream_data;
+	int ret;
+
+	stream_data = kzalloc(sizeof(*stream_data), GFP_KERNEL);
+	if (!stream_data)
+		return -ENOMEM;
+
+	/*
+	 * Fix: copy_from_user() returns the (positive) number of bytes not
+	 * copied; the old code propagated that positive value to the caller.
+	 * Return -EFAULT instead.
+	 */
+	if (copy_from_user(stream_data, arg,
+			   sizeof(struct stream_context_in))) {
+		dev_err(mpgmuxts->dev, "Failed to copy stream data from user");
+		ret = -EFAULT;
+		goto error_free;
+	}
+
+	ret = xlnx_tsmux_enqueue_stream_context(mpgmuxts, stream_data);
+
+error_free:
+	kfree(stream_data);
+
+	return ret;
+}
+
+/* ioctl wrapper: enqueue one stream descriptor, logging on failure. */
+static int xlnx_tsmux_ioctl_set_stream_context(struct xlnx_tsmux *mpgmuxts,
+					       void __user *arg)
+{
+	int err = xlnx_tsmux_set_stream_desc(mpgmuxts, arg);
+
+	if (err < 0) {
+		dev_err(mpgmuxts->dev, "Setting stream descripter failed");
+		return err;
+	}
+
+	return 0;
+}
+
+/* Read the IP core state and translate it for ioctl callers.
+ *
+ * NOTE(review): despite the enum return type this returns negative errno
+ * values (-EACCES, -EBUSY) for the error/busy states and the enum value
+ * MPG2MUX_READY otherwise; callers compare against MPG2MUX_READY and may
+ * propagate the negative values as errno.
+ */
+static enum xlnx_tsmux_status xlnx_tsmux_get_device_status(struct xlnx_tsmux *
+							   mpgmuxts)
+{
+ enum xlnx_tsmux_status ip_status;
+
+ ip_status = xlnx_tsmux_get_status(mpgmuxts);
+
+ if (ip_status == MPG2MUX_ERROR) {
+ dev_err(mpgmuxts->dev, "Failed to get device status");
+ return -EACCES;
+ }
+
+ if (ip_status == MPG2MUX_BUSY)
+ return -EBUSY;
+
+ return MPG2MUX_READY;
+}
+
+/* Start the muxer: requires an idle IP core, at least one queued mux
+ * context and one queued stream context, and a synced stream-id table.
+ */
+static int xlnx_tsmux_ioctl_start(struct xlnx_tsmux *mpgmuxts)
+{
+	enum xlnx_tsmux_status state;
+	int queued, err;
+
+	/* get IP status */
+	state = xlnx_tsmux_get_device_status(mpgmuxts);
+	if (state != MPG2MUX_READY) {
+		dev_err(mpgmuxts->dev, "device is busy");
+		return state;
+	}
+
+	if (list_empty(&mpgmuxts->mux_node) ||
+	    list_empty(&mpgmuxts->strm_node)) {
+		dev_err(mpgmuxts->dev, "No stream or mux to start device");
+		return -EIO;
+	}
+
+	/* Snapshot the number of queued streams for this run. */
+	queued = atomic_read(&mpgmuxts->stream_count);
+	atomic_set(&mpgmuxts->intn_stream_count, queued);
+
+	/* update streamid table */
+	err = xlnx_tsmux_update_intstrm_tbl(mpgmuxts);
+	if (err < 0) {
+		dev_err(mpgmuxts->dev, "Update streamid intn table failed\n");
+		return err;
+	}
+
+	return xlnx_tsmux_start_muxer(mpgmuxts);
+}
+
+/* Destroy both DMA pools; dma_pool_destroy() tolerates NULL. */
+static void xlnx_tsmux_free_dmalloc(struct xlnx_tsmux *mpgmuxts)
+{
+	dma_pool_destroy(mpgmuxts->mux_ctx_pool);
+	dma_pool_destroy(mpgmuxts->strm_ctx_pool);
+}
+
+/* Stop the muxer: requires an idle IP core; releases the context pools,
+ * resets both context lists and stops the IP.
+ *
+ * NOTE(review): this destroys strm_ctx_pool/mux_ctx_pool, which are only
+ * created in probe; nothing visible here recreates them, so a subsequent
+ * SETSTRM/SETMUX would allocate from destroyed pools — verify against the
+ * rest of the driver.
+ */
+static int xlnx_tsmux_ioctl_stop(struct xlnx_tsmux *mpgmuxts)
+{
+ enum xlnx_tsmux_status ip_stat;
+ unsigned long flags;
+
+ ip_stat = xlnx_tsmux_get_device_status(mpgmuxts);
+ if (ip_stat != MPG2MUX_READY) {
+ dev_err(mpgmuxts->dev, "device is busy");
+ return ip_stat;
+ }
+
+ /* Free all driver allocated memory and reset linked list
+ * Reset IP registers
+ */
+ xlnx_tsmux_free_dmalloc(mpgmuxts);
+ spin_lock_irqsave(&mpgmuxts->lock, flags);
+ INIT_LIST_HEAD(&mpgmuxts->strm_node);
+ INIT_LIST_HEAD(&mpgmuxts->mux_node);
+ spin_unlock_irqrestore(&mpgmuxts->lock, flags);
+ xlnx_tsmux_stop_muxer(mpgmuxts);
+
+ return 0;
+}
+
+/* Report the IP core status to user space. */
+static int xlnx_tsmux_ioctl_get_status(struct xlnx_tsmux *mpgmuxts,
+				       void __user *arg)
+{
+	enum xlnx_tsmux_status state;
+
+	state = xlnx_tsmux_get_device_status(mpgmuxts);
+
+	if (copy_to_user(arg, (void *)&state,
+			 (unsigned long)(sizeof(enum xlnx_tsmux_status)))) {
+		dev_err(mpgmuxts->dev, "Unable to copy device status to user");
+		return -EACCES;
+	}
+
+	return 0;
+}
+
+/* Report the bytes written to the current output buffer (clearing the
+ * driver-side counter) and which buffer the application should read next.
+ */
+static int xlnx_tsmux_ioctl_get_outbufinfo(struct xlnx_tsmux *mpgmuxts,
+					   void __user *arg)
+{
+	struct out_buffer out_info;
+
+	out_info.buf_write = mpgmuxts->outbuf_written;
+	mpgmuxts->outbuf_written = 0;
+	/* The buffer ids ping-pong: report the one opposite the index. */
+	out_info.buf_id = atomic_read(&mpgmuxts->outbuf_idx) ? 0 : 1;
+
+	if (copy_to_user(arg, (void *)&out_info,
+			 (unsigned long)(sizeof(struct out_buffer)))) {
+		dev_err(mpgmuxts->dev, "Unable to copy outbuf info");
+		return -EACCES;
+	}
+
+	return 0;
+}
+
+/* Build a hardware muxer-context node describing the destination buffer
+ * and append it to the driver's mux list.
+ */
+static int xlnx_tsmux_enqueue_mux_context(struct xlnx_tsmux *mpgmuxts,
+					  struct muxer_context_in *mux_data)
+{
+	struct muxer_context *new_mux_node;
+	u32 out_index;
+	void *kaddr_mux_node;
+	dma_addr_t mux_phy_addr;
+	unsigned long flags;
+	s32 i;
+
+	kaddr_mux_node = dma_pool_alloc(mpgmuxts->mux_ctx_pool,
+					GFP_KERNEL | GFP_DMA32,
+					&mux_phy_addr);
+	if (!kaddr_mux_node)
+		return -EAGAIN;
+
+	new_mux_node = (struct muxer_context *)kaddr_mux_node;
+
+	new_mux_node->node_status = UPDATED_BY_DRIVER;
+	new_mux_node->mux_phy_addr = (u64)mux_phy_addr;
+
+	/* Check for external dma buffer */
+	if (!mux_data->is_dmabuf) {
+		out_index = 0;
+		new_mux_node->dst_buf_start_addr =
+			(u64)mpgmuxts->dstbuf_addrs[out_index];
+		new_mux_node->dst_buf_size = mpgmuxts->dstbuf_size;
+		if (out_index)
+			atomic_set(&mpgmuxts->outbuf_idx, 0);
+		else
+			atomic_set(&mpgmuxts->outbuf_idx, 1);
+	} else {
+		for (i = 0; i < XTSMUX_MAXOUT_STRM; i++) {
+			if (mux_data->dstbuf_id ==
+			    mpgmuxts->dst_dmabufintl[i].dmabuf_fd) {
+				new_mux_node->dst_buf_start_addr =
+					mpgmuxts->dst_dmabufintl[i].dmabuf_addr;
+				break;
+			}
+		}
+		if (i == XTSMUX_MAXOUT_STRM) {
+			dev_err(mpgmuxts->dev, "No DMA buffer with %d",
+				mux_data->dstbuf_id);
+			/* Fix: return the pool block; it used to leak here. */
+			dma_pool_free(mpgmuxts->mux_ctx_pool, new_mux_node,
+				      mux_phy_addr);
+			return -ENOMEM;
+		}
+		new_mux_node->dst_buf_size = mux_data->dmabuf_size;
+	}
+	new_mux_node->error_code = MUXER_NO_ERROR;
+
+	spin_lock_irqsave(&mpgmuxts->lock, flags);
+	list_add_tail(&new_mux_node->node, &mpgmuxts->mux_node);
+	spin_unlock_irqrestore(&mpgmuxts->lock, flags);
+
+	return 0;
+}
+
+/* Copy a muxer_context_in descriptor from user space and enqueue it as a
+ * muxer-context node. The temporary kernel copy is always freed.
+ */
+static int xlnx_tsmux_set_mux_desc(struct xlnx_tsmux *mpgmuxts,
+				   void __user *arg)
+{
+	struct muxer_context_in *mux_data;
+	int ret;
+
+	mux_data = kzalloc(sizeof(*mux_data), GFP_KERNEL);
+	if (!mux_data)
+		return -ENOMEM;
+
+	/*
+	 * Fix: copy_from_user() returns bytes-not-copied (positive), so
+	 * map failures to -EFAULT; and free mux_data on the success path
+	 * too — it used to leak because enqueue's result was returned
+	 * directly, skipping kfree(). The enqueue copies every field it
+	 * needs, so freeing here is safe.
+	 */
+	if (copy_from_user(mux_data, arg,
+			   sizeof(struct muxer_context_in))) {
+		dev_err(mpgmuxts->dev, "failed to copy muxer data from user");
+		ret = -EFAULT;
+	} else {
+		ret = xlnx_tsmux_enqueue_mux_context(mpgmuxts, mux_data);
+	}
+
+	kfree(mux_data);
+
+	return ret;
+}
+
+/* ioctl wrapper: enqueue one muxer descriptor, logging on failure. */
+static int xlnx_tsmux_ioctl_set_mux_context(struct xlnx_tsmux *mpgmuxts,
+					    void __user *arg)
+{
+	int err = xlnx_tsmux_set_mux_desc(mpgmuxts, arg);
+
+	if (err < 0)
+		dev_dbg(mpgmuxts->dev, "Setting mux context failed");
+
+	return err;
+}
+
+/* Attach and map an externally supplied dma-buf, verify it is physically
+ * contiguous, and record it in the source or destination dma-buf table so
+ * later stream/mux contexts can reference it by fd.
+ */
+static int xlnx_tsmux_ioctl_verify_dmabuf(struct xlnx_tsmux *mpgmuxts,
+					  void __user *arg)
+{
+	struct dma_buf *dbuf;
+	struct dma_buf_attachment *attach;
+	struct sg_table *sgt;
+	struct xlnx_tsmux_dmabuf_info *dbuf_info;
+	s32 i;
+	int ret = 0;
+
+	dbuf_info = kzalloc(sizeof(*dbuf_info), GFP_KERNEL);
+	if (!dbuf_info)
+		return -ENOMEM;
+
+	if (copy_from_user(dbuf_info, arg,
+			   sizeof(struct xlnx_tsmux_dmabuf_info))) {
+		dev_err(mpgmuxts->dev, "Failed to copy from user");
+		/* Fix: report -EFAULT, not the positive residue count. */
+		ret = -EFAULT;
+		goto dmak_free;
+	}
+	if (dbuf_info->dir != DMA_TO_MPG2MUX &&
+	    dbuf_info->dir != DMA_FROM_MPG2MUX) {
+		dev_err(mpgmuxts->dev, "Incorrect DMABUF direction %d",
+			dbuf_info->dir);
+		ret = -EINVAL;
+		goto dmak_free;
+	}
+	dbuf = dma_buf_get(dbuf_info->buf_fd);
+	if (IS_ERR(dbuf)) {
+		dev_err(mpgmuxts->dev, "dma_buf_get fail fd %d direction %d",
+			dbuf_info->buf_fd, dbuf_info->dir);
+		ret = PTR_ERR(dbuf);
+		goto dmak_free;
+	}
+	attach = dma_buf_attach(dbuf, mpgmuxts->dev);
+	if (IS_ERR(attach)) {
+		dev_err(mpgmuxts->dev, "dma_buf_attach fail fd %d dir %d",
+			dbuf_info->buf_fd, dbuf_info->dir);
+		ret = PTR_ERR(attach);
+		goto err_dmabuf_put;
+	}
+	sgt = dma_buf_map_attachment(attach,
+				     (enum dma_data_direction)(dbuf_info->dir));
+	if (IS_ERR(sgt)) {
+		dev_err(mpgmuxts->dev, "dma_buf_map_attach fail fd %d dir %d",
+			dbuf_info->buf_fd, dbuf_info->dir);
+		ret = PTR_ERR(sgt);
+		goto err_dmabuf_detach;
+	}
+
+	/* The IP takes a single start address: require one contiguous run. */
+	if (sgt->nents > 1) {
+		ret = -EIO;
+		dev_dbg(mpgmuxts->dev, "Not contig nents %d fd %d direction %d",
+			sgt->nents, dbuf_info->buf_fd, dbuf_info->dir);
+		goto err_dmabuf_unmap_attachment;
+	}
+	dev_dbg(mpgmuxts->dev, "dmabuf %s is physically contiguous",
+		(dbuf_info->dir ==
+		 DMA_TO_MPG2MUX ? "Source" : "Destination"));
+
+	if (dbuf_info->dir == DMA_TO_MPG2MUX) {
+		/* Record in the first free source slot; buf_id != 0 = used. */
+		for (i = 0; i < XTSMUX_MAXIN_STRM; i++) {
+			if (!mpgmuxts->src_dmabufintl[i].buf_id) {
+				mpgmuxts->src_dmabufintl[i].dbuf = dbuf;
+				mpgmuxts->src_dmabufintl[i].attach = attach;
+				mpgmuxts->src_dmabufintl[i].sgt = sgt;
+				mpgmuxts->src_dmabufintl[i].dmabuf_addr =
+					sg_dma_address(sgt->sgl);
+				mpgmuxts->src_dmabufintl[i].dmabuf_fd =
+					dbuf_info->buf_fd;
+				mpgmuxts->src_dmabufintl[i].buf_id = i + 1;
+				dev_dbg(mpgmuxts->dev,
+					"%s: phy-addr=0x%llx for src dmabuf=%d",
+					__func__,
+					mpgmuxts->src_dmabufintl[i].dmabuf_addr,
+					mpgmuxts->src_dmabufintl[i].dmabuf_fd);
+				break;
+			}
+		}
+		/* External src streams more than XTSMUX_MAXIN_STRM
+		 * can not be handled
+		 */
+		if (i == XTSMUX_MAXIN_STRM) {
+			ret = -EIO;
+			dev_dbg(mpgmuxts->dev, "src DMA bufs more than %d",
+				XTSMUX_MAXIN_STRM);
+			goto err_dmabuf_unmap_attachment;
+		}
+	} else {
+		for (i = 0; i < XTSMUX_MAXOUT_STRM; i++) {
+			if (!mpgmuxts->dst_dmabufintl[i].buf_id) {
+				mpgmuxts->dst_dmabufintl[i].dbuf = dbuf;
+				mpgmuxts->dst_dmabufintl[i].attach = attach;
+				mpgmuxts->dst_dmabufintl[i].sgt = sgt;
+				mpgmuxts->dst_dmabufintl[i].dmabuf_addr =
+					sg_dma_address(sgt->sgl);
+				mpgmuxts->dst_dmabufintl[i].dmabuf_fd =
+					dbuf_info->buf_fd;
+				mpgmuxts->dst_dmabufintl[i].buf_id = i + 1;
+				/* Fix: message said "src" on the dst path. */
+				dev_dbg(mpgmuxts->dev,
+					"phy-addr=0x%llx for dst dmabuf=%d",
+					mpgmuxts->dst_dmabufintl[i].dmabuf_addr,
+					mpgmuxts->dst_dmabufintl[i].dmabuf_fd);
+				break;
+			}
+		}
+		/* External dst streams more than XTSMUX_MAXOUT_STRM
+		 * can not be handled
+		 */
+		if (i == XTSMUX_MAXOUT_STRM) {
+			ret = -EIO;
+			dev_dbg(mpgmuxts->dev, "dst DMA bufs more than %d",
+				XTSMUX_MAXOUT_STRM);
+			goto err_dmabuf_unmap_attachment;
+		}
+	}
+
+	/* Fix: the temporary info struct leaked on the success path. */
+	kfree(dbuf_info);
+
+	return 0;
+
+err_dmabuf_unmap_attachment:
+	dma_buf_unmap_attachment(attach, sgt,
+				 (enum dma_data_direction)dbuf_info->dir);
+err_dmabuf_detach:
+	dma_buf_detach(dbuf, attach);
+err_dmabuf_put:
+	dma_buf_put(dbuf);
+dmak_free:
+	kfree(dbuf_info);
+
+	return ret;
+}
+
+/* Dispatch MPG2MUX_* ioctl commands to their handlers. */
+static long xlnx_tsmux_ioctl(struct file *fptr,
+			     unsigned int cmd, unsigned long data)
+{
+	struct xlnx_tsmux *mpgmuxts = fptr->private_data;
+	void __user *uarg = (void __user *)data;
+	int status;
+
+	if (!mpgmuxts)
+		return -EINVAL;
+
+	switch (cmd) {
+	case MPG2MUX_INBUFALLOC:
+		status = xlnx_tsmux_ioctl_srcbuf_alloc(mpgmuxts, uarg);
+		break;
+	case MPG2MUX_INBUFDEALLOC:
+		status = xlnx_tsmux_ioctl_srcbuf_dealloc(mpgmuxts);
+		break;
+	case MPG2MUX_OUTBUFALLOC:
+		status = xlnx_tsmux_ioctl_dstbuf_alloc(mpgmuxts, uarg);
+		break;
+	case MPG2MUX_OUTBUFDEALLOC:
+		status = xlnx_tsmux_ioctl_dstbuf_dealloc(mpgmuxts);
+		break;
+	case MPG2MUX_STBLALLOC:
+		status = xlnx_tsmux_ioctl_strmtbl_alloc(mpgmuxts, uarg);
+		break;
+	case MPG2MUX_STBLDEALLOC:
+		status = xlnx_tsmux_ioctl_strmtbl_dealloc(mpgmuxts);
+		break;
+	case MPG2MUX_TBLUPDATE:
+		status = xlnx_tsmux_ioctl_update_strmtbl(mpgmuxts, uarg);
+		break;
+	case MPG2MUX_SETSTRM:
+		status = xlnx_tsmux_ioctl_set_stream_context(mpgmuxts, uarg);
+		break;
+	case MPG2MUX_START:
+		status = xlnx_tsmux_ioctl_start(mpgmuxts);
+		break;
+	case MPG2MUX_STOP:
+		status = xlnx_tsmux_ioctl_stop(mpgmuxts);
+		break;
+	case MPG2MUX_STATUS:
+		status = xlnx_tsmux_ioctl_get_status(mpgmuxts, uarg);
+		break;
+	case MPG2MUX_GETOUTBUF:
+		status = xlnx_tsmux_ioctl_get_outbufinfo(mpgmuxts, uarg);
+		break;
+	case MPG2MUX_SETMUX:
+		status = xlnx_tsmux_ioctl_set_mux_context(mpgmuxts, uarg);
+		break;
+	case MPG2MUX_VDBUF:
+		status = xlnx_tsmux_ioctl_verify_dmabuf(mpgmuxts, uarg);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (status < 0)
+		dev_err(mpgmuxts->dev, "ioctl %d failed\n", cmd);
+
+	return status;
+}
+
+/* Map a driver-allocated source or destination buffer into user space.
+ * vm_pgoff selects the buffer: [0, num_inbuf) are source buffers,
+ * [num_inbuf, num_inbuf + num_outbuf) are destination buffers.
+ */
+static int xlnx_tsmux_mmap(struct file *fp, struct vm_area_struct *vma)
+{
+	struct xlnx_tsmux *mpgmuxts = fp->private_data;
+	size_t size = vma->vm_end - vma->vm_start;
+	int ret, buf_id;
+
+	if (!mpgmuxts)
+		return -ENODEV;
+
+	buf_id = vma->vm_pgoff;
+
+	if (buf_id < mpgmuxts->num_inbuf) {
+		if (!mpgmuxts->srcbuf_addrs[buf_id]) {
+			dev_err(mpgmuxts->dev, "Mem not allocated for src %d",
+				buf_id);
+			return -EINVAL;
+		}
+		/* Fix: never let user space map beyond the allocation. */
+		if (size > mpgmuxts->srcbuf_size)
+			return -EINVAL;
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+		ret = remap_pfn_range(vma, vma->vm_start,
+				      mpgmuxts->srcbuf_addrs[buf_id] >>
+				      PAGE_SHIFT, size,
+				      vma->vm_page_prot);
+		if (ret) {
+			dev_err(mpgmuxts->dev, "mmap fail bufid = %d", buf_id);
+			return -EINVAL;
+		}
+	} else if (buf_id < (mpgmuxts->num_inbuf + mpgmuxts->num_outbuf)) {
+		buf_id -= mpgmuxts->num_inbuf;
+		if (!mpgmuxts->dstbuf_addrs[buf_id]) {
+			dev_err(mpgmuxts->dev, "Mem not allocated fordst %d",
+				buf_id);
+			return -EINVAL;
+		}
+		if (size > mpgmuxts->dstbuf_size)
+			return -EINVAL;
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+		ret = remap_pfn_range(vma, vma->vm_start,
+				      mpgmuxts->dstbuf_addrs[buf_id] >>
+				      PAGE_SHIFT, size,
+				      vma->vm_page_prot);
+		if (ret) {
+			dev_err(mpgmuxts->dev, "mmap fail buf_id = %d", buf_id);
+			/* Fix: this error was previously swallowed and the
+			 * function fell through to "return 0".
+			 */
+			return -EINVAL;
+		}
+	} else {
+		dev_err(mpgmuxts->dev, "Wrong buffer id -> %d buf", buf_id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Poll readiness: data is available once the IP reports a processed node. */
+static __poll_t xlnx_tsmux_poll(struct file *fptr, poll_table *wait)
+{
+	struct xlnx_tsmux *mpgmuxts = fptr->private_data;
+	__poll_t mask = 0;
+
+	poll_wait(fptr, &mpgmuxts->waitq, wait);
+
+	if (xlnx_tsmux_read(mpgmuxts, XTSMUX_LAST_NODE_PROCESSED))
+		mask = POLLIN | POLLPRI;
+
+	return mask;
+}
+
+/* Character-device entry points for /dev/mpgmuxts%d. */
+static const struct file_operations mpg2mux_fops = {
+ .open = xlnx_tsmux_open,
+ .release = xlnx_tsmux_release,
+ .unlocked_ioctl = xlnx_tsmux_ioctl,
+ .mmap = xlnx_tsmux_mmap,
+ .poll = xlnx_tsmux_poll,
+};
+
+/* Release one tracked external dma-buf (1-based dmabuf_id): unmap, detach,
+ * drop the reference and mark the slot free. No-op for an empty slot.
+ */
+static void xlnx_tsmux_free_dmabufintl(struct xlnx_tsmux_dmabufintl
+				       *intl_dmabuf, u16 dmabuf_id,
+				       enum xlnx_tsmux_dma_dir dir)
+{
+	unsigned int slot = dmabuf_id - 1;
+
+	if (!intl_dmabuf[slot].dmabuf_fd)
+		return;
+
+	dma_buf_unmap_attachment(intl_dmabuf[slot].attach,
+				 intl_dmabuf[slot].sgt,
+				 (enum dma_data_direction)dir);
+	dma_buf_detach(intl_dmabuf[slot].dbuf, intl_dmabuf[slot].attach);
+	dma_buf_put(intl_dmabuf[slot].dbuf);
+	intl_dmabuf[slot].dmabuf_fd = 0;
+	intl_dmabuf[slot].buf_id = 0;
+}
+
+/* Reclaim stream and mux context nodes the hardware has finished with, up
+ * to the node index reported in XTSMUX_LAST_NODE_PROCESSED. Called from
+ * the interrupt handler. Returns 0 on success, -1 if no node has been
+ * processed yet.
+ *
+ * NOTE(review): list_first_entry() here assumes both strm_node and
+ * mux_node are non-empty when the interrupt fires — confirm the IRQ
+ * cannot race with the list reset in ioctl_stop.
+ */
+static int xlnx_tsmux_update_complete(struct xlnx_tsmux *mpgmuxts)
+{
+ struct stream_context_node *tstrm_node;
+ struct muxer_context *temp_mux;
+ u32 num_strm_node, i;
+ u32 num_strms;
+ unsigned long flags;
+
+ num_strm_node = xlnx_tsmux_read(mpgmuxts, XTSMUX_LAST_NODE_PROCESSED);
+ if (num_strm_node == 0)
+ return -1;
+
+ /* Removing completed stream nodes from the list */
+ spin_lock_irqsave(&mpgmuxts->lock, flags);
+ num_strms = atomic_read(&mpgmuxts->intn_stream_count);
+ for (i = 0; i < num_strms; i++) {
+ tstrm_node =
+ list_first_entry(&mpgmuxts->strm_node,
+ struct stream_context_node, node);
+ list_del(&tstrm_node->node);
+ atomic_dec(&mpgmuxts->stream_count);
+ /* Nonzero dmabuf_id marks an external buffer: release it. */
+ if (tstrm_node->element.dmabuf_id)
+ xlnx_tsmux_free_dmabufintl
+ (mpgmuxts->src_dmabufintl,
+ tstrm_node->element.dmabuf_id,
+ DMA_TO_MPG2MUX);
+ /* Stop once the hardware's last-processed node is reclaimed. */
+ if (tstrm_node->node_number == num_strm_node) {
+ dma_pool_free(mpgmuxts->strm_ctx_pool, tstrm_node,
+ tstrm_node->strm_phy_addr);
+ break;
+ }
+ }
+
+ /* Removing completed mux nodes from the list */
+ temp_mux = list_first_entry(&mpgmuxts->mux_node, struct muxer_context,
+ node);
+ mpgmuxts->outbuf_written = temp_mux->dst_buf_written;
+
+ list_del(&temp_mux->node);
+ spin_unlock_irqrestore(&mpgmuxts->lock, flags);
+
+ return 0;
+}
+
+/* Interrupt handler: acknowledge enabled events, reclaim completed
+ * contexts and wake pollers once output data has been written.
+ */
+static irqreturn_t xlnx_tsmux_intr_handler(int irq, void *ctx)
+{
+	struct xlnx_tsmux *mpgmuxts = (struct xlnx_tsmux *)ctx;
+	u32 events;
+
+	events = xlnx_tsmux_read(mpgmuxts, XTSMUX_ISR_STAT) &
+		 XTSMUX_IER_ENABLE_MASK;
+	if (!events)
+		return IRQ_NONE;
+
+	xlnx_tsmux_write(mpgmuxts, XTSMUX_ISR_STAT, events);
+	xlnx_tsmux_update_complete(mpgmuxts);
+	if (mpgmuxts->outbuf_written)
+		wake_up_interruptible(&mpgmuxts->waitq);
+
+	return IRQ_HANDLED;
+}
+
+/* Probe: map registers, enable the clock, create the DMA pools, register
+ * the interrupt and expose a character device for the instance.
+ */
+static int xlnx_tsmux_probe(struct platform_device *pdev)
+{
+	struct xlnx_tsmux *mpgmuxts;
+	struct device *dev = &pdev->dev;
+	struct device *dev_crt;
+	struct resource *dev_resrc;
+	int ret;
+
+	/* DRIVER_MAX_DEV is to limit the number of instances, but
+	 * Initial version is tested with single instance only.
+	 * TODO: replace atomic_read with ida_simple_get
+	 */
+	if (atomic_read(&xlnx_tsmux_ndevs) >= DRIVER_MAX_DEV) {
+		dev_err(&pdev->dev, "Limit of %d number of device is reached",
+			DRIVER_MAX_DEV);
+		return -EIO;
+	}
+
+	mpgmuxts = devm_kzalloc(&pdev->dev, sizeof(struct xlnx_tsmux),
+				GFP_KERNEL);
+	if (!mpgmuxts)
+		return -ENOMEM;
+	mpgmuxts->dev = &pdev->dev;
+	dev_resrc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	mpgmuxts->iomem = devm_ioremap_resource(mpgmuxts->dev, dev_resrc);
+	if (IS_ERR(mpgmuxts->iomem))
+		return PTR_ERR(mpgmuxts->iomem);
+
+	mpgmuxts->irq = irq_of_parse_and_map(mpgmuxts->dev->of_node, 0);
+	if (!mpgmuxts->irq) {
+		dev_err(mpgmuxts->dev, "Unable to get IRQ");
+		return -EINVAL;
+	}
+
+	/*
+	 * Fix: clock failures previously jumped to cdev_err, running
+	 * cdev_del()/device_destroy() on objects that were never created;
+	 * a clk_prepare_enable() failure even disabled the unprepared
+	 * clock. Nothing is held yet, so just return.
+	 */
+	mpgmuxts->ap_clk = devm_clk_get(dev, "ap_clk");
+	if (IS_ERR(mpgmuxts->ap_clk)) {
+		ret = PTR_ERR(mpgmuxts->ap_clk);
+		dev_err(dev, "failed to get ap clk %d\n", ret);
+		return ret;
+	}
+	ret = clk_prepare_enable(mpgmuxts->ap_clk);
+	if (ret) {
+		dev_err(dev, "failed to enable ap clk %d\n", ret);
+		return ret;
+	}
+
+	/* Initializing variables used in Muxer */
+	/* Fix: the lock was taken below (and throughout the driver) but
+	 * never initialized. No concurrency exists yet, so the lists can
+	 * be set up without holding it.
+	 */
+	spin_lock_init(&mpgmuxts->lock);
+	INIT_LIST_HEAD(&mpgmuxts->strm_node);
+	INIT_LIST_HEAD(&mpgmuxts->mux_node);
+
+	mpgmuxts->strm_ctx_pool = dma_pool_create("strcxt_pool", mpgmuxts->dev,
+						  XTSMUX_POOL_SIZE,
+						  XTSMUX_POOL_ALIGN,
+						  XTSMUX_POOL_SIZE *
+						  XTSMUX_MAXIN_TLSTRM);
+	if (!mpgmuxts->strm_ctx_pool) {
+		dev_err(mpgmuxts->dev, "Allocation fail for strm ctx pool");
+		/* Fix: this used to return with the clock left enabled. */
+		ret = -ENOMEM;
+		goto err_disable_clk;
+	}
+
+	mpgmuxts->mux_ctx_pool = dma_pool_create("muxcxt_pool", mpgmuxts->dev,
+						 XTSMUX_POOL_SIZE,
+						 XTSMUX_POOL_SIZE,
+						 XTSMUX_POOL_SIZE *
+						 XTSMUX_MAXIN_TLSTRM);
+	if (!mpgmuxts->mux_ctx_pool) {
+		dev_err(mpgmuxts->dev, "Allocation fail for mux ctx pool");
+		ret = -ENOMEM;
+		goto err_free_strm_pool;
+	}
+
+	init_waitqueue_head(&mpgmuxts->waitq);
+
+	ret = devm_request_irq(mpgmuxts->dev, mpgmuxts->irq,
+			       xlnx_tsmux_intr_handler, IRQF_SHARED,
+			       DRIVER_NAME, mpgmuxts);
+	if (ret < 0) {
+		dev_err(mpgmuxts->dev, "Unable to register IRQ");
+		goto err_free_mux_pool;
+	}
+
+	cdev_init(&mpgmuxts->chdev, &mpg2mux_fops);
+	mpgmuxts->chdev.owner = THIS_MODULE;
+	mpgmuxts->id = atomic_read(&xlnx_tsmux_ndevs);
+	ret = cdev_add(&mpgmuxts->chdev, MKDEV(MAJOR(xlnx_tsmux_devt),
+					       mpgmuxts->id), 1);
+	if (ret < 0) {
+		dev_err(mpgmuxts->dev, "cdev_add failed");
+		goto err_free_mux_pool;
+	}
+
+	dev_crt = device_create(xlnx_tsmux_class, mpgmuxts->dev,
+				MKDEV(MAJOR(xlnx_tsmux_devt), mpgmuxts->id),
+				mpgmuxts, "mpgmuxts%d", mpgmuxts->id);
+	if (IS_ERR(dev_crt)) {
+		ret = PTR_ERR(dev_crt);
+		dev_err(mpgmuxts->dev, "Unable to create device");
+		goto err_cdev_del;
+	}
+
+	/* Fix: remove() reads drvdata, but it was never set here. */
+	platform_set_drvdata(pdev, mpgmuxts);
+
+	dev_info(mpgmuxts->dev,
+		 "Xilinx mpeg2 TS muxer device probe completed");
+
+	atomic_inc(&xlnx_tsmux_ndevs);
+
+	return 0;
+
+err_cdev_del:
+	cdev_del(&mpgmuxts->chdev);
+err_free_mux_pool:
+	dma_pool_destroy(mpgmuxts->mux_ctx_pool);
+err_free_strm_pool:
+	dma_pool_destroy(mpgmuxts->strm_ctx_pool);
+err_disable_clk:
+	clk_disable_unprepare(mpgmuxts->ap_clk);
+
+	return ret;
+}
+
+/* Remove: tear down pools, device node, cdev and clock for the instance. */
+static int xlnx_tsmux_remove(struct platform_device *pdev)
+{
+	struct xlnx_tsmux *mpgmuxts = platform_get_drvdata(pdev);
+
+	if (!mpgmuxts || !xlnx_tsmux_class)
+		return -EIO;
+
+	dma_pool_destroy(mpgmuxts->mux_ctx_pool);
+	dma_pool_destroy(mpgmuxts->strm_ctx_pool);
+
+	device_destroy(xlnx_tsmux_class,
+		       MKDEV(MAJOR(xlnx_tsmux_devt), mpgmuxts->id));
+	cdev_del(&mpgmuxts->chdev);
+	atomic_dec(&xlnx_tsmux_ndevs);
+	clk_disable_unprepare(mpgmuxts->ap_clk);
+
+	return 0;
+}
+
+static const struct of_device_id xlnx_tsmux_of_match[] = {
+	{ .compatible = "xlnx,tsmux-1.0", },
+	{ }
+};
+/* Fix: export the match table so the module autoloads on DT match. */
+MODULE_DEVICE_TABLE(of, xlnx_tsmux_of_match);
+
+/* Platform driver glue binding the DT compatible to probe/remove. */
+static struct platform_driver xlnx_tsmux_driver = {
+ .probe = xlnx_tsmux_probe,
+ .remove = xlnx_tsmux_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = xlnx_tsmux_of_match,
+ },
+};
+
+/* Module init: create the device class, reserve a char-dev region and
+ * register the platform driver, unwinding in reverse order on failure.
+ */
+static int __init xlnx_tsmux_mod_init(void)
+{
+	int ret;
+
+	xlnx_tsmux_class = class_create(THIS_MODULE, DRIVER_NAME);
+	if (IS_ERR(xlnx_tsmux_class)) {
+		pr_err("%s : Unable to create driver class", __func__);
+		return PTR_ERR(xlnx_tsmux_class);
+	}
+
+	ret = alloc_chrdev_region(&xlnx_tsmux_devt, 0, DRIVER_MAX_DEV,
+				  DRIVER_NAME);
+	if (ret < 0) {
+		pr_err("%s : Unable to get major number", __func__);
+		goto err_class;
+	}
+
+	ret = platform_driver_register(&xlnx_tsmux_driver);
+	if (ret < 0) {
+		pr_err("%s : Unable to register %s driver", __func__,
+		       DRIVER_NAME);
+		goto err_chrdev;
+	}
+
+	return 0;
+
+err_chrdev:
+	unregister_chrdev_region(xlnx_tsmux_devt, DRIVER_MAX_DEV);
+err_class:
+	class_destroy(xlnx_tsmux_class);
+
+	return ret;
+}
+
+/* Module exit: undo init in reverse — driver, chrdev region, class. */
+static void __exit xlnx_tsmux_mod_exit(void)
+{
+	platform_driver_unregister(&xlnx_tsmux_driver);
+	unregister_chrdev_region(xlnx_tsmux_devt, DRIVER_MAX_DEV);
+	class_destroy(xlnx_tsmux_class);
+	xlnx_tsmux_class = NULL;
+}
+
+module_init(xlnx_tsmux_mod_init);
+module_exit(xlnx_tsmux_mod_exit);
+
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_DESCRIPTION("Xilinx mpeg2 transport stream muxer IP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/xlnxsync/Kconfig b/drivers/staging/xlnxsync/Kconfig
new file mode 100644
index 000000000000..08e73384dc94
--- /dev/null
+++ b/drivers/staging/xlnxsync/Kconfig
@@ -0,0 +1,11 @@
+config XLNX_SYNC
+ tristate "Xilinx Synchronizer"
+ depends on ARCH_ZYNQMP
+ help
+ This driver is developed for Xilinx Synchronizer IP. It is used to
+ monitor the AXI addresses of the producer and initiate the
+ consumer to start earlier, thereby reducing the latency to process
+ the data.
+
+ To compile this driver as a module, choose M here.
+	  If unsure, choose N.
diff --git a/drivers/staging/xlnxsync/MAINTAINERS b/drivers/staging/xlnxsync/MAINTAINERS
new file mode 100644
index 000000000000..e2d720419783
--- /dev/null
+++ b/drivers/staging/xlnxsync/MAINTAINERS
@@ -0,0 +1,4 @@
+XILINX SYNCHRONIZER DRIVER
+M: Vishal Sagar <vishal.sagar@xilinx.com>
+S: Maintained
+F: drivers/staging/xlnxsync
diff --git a/drivers/staging/xlnxsync/Makefile b/drivers/staging/xlnxsync/Makefile
new file mode 100644
index 000000000000..b126a36da37c
--- /dev/null
+++ b/drivers/staging/xlnxsync/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_XLNX_SYNC) += xlnxsync.o
diff --git a/drivers/staging/xlnxsync/dt-binding.txt b/drivers/staging/xlnxsync/dt-binding.txt
new file mode 100644
index 000000000000..f1ed9d724de8
--- /dev/null
+++ b/drivers/staging/xlnxsync/dt-binding.txt
@@ -0,0 +1,34 @@
+Xilinx Synchronizer
+-------------------
+
+The Xilinx Synchronizer is used for buffer synchronization between
+producer and consumer blocks. It does so by monitoring the bus on which the
+producer block writes frame data to memory and the consumer block reads the
+frame data from memory.
+
+It can work on the encode path with max 4 channels or on decode path with
+max 2 channels.
+
+Required properties:
+- compatible : Must contain "xlnx,sync-ip-1.0"
+- reg: Physical base address and length of the registers set for the device.
+- interrupts: Contains the interrupt line number.
+- interrupt-parent: phandle to interrupt controller.
+- clock-names: The input clock names for axilite, producer and consumer clock.
+- clocks: Reference to the clock that drives the axi interface, producer and consumer.
+- xlnx,num-chan: Range from 1 to 2 for decode.
+ Range from 1 to 4 for encode.
+
+Optional properties:
+- xlnx,encode: Present if IP configured for encoding path, else absent.
+
+v_sync_vcu: subframe_sync_vcu@a00e0000 {
+ compatible = "xlnx,sync-ip-1.0";
+ reg = <0x0 0xa00e0000 0x0 0x10000>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 96 4>;
+ clock-names = "s_axi_ctrl_aclk", "s_axi_mm_p_aclk", "s_axi_mm_aclk";
+ clocks = <&vid_s_axi_clk>, <&vid_stream_clk>, <&vid_stream_clk>;
+ xlnx,num-chan = <4>;
+ xlnx,encode;
+};
diff --git a/drivers/staging/xlnxsync/xlnxsync.c b/drivers/staging/xlnxsync/xlnxsync.c
new file mode 100644
index 000000000000..d534f9f9841d
--- /dev/null
+++ b/drivers/staging/xlnxsync/xlnxsync.c
@@ -0,0 +1,1301 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Synchronizer IP driver
+ *
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Author: Vishal Sagar <vishal.sagar@xilinx.com>
+ *
+ * This driver is used to control the Xilinx Synchronizer IP
+ * to achieve sub frame latency for encode and decode with VCU.
+ * This is done by monitoring the address lines for specific values.
+ */
+
+#include <linux/cdev.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioctl.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/uaccess.h>
+#include <linux/xlnxsync.h>
+
+/* Register offsets and bit masks */
+#define XLNXSYNC_CTRL_REG 0x00
+#define XLNXSYNC_ISR_REG 0x04
+/* Producer Luma/Chroma Start/End Address */
+#define XLNXSYNC_PL_START_LO_REG 0x08
+#define XLNXSYNC_PL_START_HI_REG 0x0C
+#define XLNXSYNC_PC_START_LO_REG 0x20
+#define XLNXSYNC_PC_START_HI_REG 0x24
+#define XLNXSYNC_PL_END_LO_REG 0x38
+#define XLNXSYNC_PL_END_HI_REG 0x3C
+#define XLNXSYNC_PC_END_LO_REG 0x50
+#define XLNXSYNC_PC_END_HI_REG 0x54
+#define XLNXSYNC_L_MARGIN_REG 0x68
+#define XLNXSYNC_C_MARGIN_REG 0x74
+#define XLNXSYNC_IER_REG 0x80
+#define XLNXSYNC_DBG_REG 0x84
+/* Consumer Luma/Chroma Start/End Address */
+#define XLNXSYNC_CL_START_LO_REG 0x88
+#define XLNXSYNC_CL_START_HI_REG 0x8C
+#define XLNXSYNC_CC_START_LO_REG 0xA0
+#define XLNXSYNC_CC_START_HI_REG 0xA4
+#define XLNXSYNC_CL_END_LO_REG 0xB8
+#define XLNXSYNC_CL_END_HI_REG 0xBC
+#define XLNXSYNC_CC_END_LO_REG 0xD0
+#define XLNXSYNC_CC_END_HI_REG 0xD4
+
+/* Luma/Chroma Core offset registers */
+#define XLNXSYNC_LCOREOFF_REG 0x400
+#define XLNXSYNC_CCOREOFF_REG 0x410
+#define XLNXSYNC_COREOFF_NEXT 0x4
+
+#define XLNXSYNC_CTRL_ENCDEC_MASK BIT(0)
+#define XLNXSYNC_CTRL_ENABLE_MASK BIT(1)
+#define XLNXSYNC_CTRL_INTR_EN_MASK BIT(2)
+#define XLNXSYNC_CTRL_SOFTRESET BIT(3)
+
+#define XLNXSYNC_ISR_SYNC_FAIL_MASK BIT(0)
+#define XLNXSYNC_ISR_WDG_ERR_MASK BIT(1)
+/* Producer related */
+#define XLNXSYNC_ISR_PLDONE_SHIFT (2)
+#define XLNXSYNC_ISR_PLDONE_MASK GENMASK(3, 2)
+#define XLNXSYNC_ISR_PLSKIP_MASK BIT(4)
+#define XLNXSYNC_ISR_PLVALID_MASK BIT(5)
+#define XLNXSYNC_ISR_PCDONE_SHIFT (6)
+#define XLNXSYNC_ISR_PCDONE_MASK GENMASK(7, 6)
+#define XLNXSYNC_ISR_PCSKIP_MASK BIT(8)
+#define XLNXSYNC_ISR_PCVALID_MASK BIT(9)
+/* Consumer related */
+#define XLNXSYNC_ISR_CLDONE_SHIFT (10)
+#define XLNXSYNC_ISR_CLDONE_MASK GENMASK(11, 10)
+#define XLNXSYNC_ISR_CLSKIP_MASK BIT(12)
+#define XLNXSYNC_ISR_CLVALID_MASK BIT(13)
+#define XLNXSYNC_ISR_CCDONE_SHIFT (14)
+#define XLNXSYNC_ISR_CCDONE_MASK GENMASK(15, 14)
+#define XLNXSYNC_ISR_CCSKIP_MASK BIT(16)
+#define XLNXSYNC_ISR_CCVALID_MASK BIT(17)
+
+#define XLNXSYNC_ISR_LDIFF BIT(18)
+#define XLNXSYNC_ISR_CDIFF BIT(19)
+
+/*
+ * The framebuffer "valid" flag lives at bit 12 of the HI address
+ * register, i.e. bit 44 of the overall start address value.
+ */
+#define XLNXSYNC_FB_VALID_MASK BIT(12)
+#define XLNXSYNC_FB_HI_ADDR_MASK GENMASK(11, 0)
+
+#define XLNXSYNC_IER_SYNC_FAIL_MASK BIT(0)
+#define XLNXSYNC_IER_WDG_ERR_MASK BIT(1)
+/* Producer */
+#define XLNXSYNC_IER_PLVALID_MASK BIT(5)
+#define XLNXSYNC_IER_PCVALID_MASK BIT(9)
+/* Consumer */
+#define XLNXSYNC_IER_CLVALID_MASK BIT(13)
+#define XLNXSYNC_IER_CCVALID_MASK BIT(17)
+/* Diff */
+#define XLNXSYNC_IER_LDIFF BIT(18)
+#define XLNXSYNC_IER_CDIFF BIT(19)
+
+#define XLNXSYNC_IER_ALL_MASK (XLNXSYNC_IER_SYNC_FAIL_MASK |\
+			       XLNXSYNC_IER_WDG_ERR_MASK |\
+			       XLNXSYNC_IER_PLVALID_MASK |\
+			       XLNXSYNC_IER_PCVALID_MASK |\
+			       XLNXSYNC_IER_CLVALID_MASK |\
+			       XLNXSYNC_IER_CCVALID_MASK |\
+			       XLNXSYNC_IER_LDIFF |\
+			       XLNXSYNC_IER_CDIFF)
+
+/* Other macros */
+/* Per-channel register banks are 0x100 bytes apart */
+#define XLNXSYNC_CHAN_OFFSET 0x100
+
+#define XLNXSYNC_DEVNAME_LEN (32)
+
+#define XLNXSYNC_DRIVER_NAME "xlnxsync"
+#define XLNXSYNC_DRIVER_VERSION "0.1"
+
+#define XLNXSYNC_DEV_MAX 256
+
+/* Module-scope state shared by all synchronizer instances */
+static struct class *xlnxsync_class;
+static dev_t xlnxsync_devt;
+/* Used to keep track of sync devices */
+static DEFINE_IDA(xs_ida);
+
+/**
+ * struct xlnxsync_device - Xilinx Synchronizer struct
+ * @chdev: Character device driver struct
+ * @dev: Pointer to device
+ * @iomem: Pointer to the register space
+ * @sync_mutex: Serialize general device specific ioctl calls
+ * @axi_clk: Pointer to clock structure for axilite clock
+ * @p_clk: Pointer to clock structure for producer clock
+ * @c_clk: Pointer to clock structure for consumer clock
+ * @user_count: Usage count (number of open file handles)
+ * @irq: IRQ number
+ * @irq_lock: Spinlock used to protect access to sync and watchdog error
+ * @minor: Device id count
+ * @config: IP config struct
+ * @channels: List head for syncip channel linked list
+ * @chan_count : Active channel number count
+ * @reserved : Bitmap to track reserved channels (one bit per HW channel)
+ *
+ * This structure contains the device driver related parameters
+ */
+struct xlnxsync_device {
+	struct cdev chdev;
+	struct device *dev;
+	void __iomem *iomem;
+	/* sync_mutex is used to serialize general device ioctl calls */
+	struct mutex sync_mutex;
+	struct clk *axi_clk;
+	struct clk *p_clk;
+	struct clk *c_clk;
+	atomic_t user_count;
+	unsigned int irq;
+	/* irq_lock is used to protect access to sync_err and wdg_err */
+	spinlock_t irq_lock;
+	u32 minor;
+	struct xlnxsync_config config;
+	struct list_head channels;
+	u8 chan_count;
+	unsigned long reserved;
+};
+
+/**
+ * struct xlnxsync_channel - Synchronizer context struct
+ * @dev: Xilinx synchronizer device struct
+ * @mutex: Serialize channel specific ioctl calls
+ * @id: Channel id (index into the device's reserved bitmap)
+ * @channel: list entry into syncip channel lists
+ * @wq_fbdone: Wait queue for frame buffer done events
+ * @wq_error: Wait queue for error events
+ * @l_done: Luma done result array (per buffer, per producer/consumer)
+ * @c_done: Chroma done result array (per buffer, per producer/consumer)
+ * @sync_err: Capture synchronization error per channel
+ * @wdg_err: Capture watchdog error per channel
+ * @ldiff_err: Luma buffer diff > 1
+ * @cdiff_err: Chroma buffer diff > 1
+ * @err_event: Error event per channel
+ * @framedone_event: Framebuffer done event per channel
+ *
+ * This structure contains the syncip channel specific parameters.
+ * The flag bit-fields below are written from the IRQ handler and read
+ * from process context, both under the device's irq_lock.
+ */
+struct xlnxsync_channel {
+	struct xlnxsync_device *dev;
+	/* Serialize channel specific ioctl calls */
+	struct mutex mutex;
+	u32 id;
+	struct list_head channel;
+	wait_queue_head_t wq_fbdone;
+	wait_queue_head_t wq_error;
+	u8 l_done[XLNXSYNC_BUF_PER_CHAN][XLNXSYNC_IO];
+	u8 c_done[XLNXSYNC_BUF_PER_CHAN][XLNXSYNC_IO];
+	u8 sync_err : 1;
+	u8 wdg_err : 1;
+	u8 ldiff_err : 1;
+	u8 cdiff_err : 1;
+	u8 err_event : 1;
+	u8 framedone_event : 1;
+};
+
+/* Read a register from a channel's register bank */
+static inline u32 xlnxsync_read(struct xlnxsync_device *dev, u32 chan, u32 reg)
+{
+	return ioread32(dev->iomem + (chan * XLNXSYNC_CHAN_OFFSET) + reg);
+}
+
+/* Write a register in a channel's register bank */
+static inline void xlnxsync_write(struct xlnxsync_device *dev, u32 chan,
+				  u32 reg, u32 val)
+{
+	iowrite32(val, dev->iomem + (chan * XLNXSYNC_CHAN_OFFSET) + reg);
+}
+
+/* Read-modify-write: clear the given bits in a channel register */
+static inline void xlnxsync_clr(struct xlnxsync_device *dev, u32 chan, u32 reg,
+				u32 clr)
+{
+	xlnxsync_write(dev, chan, reg, xlnxsync_read(dev, chan, reg) & ~clr);
+}
+
+/* Read-modify-write: set the given bits in a channel register */
+static inline void xlnxsync_set(struct xlnxsync_device *dev, u32 chan, u32 reg,
+				u32 set)
+{
+	xlnxsync_write(dev, chan, reg, xlnxsync_read(dev, chan, reg) | set);
+}
+
+/*
+ * Check whether framebuffer @buf of @channel is done for the given
+ * @io direction (producer or consumer). A buffer is done once the
+ * hardware has cleared the valid bit in both the luma and the chroma
+ * start-address HI registers.
+ */
+static bool xlnxsync_is_buf_done(struct xlnxsync_device *dev,
+				 u32 channel, u32 buf, u32 io)
+{
+	u32 l_valid, c_valid;
+	u32 l_hi_reg, c_hi_reg;
+
+	/* Select the producer or consumer start-address HI registers */
+	if (io == XLNXSYNC_PROD) {
+		l_hi_reg = XLNXSYNC_PL_START_HI_REG;
+		c_hi_reg = XLNXSYNC_PC_START_HI_REG;
+	} else if (io == XLNXSYNC_CONS) {
+		l_hi_reg = XLNXSYNC_CL_START_HI_REG;
+		c_hi_reg = XLNXSYNC_CC_START_HI_REG;
+	} else {
+		return false;
+	}
+
+	/* Each buffer's register pair is 8 bytes after the previous one */
+	l_valid = xlnxsync_read(dev, channel, l_hi_reg + (buf << 3)) &
+		  XLNXSYNC_FB_VALID_MASK;
+	c_valid = xlnxsync_read(dev, channel, c_hi_reg + (buf << 3)) &
+		  XLNXSYNC_FB_VALID_MASK;
+
+	return !l_valid && !c_valid;
+}
+
+/*
+ * Soft-reset one channel and poll until the hardware clears the
+ * SOFTRESET bit, giving up after ~100ms (50 x 2ms sleeps).
+ */
+static void xlnxsync_reset_chan(struct xlnxsync_device *dev, u32 chan)
+{
+	u8 num_retries = 50;
+
+	xlnxsync_set(dev, chan, XLNXSYNC_CTRL_REG, XLNXSYNC_CTRL_SOFTRESET);
+	/* Wait for a maximum of ~100ms to flush pending transactions */
+	while (num_retries--) {
+		if (!(xlnxsync_read(dev, chan, XLNXSYNC_CTRL_REG) &
+		      XLNXSYNC_CTRL_SOFTRESET))
+			break;
+		usleep_range(2000, 2100);
+	}
+	/* NOTE(review): a reset timeout is silently ignored here */
+}
+
+/* Soft-reset every channel supported by this device instance */
+static void xlnxsync_reset(struct xlnxsync_device *dev)
+{
+	u32 chan = 0;
+
+	while (chan < dev->config.max_channels)
+		xlnxsync_reset_chan(dev, chan++);
+}
+
+/*
+ * Resolve a dma-buf file descriptor to the DMA address of its first
+ * scatterlist entry. Returns 0 on any failure (callers treat 0 as
+ * an invalid address).
+ *
+ * NOTE(review): the attachment is unmapped and detached before the
+ * address is used by the caller; this assumes the underlying buffer
+ * stays pinned at the same address for the channel's lifetime —
+ * confirm against the dma-buf exporter's guarantees.
+ */
+static dma_addr_t xlnxsync_get_phy_addr(struct xlnxsync_device *dev,
+					u32 fd)
+{
+	struct dma_buf *dbuf;
+	struct dma_buf_attachment *attach;
+	struct sg_table *sgt;
+	dma_addr_t phy_addr = 0;
+
+	dbuf = dma_buf_get(fd);
+	if (IS_ERR(dbuf)) {
+		dev_err(dev->dev, "%s : Failed to get dma buf\n", __func__);
+		goto get_phy_addr_err;
+	}
+
+	attach = dma_buf_attach(dbuf, dev->dev);
+	if (IS_ERR(attach)) {
+		dev_err(dev->dev, "%s : Failed to attach buf\n", __func__);
+		goto fail_attach;
+	}
+
+	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR(sgt)) {
+		dev_err(dev->dev, "%s : Failed to attach map\n", __func__);
+		goto fail_map;
+	}
+
+	phy_addr = sg_dma_address(sgt->sgl);
+	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+
+fail_map:
+	dma_buf_detach(dbuf, attach);
+fail_attach:
+	dma_buf_put(dbuf);
+get_phy_addr_err:
+	return phy_addr;
+}
+
+/*
+ * XLNXSYNC_CHAN_SET_CONFIG handler: program the luma/chroma start/end
+ * addresses, margins and core offsets for one framebuffer in both the
+ * producer and consumer directions of @channel.
+ *
+ * The user either requests a specific framebuffer slot via cfg.fb_id[]
+ * or asks for auto-selection with XLNXSYNC_AUTO_SEARCH.
+ *
+ * Return: 0 on success, -EFAULT on copy failure, -EINVAL on bad
+ * header/fb id/address, -EBUSY if no free framebuffer is available.
+ */
+static int xlnxsync_chan_config(struct xlnxsync_channel *channel,
+				void __user *arg)
+{
+	struct xlnxsync_chan_config cfg;
+	int ret, i = 0, j;
+	dma_addr_t phy_start_address;
+	u64 luma_start_address[XLNXSYNC_IO];
+	u64 chroma_start_address[XLNXSYNC_IO];
+	u64 luma_end_address[XLNXSYNC_IO];
+	u64 chroma_end_address[XLNXSYNC_IO];
+	struct xlnxsync_device *dev = channel->dev;
+
+	ret = copy_from_user(&cfg, arg, sizeof(cfg));
+	if (ret) {
+		dev_err(dev->dev, "%s : Failed to copy from user\n", __func__);
+		/* copy_from_user() returns bytes not copied, not an errno */
+		return -EFAULT;
+	}
+
+	if (cfg.hdr_ver != XLNXSYNC_IOCTL_HDR_VER) {
+		dev_err(dev->dev, "%s : ioctl version mismatch\n", __func__);
+		dev_err(dev->dev,
+			"ioctl ver = 0x%llx expected ver = 0x%llx\n",
+			cfg.hdr_ver, (u64)XLNXSYNC_IOCTL_HDR_VER);
+		return -EINVAL;
+	}
+
+	/* Calculate luma/chroma physical addresses */
+	phy_start_address = xlnxsync_get_phy_addr(dev, cfg.dma_fd);
+	if (!phy_start_address) {
+		dev_err(dev->dev, "%s : Failed to obtain physical address\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	luma_start_address[XLNXSYNC_PROD] =
+		cfg.luma_start_offset[XLNXSYNC_PROD] + phy_start_address;
+	luma_start_address[XLNXSYNC_CONS] =
+		cfg.luma_start_offset[XLNXSYNC_CONS] + phy_start_address;
+	chroma_start_address[XLNXSYNC_PROD] =
+		cfg.chroma_start_offset[XLNXSYNC_PROD] + phy_start_address;
+	chroma_start_address[XLNXSYNC_CONS] =
+		cfg.chroma_start_offset[XLNXSYNC_CONS] + phy_start_address;
+	luma_end_address[XLNXSYNC_PROD] =
+		cfg.luma_end_offset[XLNXSYNC_PROD] + phy_start_address;
+	luma_end_address[XLNXSYNC_CONS] =
+		cfg.luma_end_offset[XLNXSYNC_CONS] + phy_start_address;
+	chroma_end_address[XLNXSYNC_PROD] =
+		cfg.chroma_end_offset[XLNXSYNC_PROD] + phy_start_address;
+	chroma_end_address[XLNXSYNC_CONS] =
+		cfg.chroma_end_offset[XLNXSYNC_CONS] + phy_start_address;
+
+	dev_dbg(dev->dev, "Channel id = %d", channel->id);
+	dev_dbg(dev->dev, "Producer address\n");
+	dev_dbg(dev->dev, "Luma Start Addr = 0x%llx End Addr = 0x%llx Margin = 0x%08x\n",
+		luma_start_address[XLNXSYNC_PROD],
+		luma_end_address[XLNXSYNC_PROD], cfg.luma_margin);
+	dev_dbg(dev->dev, "Chroma Start Addr = 0x%llx End Addr = 0x%llx Margin = 0x%08x\n",
+		chroma_start_address[XLNXSYNC_PROD],
+		chroma_end_address[XLNXSYNC_PROD], cfg.chroma_margin);
+	dev_dbg(dev->dev, "FB id = %d IsMono = %d\n",
+		cfg.fb_id[XLNXSYNC_PROD], cfg.ismono[XLNXSYNC_PROD]);
+	dev_dbg(dev->dev, "Consumer address\n");
+	dev_dbg(dev->dev, "Luma Start Addr = 0x%llx End Addr = 0x%llx\n",
+		luma_start_address[XLNXSYNC_CONS],
+		luma_end_address[XLNXSYNC_CONS]);
+	dev_dbg(dev->dev, "Chroma Start Addr = 0x%llx End Addr = 0x%llx\n",
+		chroma_start_address[XLNXSYNC_CONS],
+		chroma_end_address[XLNXSYNC_CONS]);
+	dev_dbg(dev->dev, "FB id = %d IsMono = %d\n",
+		cfg.fb_id[XLNXSYNC_CONS], cfg.ismono[XLNXSYNC_CONS]);
+
+	for (j = 0; j < XLNXSYNC_IO; j++) {
+		u32 l_start_reg, l_end_reg, c_start_reg, c_end_reg;
+
+		if (cfg.fb_id[j] == XLNXSYNC_AUTO_SEARCH) {
+			/*
+			 * When fb_id is 0xFF auto search for free fb
+			 * in a channel
+			 */
+			dev_dbg(dev->dev, "%s : auto search free fb\n",
+				__func__);
+			for (i = 0; i < XLNXSYNC_BUF_PER_CHAN; i++) {
+				if (xlnxsync_is_buf_done(dev, channel->id, i,
+							 j))
+					break;
+				dev_dbg(dev->dev, "Channel %d %s FB %d is busy\n",
+					channel->id, j ? "prod" : "cons", i);
+			}
+
+			if (i == XLNXSYNC_BUF_PER_CHAN)
+				return -EBUSY;
+
+		} else if (cfg.fb_id[j] >= 0 &&
+			   cfg.fb_id[j] < XLNXSYNC_BUF_PER_CHAN) {
+			/* If fb_id is specified, check its availability */
+			if (!(xlnxsync_is_buf_done(dev, channel->id,
+						   cfg.fb_id[j], j))) {
+				dev_dbg(dev->dev,
+					"%s : %s FB %d in channel %d is busy!\n",
+					__func__, j ? "prod" : "cons",
+					cfg.fb_id[j], channel->id);
+				return -EBUSY;
+			}
+			/*
+			 * Use the caller-selected buffer index. Without
+			 * this assignment the register writes below would
+			 * target a stale index from a previous iteration.
+			 */
+			i = cfg.fb_id[j];
+			dev_dbg(dev->dev, "%s : Configure fb %d\n",
+				__func__, i);
+		} else {
+			/* Invalid fb_id passed */
+			dev_err(dev->dev, "Invalid FB id %d for configuration!\n",
+				cfg.fb_id[j]);
+			return -EINVAL;
+		}
+
+		if (j == XLNXSYNC_PROD) {
+			l_start_reg = XLNXSYNC_PL_START_LO_REG;
+			l_end_reg = XLNXSYNC_PL_END_LO_REG;
+			c_start_reg = XLNXSYNC_PC_START_LO_REG;
+			c_end_reg = XLNXSYNC_PC_END_LO_REG;
+		} else {
+			l_start_reg = XLNXSYNC_CL_START_LO_REG;
+			l_end_reg = XLNXSYNC_CL_END_LO_REG;
+			c_start_reg = XLNXSYNC_CC_START_LO_REG;
+			c_end_reg = XLNXSYNC_CC_END_LO_REG;
+		}
+
+		/* Start Address (each buffer's register pair is 8 apart) */
+		xlnxsync_write(dev, channel->id, l_start_reg + (i << 3),
+			       lower_32_bits(luma_start_address[j]));
+
+		xlnxsync_write(dev, channel->id,
+			       (l_start_reg + 4) + (i << 3),
+			       upper_32_bits(luma_start_address[j]) &
+			       XLNXSYNC_FB_HI_ADDR_MASK);
+
+		/* End Address */
+		xlnxsync_write(dev, channel->id, l_end_reg + (i << 3),
+			       lower_32_bits(luma_end_address[j]));
+		xlnxsync_write(dev, channel->id, l_end_reg + 4 + (i << 3),
+			       upper_32_bits(luma_end_address[j]));
+
+		/* Set margin */
+		xlnxsync_write(dev, channel->id,
+			       XLNXSYNC_L_MARGIN_REG + (i << 2),
+			       cfg.luma_margin);
+
+		if (!cfg.ismono[j]) {
+			dev_dbg(dev->dev, "%s : Not monochrome. Program Chroma\n",
+				__func__);
+
+			/* Chroma Start Address */
+			xlnxsync_write(dev, channel->id,
+				       c_start_reg + (i << 3),
+				       lower_32_bits(chroma_start_address[j]));
+
+			xlnxsync_write(dev, channel->id,
+				       c_start_reg + 4 + (i << 3),
+				       upper_32_bits(chroma_start_address[j]) &
+				       XLNXSYNC_FB_HI_ADDR_MASK);
+
+			/* Chroma End Address */
+			xlnxsync_write(dev, channel->id,
+				       c_end_reg + (i << 3),
+				       lower_32_bits(chroma_end_address[j]));
+
+			xlnxsync_write(dev, channel->id,
+				       c_end_reg + 4 + (i << 3),
+				       upper_32_bits(chroma_end_address[j]));
+
+			/* Chroma Margin */
+			xlnxsync_write(dev, channel->id,
+				       XLNXSYNC_C_MARGIN_REG + (i << 2),
+				       cfg.chroma_margin);
+
+			/* Set the Valid bit */
+			xlnxsync_set(dev, channel->id,
+				     c_start_reg + 4 + (i << 3),
+				     XLNXSYNC_FB_VALID_MASK);
+		}
+
+		/* Set the Valid bit */
+		xlnxsync_set(dev, channel->id, l_start_reg + 4 + (i << 3),
+			     XLNXSYNC_FB_VALID_MASK);
+	}
+
+	/* Core offset registers are device-global (not per-channel) */
+	for (i = 0; i < XLNXSYNC_MAX_CORES; i++) {
+		iowrite32(cfg.luma_core_offset[i],
+			  dev->iomem + XLNXSYNC_LCOREOFF_REG +
+			  (i * XLNXSYNC_COREOFF_NEXT));
+
+		iowrite32(cfg.chroma_core_offset[i],
+			  dev->iomem + XLNXSYNC_CCOREOFF_REG +
+			  (i * XLNXSYNC_COREOFF_NEXT));
+	}
+
+	return 0;
+}
+
+/*
+ * XLNXSYNC_CHAN_GET_STATUS handler: report per-buffer done state,
+ * channel enable state and latched error flags to userspace.
+ *
+ * Return: 0 on success, -EFAULT if the result cannot be copied out.
+ */
+static int xlnxsync_chan_get_status(struct xlnxsync_channel *channel,
+				    void __user *arg)
+{
+	int ret;
+	u32 i, j;
+	unsigned long flags;
+	/*
+	 * Zero-initialize: without this, 'enable' (only ever set true
+	 * below) and any struct padding would leak kernel stack data
+	 * to userspace.
+	 */
+	struct xlnxsync_stat status = {0};
+	struct xlnxsync_device *dev = channel->dev;
+
+	/* Update Buffers status */
+	for (i = 0; i < XLNXSYNC_BUF_PER_CHAN; i++) {
+		for (j = 0; j < XLNXSYNC_IO; j++) {
+			status.fbdone[i][j] =
+				xlnxsync_is_buf_done(dev, channel->id, i, j);
+		}
+	}
+
+	/* Update channel enable status */
+	if (xlnxsync_read(dev, channel->id, XLNXSYNC_CTRL_REG) &
+	    XLNXSYNC_CTRL_ENABLE_MASK)
+		status.enable = true;
+
+	/* Update channel error status under irq_lock (IRQ handler writes) */
+	spin_lock_irqsave(&dev->irq_lock, flags);
+	status.sync_err = channel->sync_err;
+	status.wdg_err = channel->wdg_err;
+	status.ldiff_err = channel->ldiff_err;
+	status.cdiff_err = channel->cdiff_err;
+	spin_unlock_irqrestore(&dev->irq_lock, flags);
+
+	status.hdr_ver = XLNXSYNC_IOCTL_HDR_VER;
+
+	ret = copy_to_user(arg, &status, sizeof(status));
+	if (ret) {
+		dev_err(dev->dev, "%s: failed to copy result data to user\n",
+			__func__);
+		/* copy_to_user() returns bytes not copied, not an errno */
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * Enable or disable @channel. Enabling unmasks all interrupts and sets
+ * the enable + interrupt-enable control bits. Disabling soft-resets the
+ * channel, clears control/interrupt bits and resets all latched error
+ * and done state.
+ *
+ * Return: 0 on success, -EINVAL on version mismatch or bad channel id.
+ */
+static int xlnxsync_chan_enable(struct xlnxsync_channel *channel, bool enable)
+{
+	struct xlnxsync_device *dev = channel->dev;
+	unsigned int i, j;
+
+	/* NOTE(review): config.hdr_ver is set by the driver at probe,
+	 * so this check can only fail if probe order changes — confirm.
+	 */
+	if (dev->config.hdr_ver != XLNXSYNC_IOCTL_HDR_VER) {
+		dev_err(dev->dev, "ioctl not supported!\n");
+		return -EINVAL;
+	}
+
+	/* check channel v/s max from dt */
+	if (channel->id >= dev->config.max_channels) {
+		dev_err(dev->dev, "Invalid channel %d. Max channels = %d!\n",
+			channel->id, dev->config.max_channels);
+		return -EINVAL;
+	}
+
+	if (enable) {
+		dev_dbg(dev->dev, "Enabling %d channel\n", channel->id);
+		xlnxsync_set(dev, channel->id, XLNXSYNC_IER_REG,
+			     XLNXSYNC_IER_ALL_MASK);
+		xlnxsync_set(dev, channel->id, XLNXSYNC_CTRL_REG,
+			     XLNXSYNC_CTRL_ENABLE_MASK |
+			     XLNXSYNC_CTRL_INTR_EN_MASK);
+	} else {
+		dev_dbg(dev->dev, "Disabling %d channel\n", channel->id);
+		xlnxsync_reset_chan(dev, channel->id);
+		xlnxsync_clr(dev, channel->id, XLNXSYNC_CTRL_REG,
+			     XLNXSYNC_CTRL_ENABLE_MASK |
+			     XLNXSYNC_CTRL_INTR_EN_MASK);
+		xlnxsync_clr(dev, channel->id, XLNXSYNC_IER_REG,
+			     XLNXSYNC_IER_ALL_MASK);
+		channel->sync_err = false;
+		channel->wdg_err = false;
+		channel->ldiff_err = false;
+		channel->cdiff_err = false;
+
+		/* Forget all per-buffer done state for a clean restart */
+		for (i = 0; i < XLNXSYNC_BUF_PER_CHAN; i++) {
+			for (j = 0; j < XLNXSYNC_IO; j++) {
+				channel->l_done[i][j] = false;
+				channel->c_done[i][j] = false;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * XLNXSYNC_GET_CFG handler: report the IP configuration (encode vs
+ * decode, channel counts) and this file handle's reserved channel id.
+ *
+ * Return: 0 on success, -EFAULT if the result cannot be copied out.
+ */
+static int xlnxsync_get_config(struct xlnxsync_channel *channel,
+			       void __user *arg)
+{
+	/* Zero-initialize so struct padding never leaks stack data */
+	struct xlnxsync_config cfg = {0};
+	int ret;
+	struct xlnxsync_device *dev = channel->dev;
+
+	cfg.encode = dev->config.encode;
+	cfg.max_channels = dev->config.max_channels;
+	cfg.active_channels = dev->chan_count;
+	cfg.hdr_ver = XLNXSYNC_IOCTL_HDR_VER;
+	cfg.reserved_id = channel->id;
+	dev_dbg(dev->dev, "IP Config : encode = %d max_channels = %d\n",
+		cfg.encode, cfg.max_channels);
+	dev_dbg(dev->dev, "IP Config : active channels = %d reserved id = %d\n",
+		cfg.active_channels, cfg.reserved_id);
+	dev_dbg(dev->dev, "ioctl version = 0x%llx\n", cfg.hdr_ver);
+	ret = copy_to_user(arg, &cfg, sizeof(cfg));
+	if (ret) {
+		dev_err(dev->dev, "%s: failed to copy result data to user\n",
+			__func__);
+		/* copy_to_user() returns bytes not copied, not an errno */
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * XLNXSYNC_CHAN_CLR_ERR handler: clear all latched error flags for
+ * @channel under irq_lock (the IRQ handler sets them under the same
+ * lock).
+ *
+ * Return: 0 on success, -EFAULT on copy failure, -EINVAL on version
+ * mismatch.
+ */
+static int xlnxsync_chan_clr_err(struct xlnxsync_channel *channel,
+				 void __user *arg)
+{
+	struct xlnxsync_clr_err errcfg;
+	int ret;
+	unsigned long flags;
+	struct xlnxsync_device *dev = channel->dev;
+
+	ret = copy_from_user(&errcfg, arg, sizeof(errcfg));
+	if (ret) {
+		dev_err(dev->dev, "%s : Failed to copy from user\n", __func__);
+		/* copy_from_user() returns bytes not copied, not an errno */
+		return -EFAULT;
+	}
+
+	if (errcfg.hdr_ver != XLNXSYNC_IOCTL_HDR_VER) {
+		dev_err(dev->dev, "%s : ioctl version mismatch\n", __func__);
+		dev_err(dev->dev,
+			"ioctl ver = 0x%llx expected ver = 0x%llx\n",
+			errcfg.hdr_ver, (u64)XLNXSYNC_IOCTL_HDR_VER);
+		return -EINVAL;
+	}
+
+	dev_dbg(dev->dev, "%s : Clearing %d channel errors\n",
+		__func__, channel->id);
+	/* Clear channel error status */
+	spin_lock_irqsave(&dev->irq_lock, flags);
+	if (channel->sync_err) {
+		dev_dbg(dev->dev, "Clearing sync err\n");
+		channel->sync_err = false;
+	}
+
+	if (channel->wdg_err) {
+		dev_dbg(dev->dev, "Clearing wdg err\n");
+		channel->wdg_err = false;
+	}
+
+	if (channel->ldiff_err) {
+		dev_dbg(dev->dev, "Clearing ldiff_err err\n");
+		channel->ldiff_err = false;
+	}
+
+	if (channel->cdiff_err) {
+		dev_dbg(dev->dev, "Clearing cdiff_err err\n");
+		channel->cdiff_err = false;
+	}
+
+	spin_unlock_irqrestore(&dev->irq_lock, flags);
+
+	return 0;
+}
+
+/*
+ * XLNXSYNC_CHAN_GET_FBDONE_STAT handler: report, per framebuffer and
+ * per direction, whether both luma and chroma "done" events have been
+ * observed by the IRQ handler.
+ *
+ * Return: 0 on success, -EFAULT if the result cannot be copied out.
+ */
+static int xlnxsync_chan_get_fbdone_status(struct xlnxsync_channel *channel,
+					   void __user *arg)
+{
+	/*
+	 * Zero-initialize: entries are only ever set true below, and
+	 * struct padding must not leak kernel stack data to userspace.
+	 */
+	struct xlnxsync_fbdone fbdone_stat = {0};
+	int ret, i, j;
+	unsigned long flags;
+	struct xlnxsync_device *dev = channel->dev;
+
+	fbdone_stat.hdr_ver = XLNXSYNC_IOCTL_HDR_VER;
+
+	/* l_done/c_done are written by the IRQ handler under irq_lock */
+	spin_lock_irqsave(&dev->irq_lock, flags);
+	for (i = 0; i < XLNXSYNC_BUF_PER_CHAN; i++)
+		for (j = 0; j < XLNXSYNC_IO; j++)
+			if (channel->l_done[i][j] &&
+			    channel->c_done[i][j])
+				fbdone_stat.status[i][j] = true;
+	spin_unlock_irqrestore(&dev->irq_lock, flags);
+
+	ret = copy_to_user(arg, &fbdone_stat, sizeof(fbdone_stat));
+	if (ret) {
+		dev_err(dev->dev, "%s: failed to copy result data to user\n",
+			__func__);
+		/* copy_to_user() returns bytes not copied, not an errno */
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * XLNXSYNC_CHAN_CLR_FBDONE_STAT handler: clear all per-buffer done
+ * state for @channel.
+ *
+ * Return: 0 on success, -EFAULT on copy failure, -EINVAL on version
+ * mismatch.
+ */
+static int xlnxsync_chan_clr_fbdone_status(struct xlnxsync_channel *channel,
+					   void __user *arg)
+{
+	struct xlnxsync_fbdone fbd;
+	int ret, i, j;
+	unsigned long flags;
+	struct xlnxsync_device *dev = channel->dev;
+
+	ret = copy_from_user(&fbd, arg, sizeof(fbd));
+	if (ret) {
+		dev_err(dev->dev, "%s : Failed to copy from user\n", __func__);
+		/* copy_from_user() returns bytes not copied, not an errno */
+		return -EFAULT;
+	}
+
+	if (fbd.hdr_ver != XLNXSYNC_IOCTL_HDR_VER) {
+		dev_err(dev->dev, "%s : ioctl version mismatch\n", __func__);
+		dev_err(dev->dev,
+			"ioctl ver = 0x%llx expected ver = 0x%llx\n",
+			fbd.hdr_ver, (u64)XLNXSYNC_IOCTL_HDR_VER);
+		return -EINVAL;
+	}
+
+	/*
+	 * Clear per-buffer done state under irq_lock. (The original
+	 * also cleared the local fbd.status copy, but that was never
+	 * copied back to userspace — dead code, dropped.)
+	 */
+	spin_lock_irqsave(&dev->irq_lock, flags);
+	for (i = 0; i < XLNXSYNC_BUF_PER_CHAN; i++) {
+		for (j = 0; j < XLNXSYNC_IO; j++) {
+			channel->l_done[i][j] = false;
+			channel->c_done[i][j] = false;
+		}
+	}
+	spin_unlock_irqrestore(&dev->irq_lock, flags);
+
+	return 0;
+}
+
+
+/*
+ * ioctl dispatcher. Every command is serialized by the per-channel
+ * mutex; the lock/unlock pair is hoisted around the switch instead of
+ * being repeated in each of the eight identical case bodies.
+ *
+ * Return: handler result, -ERESTARTSYS if interrupted while waiting
+ * for the mutex, -EINVAL for unknown commands, -EIO if the device
+ * handle is missing.
+ */
+static long xlnxsync_ioctl(struct file *fptr, unsigned int cmd,
+			   unsigned long data)
+{
+	int ret;
+	void __user *arg = (void __user *)data;
+	struct xlnxsync_channel *channel = fptr->private_data;
+	struct xlnxsync_device *xlnxsync_dev;
+
+	xlnxsync_dev = channel->dev;
+	if (!xlnxsync_dev) {
+		pr_err("%s: File op error\n", __func__);
+		return -EIO;
+	}
+
+	dev_dbg(xlnxsync_dev->dev, "ioctl = 0x%08x\n", cmd);
+
+	if (mutex_lock_interruptible(&channel->mutex))
+		return -ERESTARTSYS;
+
+	switch (cmd) {
+	case XLNXSYNC_GET_CFG:
+		ret = xlnxsync_get_config(channel, arg);
+		break;
+	case XLNXSYNC_CHAN_GET_STATUS:
+		ret = xlnxsync_chan_get_status(channel, arg);
+		break;
+	case XLNXSYNC_CHAN_SET_CONFIG:
+		ret = xlnxsync_chan_config(channel, arg);
+		break;
+	case XLNXSYNC_CHAN_ENABLE:
+		ret = xlnxsync_chan_enable(channel, true);
+		break;
+	case XLNXSYNC_CHAN_DISABLE:
+		ret = xlnxsync_chan_enable(channel, false);
+		break;
+	case XLNXSYNC_CHAN_CLR_ERR:
+		ret = xlnxsync_chan_clr_err(channel, arg);
+		break;
+	case XLNXSYNC_CHAN_GET_FBDONE_STAT:
+		ret = xlnxsync_chan_get_fbdone_status(channel, arg);
+		break;
+	case XLNXSYNC_CHAN_CLR_FBDONE_STAT:
+		ret = xlnxsync_chan_clr_fbdone_status(channel, arg);
+		break;
+	default:
+		/* Unknown command (original fell through with -EINVAL) */
+		ret = -EINVAL;
+		break;
+	}
+
+	mutex_unlock(&channel->mutex);
+
+	return ret;
+}
+
+/*
+ * poll/epoll handler. EPOLLPRI reports latched channel errors,
+ * EPOLLIN reports framebuffer-done events. Both event flags are
+ * consumed (cleared) when reported.
+ *
+ * A __poll_t function must not return a negative errno; the original
+ * returned -EIO for a missing device handle, which is an invalid
+ * event mask — report EPOLLERR instead.
+ */
+static __poll_t xlnxsync_poll(struct file *fptr, poll_table *wait)
+{
+	__poll_t ret = 0, req_events = poll_requested_events(wait);
+	struct xlnxsync_channel *channel = fptr->private_data;
+	struct xlnxsync_device *dev;
+	unsigned long flags;
+
+	dev = channel->dev;
+	if (!dev) {
+		pr_err("%s: File op error\n", __func__);
+		return EPOLLERR;
+	}
+
+	dev_dbg_ratelimited(dev->dev, "%s : entered req_events = 0x%x!\n",
+			    __func__, req_events);
+
+	if (!(req_events & (EPOLLPRI | EPOLLIN)))
+		return 0;
+
+	if (req_events & EPOLLPRI) {
+		poll_wait(fptr, &channel->wq_error, wait);
+		spin_lock_irqsave(&dev->irq_lock, flags);
+		if (channel->err_event) {
+			dev_dbg_ratelimited(dev->dev,
+					    "%s : error event in chan = %d!\n",
+					    __func__, channel->id);
+			ret |= EPOLLPRI;
+			channel->err_event = false;
+		}
+		spin_unlock_irqrestore(&dev->irq_lock, flags);
+	}
+
+	if (req_events & EPOLLIN) {
+		poll_wait(fptr, &channel->wq_fbdone, wait);
+		spin_lock_irqsave(&dev->irq_lock, flags);
+		if (channel->framedone_event) {
+			dev_dbg_ratelimited(dev->dev,
+					    "%s : fbdone event in chan = %d!\n",
+					    __func__, channel->id);
+			ret |= EPOLLIN;
+			channel->framedone_event = false;
+		}
+		spin_unlock_irqrestore(&dev->irq_lock, flags);
+	}
+
+	return ret;
+}
+
+/*
+ * open() handler: reserve a free hardware channel for this file
+ * handle and initialize its context.
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure, -ERESTARTSYS
+ * if interrupted, -ENOSPC when all channels are in use.
+ */
+static int xlnxsync_open(struct inode *iptr, struct file *fptr)
+{
+	struct xlnxsync_device *dev;
+	struct xlnxsync_channel *chan;
+	unsigned int i;
+
+	dev = container_of(iptr->i_cdev, struct xlnxsync_device, chdev);
+	if (!dev) {
+		pr_err("%s: failed to get xlnxsync driver handle\n", __func__);
+		return -EAGAIN;
+	}
+
+	chan = devm_kzalloc(dev->dev, sizeof(*chan), GFP_KERNEL);
+	if (!chan)
+		return -ENOMEM;
+
+	if (mutex_lock_interruptible(&dev->sync_mutex)) {
+		/* Don't leave the allocation pinned until device teardown */
+		devm_kfree(dev->dev, chan);
+		return -ERESTARTSYS;
+	}
+	i = find_first_zero_bit_le(&dev->reserved, dev->config.max_channels);
+	if (i >= dev->config.max_channels) {
+		dev_err(dev->dev, "No free channel available\n");
+		mutex_unlock(&dev->sync_mutex);
+		devm_kfree(dev->dev, chan);
+		return -ENOSPC;
+	}
+	dev_dbg(dev->dev, "Reserving channel %d\n", i);
+	set_bit(i, &dev->reserved);
+	chan->id = i;
+	list_add_tail(&chan->channel, &dev->channels);
+	chan->dev = dev;
+	fptr->private_data = chan;
+	mutex_init(&chan->mutex);
+	init_waitqueue_head(&chan->wq_fbdone);
+	init_waitqueue_head(&chan->wq_error);
+	dev->chan_count++;
+	atomic_inc(&dev->user_count);
+	dev_dbg(dev->dev, "%s: tid=%d Opened with user count = %d\n",
+		__func__, current->pid, atomic_read(&dev->user_count));
+	mutex_unlock(&dev->sync_mutex);
+
+	return 0;
+}
+
+/*
+ * release() handler: disable the channel if still enabled, return it
+ * to the free pool, and reset the whole device once the last handle
+ * closes.
+ *
+ * Release must not fail: the original used mutex_lock_interruptible()
+ * and bailed out with -ERESTARTSYS, which would permanently leak the
+ * channel's bitmap bit, list entry and allocation. Use an
+ * uninterruptible lock instead.
+ */
+static int xlnxsync_release(struct inode *iptr, struct file *fptr)
+{
+	struct xlnxsync_device *dev;
+	struct xlnxsync_channel *channel = fptr->private_data;
+
+	dev = container_of(iptr->i_cdev, struct xlnxsync_device, chdev);
+	if (!dev) {
+		pr_err("%s: failed to get xlnxsync driver handle", __func__);
+		return -EAGAIN;
+	}
+
+	dev_dbg(dev->dev, "%s: tid=%d user count = %d id = %d\n",
+		__func__, current->pid, atomic_read(&dev->user_count),
+		channel->id);
+
+	if (xlnxsync_read(dev, channel->id, XLNXSYNC_CTRL_REG) &
+	    XLNXSYNC_CTRL_ENABLE_MASK) {
+		dev_dbg(dev->dev, "Disabling %d channel\n", channel->id);
+		xlnxsync_reset_chan(dev, channel->id);
+		xlnxsync_clr(dev, channel->id, XLNXSYNC_CTRL_REG,
+			     XLNXSYNC_CTRL_ENABLE_MASK |
+			     XLNXSYNC_CTRL_INTR_EN_MASK);
+		xlnxsync_clr(dev, channel->id, XLNXSYNC_IER_REG,
+			     XLNXSYNC_IER_ALL_MASK);
+	}
+
+	mutex_lock(&dev->sync_mutex);
+	clear_bit(channel->id, &dev->reserved);
+	dev->chan_count--;
+	list_del(&channel->channel);
+	mutex_unlock(&dev->sync_mutex);
+	devm_kfree(dev->dev, channel);
+
+	if (atomic_dec_and_test(&dev->user_count)) {
+		xlnxsync_reset(dev);
+		dev_dbg(dev->dev,
+			"%s: tid=%d Stopping and clearing device",
+			__func__, current->pid);
+	}
+
+	return 0;
+}
+
+/* Character device file operations for /dev/xlnxsync* nodes */
+static const struct file_operations xlnxsync_fops = {
+	.open = xlnxsync_open,
+	.release = xlnxsync_release,
+	.unlocked_ioctl = xlnxsync_ioctl,
+	.poll = xlnxsync_poll,
+};
+
+/*
+ * Threaded IRQ handler. For every channel: read and acknowledge the
+ * ISR (write-to-clear per the write-back of the read value), latch
+ * error flags, record per-buffer luma/chroma done indications, and
+ * wake any pollers waiting for error or frame-done events.
+ */
+static irqreturn_t xlnxsync_irq_handler(int irq, void *data)
+{
+	struct xlnxsync_device *xlnxsync = (struct xlnxsync_device *)data;
+	u32 val;
+	struct xlnxsync_channel *chan;
+
+	/*
+	 * Use simple spin_lock (instead of spin_lock_irqsave) as interrupt
+	 * is registered with irqf_oneshot and !irqf_shared
+	 */
+	spin_lock(&xlnxsync->irq_lock);
+	list_for_each_entry(chan, &xlnxsync->channels, channel) {
+		u32 i, j;
+
+		/* Read then write back the ISR value to ack the events */
+		val = xlnxsync_read(xlnxsync, chan->id, XLNXSYNC_ISR_REG);
+		xlnxsync_write(xlnxsync, chan->id, XLNXSYNC_ISR_REG, val);
+
+		if (val & XLNXSYNC_ISR_SYNC_FAIL_MASK)
+			chan->sync_err = true;
+		if (val & XLNXSYNC_ISR_WDG_ERR_MASK)
+			chan->wdg_err = true;
+		if (val & XLNXSYNC_ISR_LDIFF)
+			chan->ldiff_err = true;
+		if (val & XLNXSYNC_ISR_CDIFF)
+			chan->cdiff_err = true;
+		if (chan->sync_err || chan->wdg_err ||
+		    chan->ldiff_err || chan->cdiff_err)
+			chan->err_event = true;
+
+		/* DONE fields encode the buffer index that completed */
+		if (val & XLNXSYNC_ISR_PLDONE_MASK) {
+			i = (val & XLNXSYNC_ISR_PLDONE_MASK) >>
+			    XLNXSYNC_ISR_PLDONE_SHIFT;
+
+			chan->l_done[i][XLNXSYNC_PROD] = true;
+		}
+
+		if (val & XLNXSYNC_ISR_PCDONE_MASK) {
+			i = (val & XLNXSYNC_ISR_PCDONE_MASK) >>
+			    XLNXSYNC_ISR_PCDONE_SHIFT;
+
+			chan->c_done[i][XLNXSYNC_PROD] = true;
+		}
+
+		if (val & XLNXSYNC_ISR_CLDONE_MASK) {
+			i = (val & XLNXSYNC_ISR_CLDONE_MASK) >>
+			    XLNXSYNC_ISR_CLDONE_SHIFT;
+
+			chan->l_done[i][XLNXSYNC_CONS] = true;
+		}
+
+		if (val & XLNXSYNC_ISR_CCDONE_MASK) {
+			i = (val & XLNXSYNC_ISR_CCDONE_MASK) >>
+			    XLNXSYNC_ISR_CCDONE_SHIFT;
+
+			chan->c_done[i][XLNXSYNC_CONS] = true;
+		}
+
+		/* A frame is done when both luma and chroma are done */
+		for (i = 0; i < XLNXSYNC_BUF_PER_CHAN; i++) {
+			for (j = 0; j < XLNXSYNC_IO; j++) {
+				if (chan->l_done[i][j] &&
+				    chan->c_done[i][j])
+					chan->framedone_event = true;
+			}
+		}
+
+		if (chan->err_event) {
+			dev_dbg(xlnxsync->dev, "%s : error occurred at channel->id = %d\n",
+				__func__, chan->id);
+			wake_up_interruptible(&chan->wq_error);
+		}
+
+		if (chan->framedone_event) {
+			dev_dbg_ratelimited(xlnxsync->dev, "%s : framedone occurred\n",
+					    __func__);
+			wake_up_interruptible(&chan->wq_fbdone);
+		}
+
+	}
+
+	spin_unlock(&xlnxsync->irq_lock);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Parse the "xlnx,encode" and "xlnx,num-chan" device-tree properties
+ * into the driver's config, validating the channel count against the
+ * encode (max 4) / decode (max 2) limits.
+ *
+ * Return: 0 on success, -EINVAL on an out-of-range channel count, or
+ * the of_property_read_u32() error.
+ */
+static int xlnxsync_parse_dt_prop(struct xlnxsync_device *xlnxsync)
+{
+	struct device_node *node = xlnxsync->dev->of_node;
+	int ret;
+
+	xlnxsync->config.encode = of_property_read_bool(node, "xlnx,encode");
+	dev_dbg(xlnxsync->dev, "synchronizer type = %s\n",
+		xlnxsync->config.encode ? "encode" : "decode");
+
+	/*
+	 * NOTE(review): the (u32 *) cast assumes config.max_channels is
+	 * a 32-bit field — confirm against the xlnxsync_config layout.
+	 */
+	ret = of_property_read_u32(node, "xlnx,num-chan",
+				   (u32 *)&xlnxsync->config.max_channels);
+	if (ret)
+		return ret;
+
+	dev_dbg(xlnxsync->dev, "max channels = %d\n",
+		xlnxsync->config.max_channels);
+
+	if (xlnxsync->config.max_channels == 0 ||
+	    xlnxsync->config.max_channels > XLNXSYNC_MAX_ENC_CHAN) {
+		dev_err(xlnxsync->dev, "Number of channels should be 1 to 4.\n");
+		dev_err(xlnxsync->dev, "Invalid number of channels : %d\n",
+			xlnxsync->config.max_channels);
+		return -EINVAL;
+	}
+
+	if (!xlnxsync->config.encode &&
+	    xlnxsync->config.max_channels > XLNXSYNC_MAX_DEC_CHAN) {
+		dev_err(xlnxsync->dev, "Decode can't have more than 2 channels.\n");
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+/*
+ * Acquire and enable the three input clocks (axilite control,
+ * producer, consumer). On failure, previously-enabled clocks are
+ * disabled in reverse order via the goto-cleanup ladder; the clocks
+ * themselves are devm-managed and need no explicit put.
+ *
+ * Return: 0 on success, or the clk API's negative error code.
+ */
+static int xlnxsync_clk_setup(struct xlnxsync_device *xlnxsync)
+{
+	int ret;
+
+	xlnxsync->axi_clk = devm_clk_get(xlnxsync->dev, "s_axi_ctrl_aclk");
+	if (IS_ERR(xlnxsync->axi_clk)) {
+		ret = PTR_ERR(xlnxsync->axi_clk);
+		dev_err(xlnxsync->dev, "failed to get axi_aclk (%d)\n", ret);
+		return ret;
+	}
+
+	xlnxsync->p_clk = devm_clk_get(xlnxsync->dev, "s_axi_mm_p_aclk");
+	if (IS_ERR(xlnxsync->p_clk)) {
+		ret = PTR_ERR(xlnxsync->p_clk);
+		dev_err(xlnxsync->dev, "failed to get p_aclk (%d)\n", ret);
+		return ret;
+	}
+
+	xlnxsync->c_clk = devm_clk_get(xlnxsync->dev, "s_axi_mm_aclk");
+	if (IS_ERR(xlnxsync->c_clk)) {
+		ret = PTR_ERR(xlnxsync->c_clk);
+		dev_err(xlnxsync->dev, "failed to get axi_mm (%d)\n", ret);
+		return ret;
+	}
+
+	ret = clk_prepare_enable(xlnxsync->axi_clk);
+	if (ret) {
+		dev_err(xlnxsync->dev, "failed to enable axi_clk (%d)\n", ret);
+		return ret;
+	}
+
+	ret = clk_prepare_enable(xlnxsync->p_clk);
+	if (ret) {
+		dev_err(xlnxsync->dev, "failed to enable p_clk (%d)\n", ret);
+		goto err_pclk;
+	}
+
+	ret = clk_prepare_enable(xlnxsync->c_clk);
+	if (ret) {
+		dev_err(xlnxsync->dev, "failed to enable axi_mm (%d)\n", ret);
+		goto err_cclk;
+	}
+
+	return ret;
+
+err_cclk:
+	clk_disable_unprepare(xlnxsync->p_clk);
+err_pclk:
+	clk_disable_unprepare(xlnxsync->axi_clk);
+
+	return ret;
+}
+
+/*
+ * xlnxsync_probe() - Platform-driver probe for the Xilinx Synchronizer IP.
+ * @pdev: the platform device being bound.
+ *
+ * Allocates driver state, reserves a minor number, maps the register
+ * space, parses DT properties, wires up the threaded IRQ handler,
+ * enables the clocks and finally exposes a character device node
+ * (/dev/xlnxsync<minor>).
+ *
+ * Fixes over the previous revision:
+ *  - every failure path after ida_simple_get() now releases the minor
+ *    number (the early returns used to leak it);
+ *  - a missing xlnxsync_class now fails with -EIO instead of falling
+ *    through with ret == 0 and reporting success from a torn-down probe.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int xlnxsync_probe(struct platform_device *pdev)
+{
+ struct xlnxsync_device *xlnxsync;
+ struct device *dc;
+ struct resource *res;
+ int ret;
+
+ xlnxsync = devm_kzalloc(&pdev->dev, sizeof(*xlnxsync), GFP_KERNEL);
+ if (!xlnxsync)
+ return -ENOMEM;
+
+ xlnxsync->minor = ida_simple_get(&xs_ida, 0, XLNXSYNC_DEV_MAX,
+ GFP_KERNEL);
+ if (xlnxsync->minor < 0)
+ return xlnxsync->minor;
+
+ xlnxsync->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get resource.\n");
+ ret = -ENODEV;
+ goto ida_err;
+ }
+
+ xlnxsync->iomem = devm_ioremap(xlnxsync->dev, res->start,
+ resource_size(res));
+ if (!xlnxsync->iomem) {
+ dev_err(&pdev->dev, "ip register mapping failed.\n");
+ ret = -ENOMEM;
+ goto ida_err;
+ }
+
+ ret = xlnxsync_parse_dt_prop(xlnxsync);
+ if (ret < 0)
+ goto ida_err;
+
+ xlnxsync->config.hdr_ver = XLNXSYNC_IOCTL_HDR_VER;
+ dev_dbg(xlnxsync->dev, "ioctl header version = 0x%llx\n",
+ xlnxsync->config.hdr_ver);
+
+ xlnxsync->irq = irq_of_parse_and_map(xlnxsync->dev->of_node, 0);
+ if (!xlnxsync->irq) {
+ dev_err(xlnxsync->dev, "Unable to parse and get irq.\n");
+ ret = -EINVAL;
+ goto ida_err;
+ }
+ ret = devm_request_threaded_irq(xlnxsync->dev, xlnxsync->irq, NULL,
+ xlnxsync_irq_handler, IRQF_ONESHOT,
+ dev_name(xlnxsync->dev), xlnxsync);
+
+ if (ret) {
+ dev_err(xlnxsync->dev, "Err = %d Interrupt handler reg failed!\n",
+ ret);
+ goto ida_err;
+ }
+
+ ret = xlnxsync_clk_setup(xlnxsync);
+ if (ret) {
+ dev_err(xlnxsync->dev, "clock setup failed!\n");
+ goto ida_err;
+ }
+
+ INIT_LIST_HEAD(&xlnxsync->channels);
+ spin_lock_init(&xlnxsync->irq_lock);
+
+ mutex_init(&xlnxsync->sync_mutex);
+
+ cdev_init(&xlnxsync->chdev, &xlnxsync_fops);
+ xlnxsync->chdev.owner = THIS_MODULE;
+ ret = cdev_add(&xlnxsync->chdev,
+ MKDEV(MAJOR(xlnxsync_devt), xlnxsync->minor), 1);
+ if (ret < 0) {
+ dev_err(xlnxsync->dev, "cdev_add failed");
+ goto clk_err;
+ }
+
+ if (!xlnxsync_class) {
+ /* Module init failed to create the class; cannot make a node. */
+ dev_err(xlnxsync->dev, "xvfsync device class not created");
+ ret = -EIO;
+ goto cdev_err;
+ }
+ dc = device_create(xlnxsync_class, xlnxsync->dev,
+ MKDEV(MAJOR(xlnxsync_devt), xlnxsync->minor),
+ xlnxsync, "xlnxsync%d", xlnxsync->minor);
+ if (IS_ERR(dc)) {
+ ret = PTR_ERR(dc);
+ dev_err(xlnxsync->dev, "Unable to create device");
+ goto cdev_err;
+ }
+
+ platform_set_drvdata(pdev, xlnxsync);
+ dev_info(xlnxsync->dev, "Xilinx Synchronizer probe successful!\n");
+
+ return 0;
+
+/* Error unwind: each label undoes the steps completed before it. */
+cdev_err:
+ cdev_del(&xlnxsync->chdev);
+clk_err:
+ clk_disable_unprepare(xlnxsync->c_clk);
+ clk_disable_unprepare(xlnxsync->p_clk);
+ clk_disable_unprepare(xlnxsync->axi_clk);
+ida_err:
+ ida_simple_remove(&xs_ida, xlnxsync->minor);
+
+ return ret;
+}
+
+/*
+ * xlnxsync_remove() - Undo xlnxsync_probe(): delete the char device,
+ * disable the three IP clocks and release the IDA minor number.
+ *
+ * Returns -EIO if called without valid driver data or after the device
+ * class has been torn down; 0 otherwise.
+ */
+static int xlnxsync_remove(struct platform_device *pdev)
+{
+ struct xlnxsync_device *xlnxsync = platform_get_drvdata(pdev);
+
+ if (!xlnxsync || !xlnxsync_class)
+ return -EIO;
+
+ cdev_del(&xlnxsync->chdev);
+ clk_disable_unprepare(xlnxsync->c_clk);
+ clk_disable_unprepare(xlnxsync->p_clk);
+ clk_disable_unprepare(xlnxsync->axi_clk);
+ ida_simple_remove(&xs_ida, xlnxsync->minor);
+
+ return 0;
+}
+
+/* Device-tree match table: binds this driver to "xlnx,sync-ip-1.0" nodes. */
+static const struct of_device_id xlnxsync_of_match[] = {
+ { .compatible = "xlnx,sync-ip-1.0", },
+ { /* end of table*/ }
+};
+MODULE_DEVICE_TABLE(of, xlnxsync_of_match);
+
+/* Platform driver glue; probe/remove are registered from module init. */
+static struct platform_driver xlnxsync_driver = {
+ .driver = {
+ .name = XLNXSYNC_DRIVER_NAME,
+ .of_match_table = xlnxsync_of_match,
+ },
+ .probe = xlnxsync_probe,
+ .remove = xlnxsync_remove,
+};
+
+/*
+ * xlnxsync_init_mod() - Module init: create the device class, reserve a
+ * char-device region of XLNXSYNC_DEV_MAX minors and register the
+ * platform driver. Unwinds in reverse order on failure.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int __init xlnxsync_init_mod(void)
+{
+ int err;
+
+ xlnxsync_class = class_create(THIS_MODULE, XLNXSYNC_DRIVER_NAME);
+ if (IS_ERR(xlnxsync_class)) {
+ pr_err("%s : Unable to create xlnxsync class", __func__);
+ return PTR_ERR(xlnxsync_class);
+ }
+ err = alloc_chrdev_region(&xlnxsync_devt, 0,
+ XLNXSYNC_DEV_MAX, XLNXSYNC_DRIVER_NAME);
+ if (err < 0) {
+ pr_err("%s: Unable to get major number for xlnxsync", __func__);
+ goto err_class;
+ }
+ err = platform_driver_register(&xlnxsync_driver);
+ if (err < 0) {
+ pr_err("%s: Unable to register %s driver",
+ __func__, XLNXSYNC_DRIVER_NAME);
+ goto err_pdrv;
+ }
+ return 0;
+err_pdrv:
+ unregister_chrdev_region(xlnxsync_devt, XLNXSYNC_DEV_MAX);
+err_class:
+ class_destroy(xlnxsync_class);
+ return err;
+}
+
+/*
+ * xlnxsync_cleanup_mod() - Module exit: tear down in reverse order of
+ * xlnxsync_init_mod(). The class pointer is cleared so a late
+ * xlnxsync_remove() call sees it as invalid.
+ */
+static void __exit xlnxsync_cleanup_mod(void)
+{
+ platform_driver_unregister(&xlnxsync_driver);
+ unregister_chrdev_region(xlnxsync_devt, XLNXSYNC_DEV_MAX);
+ class_destroy(xlnxsync_class);
+ xlnxsync_class = NULL;
+}
+module_init(xlnxsync_init_mod);
+module_exit(xlnxsync_cleanup_mod);
+
+MODULE_AUTHOR("Vishal Sagar");
+MODULE_DESCRIPTION("Xilinx Synchronizer IP Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(XLNXSYNC_DRIVER_VERSION);
diff --git a/drivers/staging/xroeframer/Kconfig b/drivers/staging/xroeframer/Kconfig
new file mode 100644
index 000000000000..16aa1f2c6a78
--- /dev/null
+++ b/drivers/staging/xroeframer/Kconfig
@@ -0,0 +1,18 @@
+#
+# Xilinx Radio over Ethernet Framer driver
+#
+
+config XROE_FRAMER
+ tristate "Xilinx Radio over Ethernet Framer driver"
+ ---help---
+ The "Radio Over Ethernet Framer" IP (roe_framer) ingests/generates
+ Ethernet packet data, (de-)multiplexes packets based on protocol
+ into/from various Radio Antenna data streams.
+
+ It has 2 main, independent, data paths:
+
+ - Downlink, from the BaseBand to the Phone, Ethernet to Antenna,
+ we call this the De-Framer path, or defm on all related IP signals.
+
+ - Uplink, from the Phone to the BaseBand, Antenna to Ethernet,
+ we call this the Framer path, or fram on all related IP signals.
diff --git a/drivers/staging/xroeframer/Makefile b/drivers/staging/xroeframer/Makefile
new file mode 100644
index 000000000000..f7bf07e98243
--- /dev/null
+++ b/drivers/staging/xroeframer/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Radio over Ethernet Framer driver
+#
+obj-$(CONFIG_XROE_FRAMER) := framer.o
+
+framer-objs := xroe_framer.o \
+ sysfs_xroe.o \
+ sysfs_xroe_framer_ipv4.o \
+ sysfs_xroe_framer_ipv6.o \
+ sysfs_xroe_framer_udp.o \
+ sysfs_xroe_framer_stats.o
diff --git a/drivers/staging/xroeframer/README b/drivers/staging/xroeframer/README
new file mode 100644
index 000000000000..505a46c2cf62
--- /dev/null
+++ b/drivers/staging/xroeframer/README
@@ -0,0 +1,47 @@
+Xilinx Radio over Ethernet Framer driver
+=========================================
+
+About the RoE Framer
+
+The "Radio Over Ethernet Framer" IP (roe_framer) ingests/generates Ethernet
+packet data, (de-)multiplexes packets based on protocol into/from various
+Radio Antenna data streams.
+
+It has 2 main, independent, data paths
+
+- Downlink, from the BaseBand to the Phone, Ethernet to Antenna,
+we call this the De-Framer path, or defm on all related IP signals.
+
+- Uplink, from the Phone to the BaseBand, Antenna to Ethernet,
+we call this the Framer path, or fram on all related IP signals.
+
+Key points:
+
+- Apart from the AXI4-Lite configuration port and a handful of strobe/control
+signals all data interfaces are AXI Stream (AXIS).
+- The IP does not contain an Ethernet MAC IP, rather it routes, or creates
+packets based on the direction through the roe_framer.
+- Currently designed to work with
+ - 1, 2 or 4 10G Ethernet AXIS stream ports to/from 1, 2, 4, 8, 16,
+ or 32 antenna ports
+ Note: each Ethernet port is 64 bit data @ 156.25MHz
+ - 1 or 2 25G Ethernet AXIS stream ports to/from 1, 2, 4, 8, 16,
+ or 32 antenna ports
+ Note: each Ethernet port is 64 bit data @ 390.625MHz
+- Contains a filter so that all non-protocol packets, or non-hardware-IP
+processed packets can be forwarded to another block for processing. In general
+this in a Microprocessor, specifically the Zynq ARM in our case. This filter
+function can move into the optional switch when TSN is used.
+
+About the Linux Driver
+
+The RoE Framer Linux Driver provides sysfs access to the framer controls. The
+loading of the driver to the hardware is possible using Device Tree binding
+(see "dt-binding.txt" for more information). When the driver is loaded, the
+general controls (such as framing mode, enable, restart etc) are exposed
+under /sys/kernel/xroe. Furthermore, specific controls can be found under
+/sys/kernel/xroe/framer. These include protocol-specific settings, for
+IPv4, IPv6 & UDP.
+
+There is also the option of accessing the framer's register map using
+ioctl calls for both reading and writing (where permitted) directly.
diff --git a/drivers/staging/xroeframer/dt-binding.txt b/drivers/staging/xroeframer/dt-binding.txt
new file mode 100644
index 000000000000..8dabef16d083
--- /dev/null
+++ b/drivers/staging/xroeframer/dt-binding.txt
@@ -0,0 +1,17 @@
+* Xilinx Radio over Ethernet Framer driver
+
+Required properties:
+- compatible: must be "xlnx,roe-framer-1.0"
+- reg: physical base address of the framer and length of memory mapped region
+- clock-names: list of clock names
+- clocks: list of clock sources corresponding to the clock names
+
+Example:
+ roe_framer@a0000000 {
+ compatible = "xlnx,roe-framer-1.0";
+ reg = <0x0 0xa0000000 0x0 0x10000>;
+ clock-names = "s_axi_aclk", "m_axis_defm_aclk",
+ "s_axis_fram_aclk", "tx0_eth_port_clk",
+ "internal_bus_clk";
+ clocks = <0x43 0x44 0x44 0x45 0x45>;
+ };
diff --git a/drivers/staging/xroeframer/roe_framer_ctrl.h b/drivers/staging/xroeframer/roe_framer_ctrl.h
new file mode 100644
index 000000000000..162c49a9bc3b
--- /dev/null
+++ b/drivers/staging/xroeframer/roe_framer_ctrl.h
@@ -0,0 +1,1088 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+/*-----------------------------------------------------------------------------
+ * C Header bank BASE definitions
+ *-----------------------------------------------------------------------------
+ */
+#define ROE_FRAMER_V1_0_CFG_BASE_ADDR 0x0 /* 0 */
+#define ROE_FRAMER_V1_0_FRAM_BASE_ADDR 0x2000 /* 8192 */
+#define ROE_FRAMER_V1_0_FRAM_DRP_BASE_ADDR 0x4000 /* 16384 */
+#define ROE_FRAMER_V1_0_DEFM_BASE_ADDR 0x6000 /* 24576 */
+#define ROE_FRAMER_V1_0_DEFM_DRP_BASE_ADDR 0x8000 /* 32768 */
+#define ROE_FRAMER_V1_0_ETH_BASE_ADDR 0xa000 /* 40960 */
+#define ROE_FRAMER_V1_0_STATS_BASE_ADDR 0xc000 /* 49152 */
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_framer_v1_0_cfg
+ * with prefix cfg_ @ address 0x0
+ *-----------------------------------------------------------------------------
+ */
+/* Type = roInt */
+#define CFG_MAJOR_REVISION_ADDR 0x0 /* 0 */
+#define CFG_MAJOR_REVISION_MASK 0xff000000 /* 4278190080 */
+#define CFG_MAJOR_REVISION_OFFSET 0x18 /* 24 */
+#define CFG_MAJOR_REVISION_WIDTH 0x8 /* 8 */
+#define CFG_MAJOR_REVISION_DEFAULT 0x1 /* 1 */
+
+/* Type = roInt */
+#define CFG_MINOR_REVISION_ADDR 0x0 /* 0 */
+#define CFG_MINOR_REVISION_MASK 0xff0000 /* 16711680 */
+#define CFG_MINOR_REVISION_OFFSET 0x10 /* 16 */
+#define CFG_MINOR_REVISION_WIDTH 0x8 /* 8 */
+#define CFG_MINOR_REVISION_DEFAULT 0x0 /* 0 */
+
+/* Type = roInt */
+#define CFG_VERSION_REVISION_ADDR 0x0 /* 0 */
+#define CFG_VERSION_REVISION_MASK 0xff00 /* 65280 */
+#define CFG_VERSION_REVISION_OFFSET 0x8 /* 8 */
+#define CFG_VERSION_REVISION_WIDTH 0x8 /* 8 */
+#define CFG_VERSION_REVISION_DEFAULT 0x0 /* 0 */
+
+/* Type = roInt */
+#define CFG_INTERNAL_REVISION_ADDR 0x4 /* 4 */
+#define CFG_INTERNAL_REVISION_MASK 0xffffffff /* 4294967295 */
+#define CFG_INTERNAL_REVISION_OFFSET 0x0 /* 0 */
+#define CFG_INTERNAL_REVISION_WIDTH 0x20 /* 32 */
+#define CFG_INTERNAL_REVISION_DEFAULT 0x12345678 /* 305419896 */
+
+/* Type = rw */
+#define CFG_TIMEOUT_VALUE_ADDR 0x8 /* 8 */
+#define CFG_TIMEOUT_VALUE_MASK 0xfff /* 4095 */
+#define CFG_TIMEOUT_VALUE_OFFSET 0x0 /* 0 */
+#define CFG_TIMEOUT_VALUE_WIDTH 0xc /* 12 */
+#define CFG_TIMEOUT_VALUE_DEFAULT 0x80 /* 128 */
+
+/* Type = rw */
+#define CFG_USER_RW_OUT_ADDR 0xc /* 12 */
+#define CFG_USER_RW_OUT_MASK 0xff /* 255 */
+#define CFG_USER_RW_OUT_OFFSET 0x0 /* 0 */
+#define CFG_USER_RW_OUT_WIDTH 0x8 /* 8 */
+#define CFG_USER_RW_OUT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_USER_RO_IN_ADDR 0xc /* 12 */
+#define CFG_USER_RO_IN_MASK 0xff0000 /* 16711680 */
+#define CFG_USER_RO_IN_OFFSET 0x10 /* 16 */
+#define CFG_USER_RO_IN_WIDTH 0x8 /* 8 */
+#define CFG_USER_RO_IN_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define CFG_MASTER_INT_ENABLE_ADDR 0x10 /* 16 */
+#define CFG_MASTER_INT_ENABLE_MASK 0x1 /* 1 */
+#define CFG_MASTER_INT_ENABLE_OFFSET 0x0 /* 0 */
+#define CFG_MASTER_INT_ENABLE_WIDTH 0x1 /* 1 */
+#define CFG_MASTER_INT_ENABLE_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define CFG_FRAM_FIFO_OF_ENABLE_ADDR 0x14 /* 20 */
+#define CFG_FRAM_FIFO_OF_ENABLE_MASK 0x1 /* 1 */
+#define CFG_FRAM_FIFO_OF_ENABLE_OFFSET 0x0 /* 0 */
+#define CFG_FRAM_FIFO_OF_ENABLE_WIDTH 0x1 /* 1 */
+#define CFG_FRAM_FIFO_OF_ENABLE_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define CFG_FRAM_FIFO_UF_ENABLE_ADDR 0x14 /* 20 */
+#define CFG_FRAM_FIFO_UF_ENABLE_MASK 0x2 /* 2 */
+#define CFG_FRAM_FIFO_UF_ENABLE_OFFSET 0x1 /* 1 */
+#define CFG_FRAM_FIFO_UF_ENABLE_WIDTH 0x1 /* 1 */
+#define CFG_FRAM_FIFO_UF_ENABLE_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define CFG_AXI_TIMEOUT_ENABLE_ADDR 0x14 /* 20 */
+#define CFG_AXI_TIMEOUT_ENABLE_MASK 0x80000000 /* 2147483648 */
+#define CFG_AXI_TIMEOUT_ENABLE_OFFSET 0x1f /* 31 */
+#define CFG_AXI_TIMEOUT_ENABLE_WIDTH 0x1 /* 1 */
+#define CFG_AXI_TIMEOUT_ENABLE_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define CFG_INTERRUPT_STATUS_SAMPLE_ADDR 0x1c /* 28 */
+#define CFG_INTERRUPT_STATUS_SAMPLE_MASK 0x1 /* 1 */
+#define CFG_INTERRUPT_STATUS_SAMPLE_OFFSET 0x0 /* 0 */
+#define CFG_INTERRUPT_STATUS_SAMPLE_WIDTH 0x1 /* 1 */
+#define CFG_INTERRUPT_STATUS_SAMPLE_DEFAULT 0x1 /* 1 */
+
+/* Type = roSig */
+#define CFG_FRAM_RESET_STATUS_ADDR 0x18 /* 24 */
+#define CFG_FRAM_RESET_STATUS_MASK 0x1 /* 1 */
+#define CFG_FRAM_RESET_STATUS_OFFSET 0x0 /* 0 */
+#define CFG_FRAM_RESET_STATUS_WIDTH 0x1 /* 1 */
+#define CFG_FRAM_RESET_STATUS_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_DEFM_RESET_STATUS_ADDR 0x18 /* 24 */
+#define CFG_DEFM_RESET_STATUS_MASK 0x2 /* 2 */
+#define CFG_DEFM_RESET_STATUS_OFFSET 0x1 /* 1 */
+#define CFG_DEFM_RESET_STATUS_WIDTH 0x1 /* 1 */
+#define CFG_DEFM_RESET_STATUS_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_FRAM_ANT_OF_INTERRUPT_ADDR 0x18 /* 24 */
+#define CFG_FRAM_ANT_OF_INTERRUPT_MASK 0x100 /* 256 */
+#define CFG_FRAM_ANT_OF_INTERRUPT_OFFSET 0x8 /* 8 */
+#define CFG_FRAM_ANT_OF_INTERRUPT_WIDTH 0x1 /* 1 */
+#define CFG_FRAM_ANT_OF_INTERRUPT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_FRAM_ETH_OF_INTERRUPT_ADDR 0x18 /* 24 */
+#define CFG_FRAM_ETH_OF_INTERRUPT_MASK 0x200 /* 512 */
+#define CFG_FRAM_ETH_OF_INTERRUPT_OFFSET 0x9 /* 9 */
+#define CFG_FRAM_ETH_OF_INTERRUPT_WIDTH 0x1 /* 1 */
+#define CFG_FRAM_ETH_OF_INTERRUPT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_FRAM_ANT_UF_INTERRUPT_ADDR 0x18 /* 24 */
+#define CFG_FRAM_ANT_UF_INTERRUPT_MASK 0x400 /* 1024 */
+#define CFG_FRAM_ANT_UF_INTERRUPT_OFFSET 0xa /* 10 */
+#define CFG_FRAM_ANT_UF_INTERRUPT_WIDTH 0x1 /* 1 */
+#define CFG_FRAM_ANT_UF_INTERRUPT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_FRAM_ETH_UF_INTERRUPT_ADDR 0x18 /* 24 */
+#define CFG_FRAM_ETH_UF_INTERRUPT_MASK 0x800 /* 2048 */
+#define CFG_FRAM_ETH_UF_INTERRUPT_OFFSET 0xb /* 11 */
+#define CFG_FRAM_ETH_UF_INTERRUPT_WIDTH 0x1 /* 1 */
+#define CFG_FRAM_ETH_UF_INTERRUPT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_AXI_TIMEOUT_STATUS_ADDR 0x18 /* 24 */
+#define CFG_AXI_TIMEOUT_STATUS_MASK 0x80000000 /* 2147483648 */
+#define CFG_AXI_TIMEOUT_STATUS_OFFSET 0x1f /* 31 */
+#define CFG_AXI_TIMEOUT_STATUS_WIDTH 0x1 /* 1 */
+#define CFG_AXI_TIMEOUT_STATUS_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_CONFIG_NO_OF_FRAM_ANTS_ADDR 0x20 /* 32 */
+#define CFG_CONFIG_NO_OF_FRAM_ANTS_MASK 0xffff /* 65535 */
+#define CFG_CONFIG_NO_OF_FRAM_ANTS_OFFSET 0x0 /* 0 */
+#define CFG_CONFIG_NO_OF_FRAM_ANTS_WIDTH 0x10 /* 16 */
+#define CFG_CONFIG_NO_OF_FRAM_ANTS_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_CONFIG_NO_OF_DEFM_ANTS_ADDR 0x20 /* 32 */
+#define CFG_CONFIG_NO_OF_DEFM_ANTS_MASK 0xffff0000 /* 4294901760 */
+#define CFG_CONFIG_NO_OF_DEFM_ANTS_OFFSET 0x10 /* 16 */
+#define CFG_CONFIG_NO_OF_DEFM_ANTS_WIDTH 0x10 /* 16 */
+#define CFG_CONFIG_NO_OF_DEFM_ANTS_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_CONFIG_NO_OF_ETH_PORTS_ADDR 0x24 /* 36 */
+#define CFG_CONFIG_NO_OF_ETH_PORTS_MASK 0x3ff /* 1023 */
+#define CFG_CONFIG_NO_OF_ETH_PORTS_OFFSET 0x0 /* 0 */
+#define CFG_CONFIG_NO_OF_ETH_PORTS_WIDTH 0xa /* 10 */
+#define CFG_CONFIG_NO_OF_ETH_PORTS_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_CONFIG_ETH_SPEED_ADDR 0x24 /* 36 */
+#define CFG_CONFIG_ETH_SPEED_MASK 0x3ff0000 /* 67043328 */
+#define CFG_CONFIG_ETH_SPEED_OFFSET 0x10 /* 16 */
+#define CFG_CONFIG_ETH_SPEED_WIDTH 0xa /* 10 */
+#define CFG_CONFIG_ETH_SPEED_DEFAULT 0x0 /* 0 */
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_framer_v1_0_fram
+ * with prefix fram_ @ address 0x2000
+ *------------------------------------------------------------------------------
+ */
+/* Type = rwpdef */
+#define FRAM_DISABLE_ADDR 0x2000 /* 8192 */
+#define FRAM_DISABLE_MASK 0x1 /* 1 */
+#define FRAM_DISABLE_OFFSET 0x0 /* 0 */
+#define FRAM_DISABLE_WIDTH 0x1 /* 1 */
+#define FRAM_DISABLE_DEFAULT 0x1 /* 1 */
+
+/* Type = roSig */
+#define FRAM_READY_ADDR 0x2000 /* 8192 */
+#define FRAM_READY_MASK 0x2 /* 2 */
+#define FRAM_READY_OFFSET 0x1 /* 1 */
+#define FRAM_READY_WIDTH 0x1 /* 1 */
+#define FRAM_READY_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define FRAM_FIFO_FULL_INDICATOR_ADDR 0x2004 /* 8196 */
+#define FRAM_FIFO_FULL_INDICATOR_MASK 0xffffffff /* 4294967295 */
+#define FRAM_FIFO_FULL_INDICATOR_OFFSET 0x0 /* 0 */
+#define FRAM_FIFO_FULL_INDICATOR_WIDTH 0x20 /* 32 */
+#define FRAM_FIFO_FULL_INDICATOR_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_LOW_CNT_MIN_ADDR 0x2020 /* 8224 */
+#define FRAM_SN_DATA_LOW_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_LOW_CNT_MIN_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_LOW_CNT_MIN_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_LOW_CNT_MIN_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_LOW_CNT_MAX_ADDR 0x2024 /* 8228 */
+#define FRAM_SN_DATA_LOW_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_LOW_CNT_MAX_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_LOW_CNT_MAX_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_LOW_CNT_MAX_DEFAULT 0x78 /* 120 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_LOW_CNT_INITVAL_ADDR 0x2028 /* 8232 */
+#define FRAM_SN_DATA_LOW_CNT_INITVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_LOW_CNT_INITVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_LOW_CNT_INITVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_LOW_CNT_INITVAL_DEFAULT 0x75 /* 117 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_LOW_CNT_INCVAL_ADDR 0x202c /* 8236 */
+#define FRAM_SN_DATA_LOW_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_LOW_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_LOW_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_LOW_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_HIGH_CNT_MIN_ADDR 0x2030 /* 8240 */
+#define FRAM_SN_DATA_HIGH_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_HIGH_CNT_MIN_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_HIGH_CNT_MIN_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_HIGH_CNT_MIN_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_HIGH_CNT_MAX_ADDR 0x2034 /* 8244 */
+#define FRAM_SN_DATA_HIGH_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_HIGH_CNT_MAX_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_HIGH_CNT_MAX_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_HIGH_CNT_MAX_DEFAULT 0x4f /* 79 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_HIGH_CNT_INITVAL_ADDR 0x2038 /* 8248 */
+#define FRAM_SN_DATA_HIGH_CNT_INITVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_HIGH_CNT_INITVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_HIGH_CNT_INITVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_HIGH_CNT_INITVAL_DEFAULT 0x4f /* 79 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_HIGH_CNT_INCVAL_ADDR 0x203c /* 8252 */
+#define FRAM_SN_DATA_HIGH_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_HIGH_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_HIGH_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_HIGH_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_LOW_CNT_MIN_ADDR 0x2050 /* 8272 */
+#define FRAM_SN_CTRL_LOW_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_LOW_CNT_MIN_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_LOW_CNT_MIN_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_LOW_CNT_MIN_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_LOW_CNT_MAX_ADDR 0x2054 /* 8276 */
+#define FRAM_SN_CTRL_LOW_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_LOW_CNT_MAX_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_LOW_CNT_MAX_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_LOW_CNT_MAX_DEFAULT 0x78 /* 120 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_LOW_CNT_INITVAL_ADDR 0x2058 /* 8280 */
+#define FRAM_SN_CTRL_LOW_CNT_INITVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_LOW_CNT_INITVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_LOW_CNT_INITVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_LOW_CNT_INITVAL_DEFAULT 0x75 /* 117 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_LOW_CNT_INCVAL_ADDR 0x205c /* 8284 */
+#define FRAM_SN_CTRL_LOW_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_LOW_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_LOW_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_LOW_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_HIGH_CNT_MIN_ADDR 0x2060 /* 8288 */
+#define FRAM_SN_CTRL_HIGH_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_HIGH_CNT_MIN_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_HIGH_CNT_MIN_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_HIGH_CNT_MIN_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_HIGH_CNT_MAX_ADDR 0x2064 /* 8292 */
+#define FRAM_SN_CTRL_HIGH_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_HIGH_CNT_MAX_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_HIGH_CNT_MAX_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_HIGH_CNT_MAX_DEFAULT 0x4f /* 79 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_HIGH_CNT_INITVAL_ADDR 0x2068 /* 8296 */
+#define FRAM_SN_CTRL_HIGH_CNT_INITVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_HIGH_CNT_INITVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_HIGH_CNT_INITVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_HIGH_CNT_INITVAL_DEFAULT 0x4f /* 79 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_HIGH_CNT_INCVAL_ADDR 0x206c /* 8300 */
+#define FRAM_SN_CTRL_HIGH_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_HIGH_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_HIGH_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_HIGH_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define FRAM_PROTOCOL_DEFINITION_ADDR 0x2200 /* 8704 */
+#define FRAM_PROTOCOL_DEFINITION_MASK 0xf /* 15 */
+#define FRAM_PROTOCOL_DEFINITION_OFFSET 0x0 /* 0 */
+#define FRAM_PROTOCOL_DEFINITION_WIDTH 0x4 /* 4 */
+#define FRAM_PROTOCOL_DEFINITION_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_GEN_VLAN_TAG_ADDR 0x2200 /* 8704 */
+#define FRAM_GEN_VLAN_TAG_MASK 0x10 /* 16 */
+#define FRAM_GEN_VLAN_TAG_OFFSET 0x4 /* 4 */
+#define FRAM_GEN_VLAN_TAG_WIDTH 0x1 /* 1 */
+#define FRAM_GEN_VLAN_TAG_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_SEL_IPV_ADDRESS_TYPE_ADDR 0x2200 /* 8704 */
+#define FRAM_SEL_IPV_ADDRESS_TYPE_MASK 0x60 /* 96 */
+#define FRAM_SEL_IPV_ADDRESS_TYPE_OFFSET 0x5 /* 5 */
+#define FRAM_SEL_IPV_ADDRESS_TYPE_WIDTH 0x2 /* 2 */
+#define FRAM_SEL_IPV_ADDRESS_TYPE_DEFAULT 0x0 /* 0 */
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_framer_v1_0_fram_drp
+ * with prefix fram_drp @ address 0x4000
+ *------------------------------------------------------------------------------
+ */
+/* Type = rw */
+#define FRAM_DRPFRAM_DATA_PC_ID_ADDR 0x4000 /* 16384 */
+#define FRAM_DRPFRAM_DATA_PC_ID_MASK 0xffff /* 65535 */
+#define FRAM_DRPFRAM_DATA_PC_ID_OFFSET 0x0 /* 0 */
+#define FRAM_DRPFRAM_DATA_PC_ID_WIDTH 0x10 /* 16 */
+#define FRAM_DRPFRAM_DATA_PC_ID_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_DRPFRAM_DATA_MESSAGE_TYPE_ADDR 0x4000 /* 16384 */
+#define FRAM_DRPFRAM_DATA_MESSAGE_TYPE_MASK 0xff0000 /* 16711680 */
+#define FRAM_DRPFRAM_DATA_MESSAGE_TYPE_OFFSET 0x10 /* 16 */
+#define FRAM_DRPFRAM_DATA_MESSAGE_TYPE_WIDTH 0x8 /* 8 */
+#define FRAM_DRPFRAM_DATA_MESSAGE_TYPE_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_DRPFRAM_DATA_ETHERNET_PORT_ADDR 0x4000 /* 16384 */
+#define FRAM_DRPFRAM_DATA_ETHERNET_PORT_MASK 0xff000000 /* 4278190080 */
+#define FRAM_DRPFRAM_DATA_ETHERNET_PORT_OFFSET 0x18 /* 24 */
+#define FRAM_DRPFRAM_DATA_ETHERNET_PORT_WIDTH 0x8 /* 8 */
+#define FRAM_DRPFRAM_DATA_ETHERNET_PORT_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_DRPFRAM_CTRL_PC_ID_ADDR 0x4400 /* 17408 */
+#define FRAM_DRPFRAM_CTRL_PC_ID_MASK 0xffff /* 65535 */
+#define FRAM_DRPFRAM_CTRL_PC_ID_OFFSET 0x0 /* 0 */
+#define FRAM_DRPFRAM_CTRL_PC_ID_WIDTH 0x10 /* 16 */
+#define FRAM_DRPFRAM_CTRL_PC_ID_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_DRPFRAM_CTRL_MESSAGE_TYPE_ADDR 0x4400 /* 17408 */
+#define FRAM_DRPFRAM_CTRL_MESSAGE_TYPE_MASK 0xff0000 /* 16711680 */
+#define FRAM_DRPFRAM_CTRL_MESSAGE_TYPE_OFFSET 0x10 /* 16 */
+#define FRAM_DRPFRAM_CTRL_MESSAGE_TYPE_WIDTH 0x8 /* 8 */
+#define FRAM_DRPFRAM_CTRL_MESSAGE_TYPE_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_DRPFRAM_CTRL_ETHERNET_PORT_ADDR 0x4400 /* 17408 */
+#define FRAM_DRPFRAM_CTRL_ETHERNET_PORT_MASK 0xff000000 /* 4278190080 */
+#define FRAM_DRPFRAM_CTRL_ETHERNET_PORT_OFFSET 0x18 /* 24 */
+#define FRAM_DRPFRAM_CTRL_ETHERNET_PORT_WIDTH 0x8 /* 8 */
+#define FRAM_DRPFRAM_CTRL_ETHERNET_PORT_DEFAULT 0x0 /* 0 */
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_framer_v1_0_defm
+ * with prefix defm_ @ address 0x6000
+ *------------------------------------------------------------------------------
+ */
+/* Type = rw */
+#define DEFM_RESTART_ADDR 0x6000 /* 24576 */
+#define DEFM_RESTART_MASK 0x1 /* 1 */
+#define DEFM_RESTART_OFFSET 0x0 /* 0 */
+#define DEFM_RESTART_WIDTH 0x1 /* 1 */
+#define DEFM_RESTART_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_READY_ADDR 0x6000 /* 24576 */
+#define DEFM_READY_MASK 0x2 /* 2 */
+#define DEFM_READY_OFFSET 0x1 /* 1 */
+#define DEFM_READY_WIDTH 0x1 /* 1 */
+#define DEFM_READY_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define DEFM_ERR_PACKET_FILTER_ADDR 0x6004 /* 24580 */
+#define DEFM_ERR_PACKET_FILTER_MASK 0x3 /* 3 */
+#define DEFM_ERR_PACKET_FILTER_OFFSET 0x0 /* 0 */
+#define DEFM_ERR_PACKET_FILTER_WIDTH 0x2 /* 2 */
+#define DEFM_ERR_PACKET_FILTER_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define DEFM_DATA_PKT_MESSAGE_TYPE_ADDR 0x6008 /* 24584 */
+#define DEFM_DATA_PKT_MESSAGE_TYPE_MASK 0xff /* 255 */
+#define DEFM_DATA_PKT_MESSAGE_TYPE_OFFSET 0x0 /* 0 */
+#define DEFM_DATA_PKT_MESSAGE_TYPE_WIDTH 0x8 /* 8 */
+#define DEFM_DATA_PKT_MESSAGE_TYPE_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define DEFM_CTRL_PKT_MESSAGE_TYPE_ADDR 0x600c /* 24588 */
+#define DEFM_CTRL_PKT_MESSAGE_TYPE_MASK 0xff /* 255 */
+#define DEFM_CTRL_PKT_MESSAGE_TYPE_OFFSET 0x0 /* 0 */
+#define DEFM_CTRL_PKT_MESSAGE_TYPE_WIDTH 0x8 /* 8 */
+#define DEFM_CTRL_PKT_MESSAGE_TYPE_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define DEFM_SN_DATA_LOW_CNT_MIN_ADDR 0x6020 /* 24608 */
+#define DEFM_SN_DATA_LOW_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_DATA_LOW_CNT_MIN_OFFSET 0x0 /* 0 */
+#define DEFM_SN_DATA_LOW_CNT_MIN_WIDTH 0x20 /* 32 */
+#define DEFM_SN_DATA_LOW_CNT_MIN_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define DEFM_SN_DATA_LOW_CNT_MAX_ADDR 0x6024 /* 24612 */
+#define DEFM_SN_DATA_LOW_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_DATA_LOW_CNT_MAX_OFFSET 0x0 /* 0 */
+#define DEFM_SN_DATA_LOW_CNT_MAX_WIDTH 0x20 /* 32 */
+#define DEFM_SN_DATA_LOW_CNT_MAX_DEFAULT 0x78 /* 120 */
+
+/* Type = rw */
+#define DEFM_SN_DATA_LOW_CNT_INCVAL_ADDR 0x602c /* 24620 */
+#define DEFM_SN_DATA_LOW_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_DATA_LOW_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define DEFM_SN_DATA_LOW_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define DEFM_SN_DATA_LOW_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define DEFM_SN_DATA_HIGH_CNT_MIN_ADDR 0x6030 /* 24624 */
+#define DEFM_SN_DATA_HIGH_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_DATA_HIGH_CNT_MIN_OFFSET 0x0 /* 0 */
+#define DEFM_SN_DATA_HIGH_CNT_MIN_WIDTH 0x20 /* 32 */
+#define DEFM_SN_DATA_HIGH_CNT_MIN_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define DEFM_SN_DATA_HIGH_CNT_MAX_ADDR 0x6034 /* 24628 */
+#define DEFM_SN_DATA_HIGH_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_DATA_HIGH_CNT_MAX_OFFSET 0x0 /* 0 */
+#define DEFM_SN_DATA_HIGH_CNT_MAX_WIDTH 0x20 /* 32 */
+#define DEFM_SN_DATA_HIGH_CNT_MAX_DEFAULT 0x4f /* 79 */
+
+/* Type = rw */
+#define DEFM_SN_DATA_HIGH_CNT_INCVAL_ADDR 0x603c /* 24636 */
+#define DEFM_SN_DATA_HIGH_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_DATA_HIGH_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define DEFM_SN_DATA_HIGH_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define DEFM_SN_DATA_HIGH_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define DEFM_SN_CTRL_LOW_CNT_MIN_ADDR 0x6050 /* 24656 */
+#define DEFM_SN_CTRL_LOW_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_CTRL_LOW_CNT_MIN_OFFSET 0x0 /* 0 */
+#define DEFM_SN_CTRL_LOW_CNT_MIN_WIDTH 0x20 /* 32 */
+#define DEFM_SN_CTRL_LOW_CNT_MIN_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define DEFM_SN_CTRL_LOW_CNT_MAX_ADDR 0x6054 /* 24660 */
+#define DEFM_SN_CTRL_LOW_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_CTRL_LOW_CNT_MAX_OFFSET 0x0 /* 0 */
+#define DEFM_SN_CTRL_LOW_CNT_MAX_WIDTH 0x20 /* 32 */
+#define DEFM_SN_CTRL_LOW_CNT_MAX_DEFAULT 0x78 /* 120 */
+
+/* Type = rw (read/write) */
+#define DEFM_SN_CTRL_LOW_CNT_INCVAL_ADDR 0x605c /* 24668 */
+#define DEFM_SN_CTRL_LOW_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_CTRL_LOW_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define DEFM_SN_CTRL_LOW_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define DEFM_SN_CTRL_LOW_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rw (read/write) */
+#define DEFM_SN_CTRL_HIGH_CNT_MIN_ADDR 0x6060 /* 24672 */
+#define DEFM_SN_CTRL_HIGH_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_CTRL_HIGH_CNT_MIN_OFFSET 0x0 /* 0 */
+#define DEFM_SN_CTRL_HIGH_CNT_MIN_WIDTH 0x20 /* 32 */
+#define DEFM_SN_CTRL_HIGH_CNT_MIN_DEFAULT 0x0 /* 0 */
+
+/* Type = rw (read/write) */
+#define DEFM_SN_CTRL_HIGH_CNT_MAX_ADDR 0x6064 /* 24676 */
+#define DEFM_SN_CTRL_HIGH_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_CTRL_HIGH_CNT_MAX_OFFSET 0x0 /* 0 */
+#define DEFM_SN_CTRL_HIGH_CNT_MAX_WIDTH 0x20 /* 32 */
+#define DEFM_SN_CTRL_HIGH_CNT_MAX_DEFAULT 0x4f /* 79 */
+
+/* Type = rw (read/write) */
+#define DEFM_SN_CTRL_HIGH_CNT_INCVAL_ADDR 0x606c /* 24684 */
+#define DEFM_SN_CTRL_HIGH_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_CTRL_HIGH_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define DEFM_SN_CTRL_HIGH_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define DEFM_SN_CTRL_HIGH_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rwpdef (read/write) */
+#define DEFM_USER_DATA_FILTER_W0_31_0_ADDR 0x6100 /* 24832 */
+#define DEFM_USER_DATA_FILTER_W0_31_0_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W0_31_0_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W0_31_0_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W0_31_0_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef (read/write) */
+#define DEFM_USER_DATA_FILTER_W0_63_32_ADDR 0x6104 /* 24836 */
+#define DEFM_USER_DATA_FILTER_W0_63_32_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W0_63_32_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W0_63_32_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W0_63_32_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef (read/write) */
+#define DEFM_USER_DATA_FILTER_W0_95_64_ADDR 0x6108 /* 24840 */
+#define DEFM_USER_DATA_FILTER_W0_95_64_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W0_95_64_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W0_95_64_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W0_95_64_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef (read/write) */
+#define DEFM_USER_DATA_FILTER_W0_127_96_ADDR 0x610c /* 24844 */
+#define DEFM_USER_DATA_FILTER_W0_127_96_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W0_127_96_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W0_127_96_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W0_127_96_DEFAULT 0xfffffeae /* 4294966958 */
+
+/* Type = rwpdef (read/write) */
+#define DEFM_USER_DATA_FILTER_W0_MASK_ADDR 0x6110 /* 24848 */
+#define DEFM_USER_DATA_FILTER_W0_MASK_MASK 0xffff /* 65535 */
+#define DEFM_USER_DATA_FILTER_W0_MASK_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W0_MASK_WIDTH 0x10 /* 16 */
+#define DEFM_USER_DATA_FILTER_W0_MASK_DEFAULT 0xcfff /* 53247 */
+
+/* Type = rwpdef (read/write) */
+#define DEFM_USER_DATA_FILTER_W1_31_0_ADDR 0x6120 /* 24864 */
+#define DEFM_USER_DATA_FILTER_W1_31_0_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W1_31_0_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W1_31_0_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W1_31_0_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef (read/write) */
+#define DEFM_USER_DATA_FILTER_W1_63_32_ADDR 0x6124 /* 24868 */
+#define DEFM_USER_DATA_FILTER_W1_63_32_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W1_63_32_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W1_63_32_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W1_63_32_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef (read/write) */
+#define DEFM_USER_DATA_FILTER_W1_95_64_ADDR 0x6128 /* 24872 */
+#define DEFM_USER_DATA_FILTER_W1_95_64_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W1_95_64_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W1_95_64_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W1_95_64_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef (read/write) */
+#define DEFM_USER_DATA_FILTER_W1_127_96_ADDR 0x612c /* 24876 */
+#define DEFM_USER_DATA_FILTER_W1_127_96_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W1_127_96_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W1_127_96_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W1_127_96_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef (read/write) */
+#define DEFM_USER_DATA_FILTER_W1_MASK_ADDR 0x6130 /* 24880 */
+#define DEFM_USER_DATA_FILTER_W1_MASK_MASK 0xffff /* 65535 */
+#define DEFM_USER_DATA_FILTER_W1_MASK_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W1_MASK_WIDTH 0x10 /* 16 */
+#define DEFM_USER_DATA_FILTER_W1_MASK_DEFAULT 0xffff /* 65535 */
+
+/* Type = rwpdef (read/write) */
+#define DEFM_USER_DATA_FILTER_W2_31_0_ADDR 0x6140 /* 24896 */
+#define DEFM_USER_DATA_FILTER_W2_31_0_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W2_31_0_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W2_31_0_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W2_31_0_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef (read/write) */
+#define DEFM_USER_DATA_FILTER_W2_63_32_ADDR 0x6144 /* 24900 */
+#define DEFM_USER_DATA_FILTER_W2_63_32_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W2_63_32_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W2_63_32_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W2_63_32_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef (read/write) */
+#define DEFM_USER_DATA_FILTER_W2_95_64_ADDR 0x6148 /* 24904 */
+#define DEFM_USER_DATA_FILTER_W2_95_64_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W2_95_64_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W2_95_64_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W2_95_64_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef (read/write) */
+#define DEFM_USER_DATA_FILTER_W2_127_96_ADDR 0x614c /* 24908 */
+#define DEFM_USER_DATA_FILTER_W2_127_96_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W2_127_96_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W2_127_96_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W2_127_96_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef (read/write) */
+#define DEFM_USER_DATA_FILTER_W2_MASK_ADDR 0x6150 /* 24912 */
+#define DEFM_USER_DATA_FILTER_W2_MASK_MASK 0xffff /* 65535 */
+#define DEFM_USER_DATA_FILTER_W2_MASK_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W2_MASK_WIDTH 0x10 /* 16 */
+#define DEFM_USER_DATA_FILTER_W2_MASK_DEFAULT 0xffff /* 65535 */
+
+/* Type = rwpdef (read/write) */
+#define DEFM_USER_DATA_FILTER_W3_31_0_ADDR 0x6160 /* 24928 */
+#define DEFM_USER_DATA_FILTER_W3_31_0_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W3_31_0_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W3_31_0_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W3_31_0_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef (read/write) */
+#define DEFM_USER_DATA_FILTER_W3_63_32_ADDR 0x6164 /* 24932 */
+#define DEFM_USER_DATA_FILTER_W3_63_32_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W3_63_32_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W3_63_32_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W3_63_32_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef (read/write) */
+#define DEFM_USER_DATA_FILTER_W3_95_64_ADDR 0x6168 /* 24936 */
+#define DEFM_USER_DATA_FILTER_W3_95_64_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W3_95_64_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W3_95_64_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W3_95_64_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef (read/write) */
+#define DEFM_USER_DATA_FILTER_W3_127_96_ADDR 0x616c /* 24940 */
+#define DEFM_USER_DATA_FILTER_W3_127_96_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W3_127_96_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W3_127_96_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W3_127_96_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef (read/write) */
+#define DEFM_USER_DATA_FILTER_W3_MASK_ADDR 0x6170 /* 24944 */
+#define DEFM_USER_DATA_FILTER_W3_MASK_MASK 0xffff /* 65535 */
+#define DEFM_USER_DATA_FILTER_W3_MASK_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W3_MASK_WIDTH 0x10 /* 16 */
+#define DEFM_USER_DATA_FILTER_W3_MASK_DEFAULT 0xffff /* 65535 */
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_framer_v1_0_defm_drp
+ * with prefix defm_drp @ address 0x8000
+ *------------------------------------------------------------------------------
+ */
+/* Type = rw (read/write) */
+#define DEFM_DRPDEFM_DATA_PC_ID_ADDR 0x8000 /* 32768 */
+#define DEFM_DRPDEFM_DATA_PC_ID_MASK 0xffff /* 65535 */
+#define DEFM_DRPDEFM_DATA_PC_ID_OFFSET 0x0 /* 0 */
+#define DEFM_DRPDEFM_DATA_PC_ID_WIDTH 0x10 /* 16 */
+#define DEFM_DRPDEFM_DATA_PC_ID_DEFAULT 0x0 /* 0 */
+
+/* Type = rw (read/write) */
+#define DEFM_DRPDEFM_CTRL_PC_ID_ADDR 0x8400 /* 33792 */
+#define DEFM_DRPDEFM_CTRL_PC_ID_MASK 0xffff /* 65535 */
+#define DEFM_DRPDEFM_CTRL_PC_ID_OFFSET 0x0 /* 0 */
+#define DEFM_DRPDEFM_CTRL_PC_ID_WIDTH 0x10 /* 16 */
+#define DEFM_DRPDEFM_CTRL_PC_ID_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig (read-only) */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_LATENCY_ADDR 0x8800 /* 34816 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_LATENCY_MASK 0xffffff /* 16777215 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_LATENCY_OFFSET 0x0 /* 0 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_LATENCY_WIDTH 0x18 /* 24 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_LATENCY_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig (read-only) */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_ALIGNMENT_ADDR 0x8800 /* 34816 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_ALIGNMENT_MASK 0x1000000 /* 16777216 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_ALIGNMENT_OFFSET 0x18 /* 24 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_ALIGNMENT_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_ALIGNMENT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig (read-only) */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_OVERFLOW_ADDR 0x8800 /* 34816 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_OVERFLOW_MASK 0x2000000 /* 33554432 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_OVERFLOW_OFFSET 0x19 /* 25 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_OVERFLOW_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_OVERFLOW_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig (read-only) */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_UNDERFLOW_ADDR 0x8800 /* 34816 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_UNDERFLOW_MASK 0x4000000 /* 67108864 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_UNDERFLOW_OFFSET 0x1a /* 26 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_UNDERFLOW_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_UNDERFLOW_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig (read-only) */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_REGULAR_ADDR 0x8800 /* 34816 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_REGULAR_MASK 0x8000000 /* 134217728 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_REGULAR_OFFSET 0x1b /* 27 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_REGULAR_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_REGULAR_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig (read-only) */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_RWIN_ADDR 0x8800 /* 34816 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_RWIN_MASK 0xf0000000 /* 4026531840 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_RWIN_OFFSET 0x1c /* 28 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_RWIN_WIDTH 0x4 /* 4 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_RWIN_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig (read-only) */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_LATENCY_ADDR 0x9800 /* 38912 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_LATENCY_MASK 0xffffff /* 16777215 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_LATENCY_OFFSET 0x0 /* 0 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_LATENCY_WIDTH 0x18 /* 24 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_LATENCY_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig (read-only) */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_ALIGNMENT_ADDR 0x9800 /* 38912 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_ALIGNMENT_MASK 0x1000000 /* 16777216 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_ALIGNMENT_OFFSET 0x18 /* 24 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_ALIGNMENT_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_ALIGNMENT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig (read-only) */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_OVERFLOW_ADDR 0x9800 /* 38912 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_OVERFLOW_MASK 0x2000000 /* 33554432 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_OVERFLOW_OFFSET 0x19 /* 25 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_OVERFLOW_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_OVERFLOW_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig (read-only) */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_UNDERFLOW_ADDR 0x9800 /* 38912 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_UNDERFLOW_MASK 0x4000000 /* 67108864 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_UNDERFLOW_OFFSET 0x1a /* 26 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_UNDERFLOW_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_UNDERFLOW_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig (read-only) */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_REGULAR_ADDR 0x9800 /* 38912 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_REGULAR_MASK 0x8000000 /* 134217728 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_REGULAR_OFFSET 0x1b /* 27 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_REGULAR_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_REGULAR_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig (read-only) */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_RWIN_ADDR 0x9800 /* 38912 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_RWIN_MASK 0xf0000000 /* 4026531840 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_RWIN_OFFSET 0x1c /* 28 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_RWIN_WIDTH 0x4 /* 4 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_RWIN_DEFAULT 0x0 /* 0 */
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_framer_v1_0_eth
+ * with prefix eth_ @ address 0xa000
+ *------------------------------------------------------------------------------
+ */
+/* Type = rwpdef (read/write) */
+#define ETH_DEST_ADDR_31_0_ADDR 0xa000 /* 40960 */
+#define ETH_DEST_ADDR_31_0_MASK 0xffffffff /* 4294967295 */
+#define ETH_DEST_ADDR_31_0_OFFSET 0x0 /* 0 */
+#define ETH_DEST_ADDR_31_0_WIDTH 0x20 /* 32 */
+#define ETH_DEST_ADDR_31_0_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef (read/write) */
+#define ETH_DEST_ADDR_47_32_ADDR 0xa004 /* 40964 */
+#define ETH_DEST_ADDR_47_32_MASK 0xffff /* 65535 */
+#define ETH_DEST_ADDR_47_32_OFFSET 0x0 /* 0 */
+#define ETH_DEST_ADDR_47_32_WIDTH 0x10 /* 16 */
+#define ETH_DEST_ADDR_47_32_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef (read/write) */
+#define ETH_SRC_ADDR_31_0_ADDR 0xa008 /* 40968 */
+#define ETH_SRC_ADDR_31_0_MASK 0xffffffff /* 4294967295 */
+#define ETH_SRC_ADDR_31_0_OFFSET 0x0 /* 0 */
+#define ETH_SRC_ADDR_31_0_WIDTH 0x20 /* 32 */
+#define ETH_SRC_ADDR_31_0_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef (read/write) */
+#define ETH_SRC_ADDR_47_32_ADDR 0xa00c /* 40972 */
+#define ETH_SRC_ADDR_47_32_MASK 0xffff /* 65535 */
+#define ETH_SRC_ADDR_47_32_OFFSET 0x0 /* 0 */
+#define ETH_SRC_ADDR_47_32_WIDTH 0x10 /* 16 */
+#define ETH_SRC_ADDR_47_32_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef (read/write) */
+#define ETH_VLAN_ID_ADDR 0xa010 /* 40976 */
+#define ETH_VLAN_ID_MASK 0xfff /* 4095 */
+#define ETH_VLAN_ID_OFFSET 0x0 /* 0 */
+#define ETH_VLAN_ID_WIDTH 0xc /* 12 */
+#define ETH_VLAN_ID_DEFAULT 0x1 /* 1 */
+
+/* Type = rwpdef (read/write) */
+#define ETH_VLAN_DEI_ADDR 0xa010 /* 40976 */
+#define ETH_VLAN_DEI_MASK 0x1000 /* 4096 */
+#define ETH_VLAN_DEI_OFFSET 0xc /* 12 */
+#define ETH_VLAN_DEI_WIDTH 0x1 /* 1 */
+#define ETH_VLAN_DEI_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef (read/write) */
+#define ETH_VLAN_PCP_ADDR 0xa010 /* 40976 */
+#define ETH_VLAN_PCP_MASK 0xe000 /* 57344 */
+#define ETH_VLAN_PCP_OFFSET 0xd /* 13 */
+#define ETH_VLAN_PCP_WIDTH 0x3 /* 3 */
+#define ETH_VLAN_PCP_DEFAULT 0x7 /* 7 */
+
+/* Type = rw (read/write) */
+#define ETH_IPV4_VERSION_ADDR 0xa030 /* 41008 */
+#define ETH_IPV4_VERSION_MASK 0xf /* 15 */
+#define ETH_IPV4_VERSION_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_VERSION_WIDTH 0x4 /* 4 */
+#define ETH_IPV4_VERSION_DEFAULT 0x4 /* 4 */
+
+/* Type = rw (read/write) */
+#define ETH_IPV4_IHL_ADDR 0xa030 /* 41008 */
+#define ETH_IPV4_IHL_MASK 0xf0 /* 240 */
+#define ETH_IPV4_IHL_OFFSET 0x4 /* 4 */
+#define ETH_IPV4_IHL_WIDTH 0x4 /* 4 */
+#define ETH_IPV4_IHL_DEFAULT 0x5 /* 5 */
+
+/* Type = rw (read/write) */
+#define ETH_IPV4_DSCP_ADDR 0xa034 /* 41012 */
+#define ETH_IPV4_DSCP_MASK 0x3f /* 63 */
+#define ETH_IPV4_DSCP_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_DSCP_WIDTH 0x6 /* 6 */
+#define ETH_IPV4_DSCP_DEFAULT 0x2e /* 46 */
+
+/* Type = rw (read/write) */
+#define ETH_IPV4_ECN_ADDR 0xa034 /* 41012 */
+#define ETH_IPV4_ECN_MASK 0xc0 /* 192 */
+#define ETH_IPV4_ECN_OFFSET 0x6 /* 6 */
+#define ETH_IPV4_ECN_WIDTH 0x2 /* 2 */
+#define ETH_IPV4_ECN_DEFAULT 0x0 /* 0 */
+
+/* Type = rw (read/write) */
+#define ETH_IPV4_ID_ADDR 0xa038 /* 41016 */
+#define ETH_IPV4_ID_MASK 0xffff /* 65535 */
+#define ETH_IPV4_ID_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_ID_WIDTH 0x10 /* 16 */
+#define ETH_IPV4_ID_DEFAULT 0x0 /* 0 */
+
+/* Type = rw (read/write) */
+#define ETH_IPV4_FLAGS_ADDR 0xa03c /* 41020 */
+#define ETH_IPV4_FLAGS_MASK 0x7 /* 7 */
+#define ETH_IPV4_FLAGS_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_FLAGS_WIDTH 0x3 /* 3 */
+#define ETH_IPV4_FLAGS_DEFAULT 0x2 /* 2 */
+
+/* Type = rw (read/write) */
+#define ETH_IPV4_FRAGMENT_OFFSET_ADDR 0xa03c /* 41020 */
+#define ETH_IPV4_FRAGMENT_OFFSET_MASK 0x1fff8 /* 131064 */
+#define ETH_IPV4_FRAGMENT_OFFSET_OFFSET 0x3 /* 3 */
+#define ETH_IPV4_FRAGMENT_OFFSET_WIDTH 0xe /* 14 */
+#define ETH_IPV4_FRAGMENT_OFFSET_DEFAULT 0x0 /* 0 */
+
+/* Type = rw (read/write) */
+#define ETH_IPV4_TIME_TO_LIVE_ADDR 0xa040 /* 41024 */
+#define ETH_IPV4_TIME_TO_LIVE_MASK 0xff /* 255 */
+#define ETH_IPV4_TIME_TO_LIVE_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_TIME_TO_LIVE_WIDTH 0x8 /* 8 */
+#define ETH_IPV4_TIME_TO_LIVE_DEFAULT 0x40 /* 64 */
+
+/* Type = rw (read/write) */
+#define ETH_IPV4_PROTOCOL_ADDR 0xa044 /* 41028 */
+#define ETH_IPV4_PROTOCOL_MASK 0xff /* 255 */
+#define ETH_IPV4_PROTOCOL_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_PROTOCOL_WIDTH 0x8 /* 8 */
+#define ETH_IPV4_PROTOCOL_DEFAULT 0x11 /* 17 */
+
+/* Type = rwpdef (read/write) */
+#define ETH_IPV4_SOURCE_ADD_ADDR 0xa048 /* 41032 */
+#define ETH_IPV4_SOURCE_ADD_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV4_SOURCE_ADD_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_SOURCE_ADD_WIDTH 0x20 /* 32 */
+#define ETH_IPV4_SOURCE_ADD_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef (read/write) */
+#define ETH_IPV4_DESTINATION_ADD_ADDR 0xa04c /* 41036 */
+#define ETH_IPV4_DESTINATION_ADD_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV4_DESTINATION_ADD_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_DESTINATION_ADD_WIDTH 0x20 /* 32 */
+#define ETH_IPV4_DESTINATION_ADD_DEFAULT 0x0 /* 0 */
+
+/* Type = rw (read/write) */
+#define ETH_UDP_SOURCE_PORT_ADDR 0xa050 /* 41040 */
+#define ETH_UDP_SOURCE_PORT_MASK 0xffff /* 65535 */
+#define ETH_UDP_SOURCE_PORT_OFFSET 0x0 /* 0 */
+#define ETH_UDP_SOURCE_PORT_WIDTH 0x10 /* 16 */
+#define ETH_UDP_SOURCE_PORT_DEFAULT 0x8000 /* 32768 */
+
+/* Type = rw (read/write) */
+#define ETH_UDP_DESTINATION_PORT_ADDR 0xa050 /* 41040 */
+#define ETH_UDP_DESTINATION_PORT_MASK 0xffff0000 /* 4294901760 */
+#define ETH_UDP_DESTINATION_PORT_OFFSET 0x10 /* 16 */
+#define ETH_UDP_DESTINATION_PORT_WIDTH 0x10 /* 16 */
+#define ETH_UDP_DESTINATION_PORT_DEFAULT 0xc000 /* 49152 */
+
+/* Type = rw (read/write) */
+#define ETH_IPV6_V_ADDR 0xa080 /* 41088 */
+#define ETH_IPV6_V_MASK 0xf /* 15 */
+#define ETH_IPV6_V_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_V_WIDTH 0x4 /* 4 */
+#define ETH_IPV6_V_DEFAULT 0x6 /* 6 */
+
+/* Type = rw (read/write) */
+#define ETH_IPV6_TRAFFIC_CLASS_ADDR 0xa084 /* 41092 */
+#define ETH_IPV6_TRAFFIC_CLASS_MASK 0xff /* 255 */
+#define ETH_IPV6_TRAFFIC_CLASS_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_TRAFFIC_CLASS_WIDTH 0x8 /* 8 */
+#define ETH_IPV6_TRAFFIC_CLASS_DEFAULT 0x0 /* 0 */
+
+/* Type = rw (read/write) */
+#define ETH_IPV6_FLOW_LABEL_ADDR 0xa088 /* 41096 */
+#define ETH_IPV6_FLOW_LABEL_MASK 0xfffff /* 1048575 */
+#define ETH_IPV6_FLOW_LABEL_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_FLOW_LABEL_WIDTH 0x14 /* 20 */
+#define ETH_IPV6_FLOW_LABEL_DEFAULT 0x0 /* 0 */
+
+/* Type = rw (read/write) */
+#define ETH_IPV6_NEXT_HEADER_ADDR 0xa08c /* 41100 */
+#define ETH_IPV6_NEXT_HEADER_MASK 0xff /* 255 */
+#define ETH_IPV6_NEXT_HEADER_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_NEXT_HEADER_WIDTH 0x8 /* 8 */
+#define ETH_IPV6_NEXT_HEADER_DEFAULT 0x11 /* 17 */
+
+/* Type = rw (read/write) */
+#define ETH_IPV6_HOP_LIMIT_ADDR 0xa090 /* 41104 */
+#define ETH_IPV6_HOP_LIMIT_MASK 0xff /* 255 */
+#define ETH_IPV6_HOP_LIMIT_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_HOP_LIMIT_WIDTH 0x8 /* 8 */
+#define ETH_IPV6_HOP_LIMIT_DEFAULT 0x40 /* 64 */
+
+/* Type = rwpdef (read/write) */
+#define ETH_IPV6_SOURCE_ADD_31_0_ADDR 0xa094 /* 41108 */
+#define ETH_IPV6_SOURCE_ADD_31_0_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_SOURCE_ADD_31_0_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_SOURCE_ADD_31_0_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_SOURCE_ADD_31_0_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef (read/write) */
+#define ETH_IPV6_SOURCE_ADD_63_32_ADDR 0xa098 /* 41112 */
+#define ETH_IPV6_SOURCE_ADD_63_32_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_SOURCE_ADD_63_32_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_SOURCE_ADD_63_32_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_SOURCE_ADD_63_32_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef (read/write) */
+#define ETH_IPV6_SOURCE_ADD_95_64_ADDR 0xa09c /* 41116 */
+#define ETH_IPV6_SOURCE_ADD_95_64_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_SOURCE_ADD_95_64_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_SOURCE_ADD_95_64_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_SOURCE_ADD_95_64_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef (read/write) */
+#define ETH_IPV6_SOURCE_ADD_127_96_ADDR 0xa0a0 /* 41120 */
+#define ETH_IPV6_SOURCE_ADD_127_96_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_SOURCE_ADD_127_96_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_SOURCE_ADD_127_96_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_SOURCE_ADD_127_96_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef (read/write) */
+#define ETH_IPV6_DEST_ADD_31_0_ADDR 0xa0a4 /* 41124 */
+#define ETH_IPV6_DEST_ADD_31_0_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_DEST_ADD_31_0_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_DEST_ADD_31_0_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_DEST_ADD_31_0_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef (read/write) */
+#define ETH_IPV6_DEST_ADD_63_32_ADDR 0xa0a8 /* 41128 */
+#define ETH_IPV6_DEST_ADD_63_32_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_DEST_ADD_63_32_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_DEST_ADD_63_32_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_DEST_ADD_63_32_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef (read/write) */
+#define ETH_IPV6_DEST_ADD_95_64_ADDR 0xa0ac /* 41132 */
+#define ETH_IPV6_DEST_ADD_95_64_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_DEST_ADD_95_64_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_DEST_ADD_95_64_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_DEST_ADD_95_64_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef (read/write) */
+#define ETH_IPV6_DEST_ADD_127_96_ADDR 0xa0b0 /* 41136 */
+#define ETH_IPV6_DEST_ADD_127_96_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_DEST_ADD_127_96_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_DEST_ADD_127_96_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_DEST_ADD_127_96_DEFAULT 0x0 /* 0 */
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_framer_v1_0_stats
+ * with prefix stats_ @ address 0xc000
+ *------------------------------------------------------------------------------
+ */
+/* Type = roSig (read-only) */
+#define STATS_TOTAL_RX_GOOD_PKT_CNT_ADDR 0xc000 /* 49152 */
+#define STATS_TOTAL_RX_GOOD_PKT_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_TOTAL_RX_GOOD_PKT_CNT_OFFSET 0x0 /* 0 */
+#define STATS_TOTAL_RX_GOOD_PKT_CNT_WIDTH 0x20 /* 32 */
+#define STATS_TOTAL_RX_GOOD_PKT_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig (read-only) */
+#define STATS_TOTAL_RX_BAD_PKT_CNT_ADDR 0xc004 /* 49156 */
+#define STATS_TOTAL_RX_BAD_PKT_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_TOTAL_RX_BAD_PKT_CNT_OFFSET 0x0 /* 0 */
+#define STATS_TOTAL_RX_BAD_PKT_CNT_WIDTH 0x20 /* 32 */
+#define STATS_TOTAL_RX_BAD_PKT_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig (read-only) */
+#define STATS_TOTAL_RX_BAD_FCS_CNT_ADDR 0xc008 /* 49160 */
+#define STATS_TOTAL_RX_BAD_FCS_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_TOTAL_RX_BAD_FCS_CNT_OFFSET 0x0 /* 0 */
+#define STATS_TOTAL_RX_BAD_FCS_CNT_WIDTH 0x20 /* 32 */
+#define STATS_TOTAL_RX_BAD_FCS_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig (read-only) */
+#define STATS_USER_DATA_RX_PACKETS_CNT_ADDR 0xc00c /* 49164 */
+#define STATS_USER_DATA_RX_PACKETS_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_DATA_RX_PACKETS_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_DATA_RX_PACKETS_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_DATA_RX_PACKETS_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig (read-only) */
+#define STATS_USER_DATA_RX_GOOD_PKT_CNT_ADDR 0xc010 /* 49168 */
+#define STATS_USER_DATA_RX_GOOD_PKT_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_DATA_RX_GOOD_PKT_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_DATA_RX_GOOD_PKT_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_DATA_RX_GOOD_PKT_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig (read-only) */
+#define STATS_USER_DATA_RX_BAD_PKT_CNT_ADDR 0xc014 /* 49172 */
+#define STATS_USER_DATA_RX_BAD_PKT_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_DATA_RX_BAD_PKT_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_DATA_RX_BAD_PKT_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_DATA_RX_BAD_PKT_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig (read-only) */
+#define STATS_USER_DATA_RX_BAD_FCS_CNT_ADDR 0xc018 /* 49176 */
+#define STATS_USER_DATA_RX_BAD_FCS_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_DATA_RX_BAD_FCS_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_DATA_RX_BAD_FCS_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_DATA_RX_BAD_FCS_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig (read-only) */
+#define STATS_USER_CTRL_RX_PACKETS_CNT_ADDR 0xc01c /* 49180 */
+#define STATS_USER_CTRL_RX_PACKETS_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_CTRL_RX_PACKETS_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_CTRL_RX_PACKETS_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_CTRL_RX_PACKETS_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig (read-only) */
+#define STATS_USER_CTRL_RX_GOOD_PKT_CNT_ADDR 0xc020 /* 49184 */
+#define STATS_USER_CTRL_RX_GOOD_PKT_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_CTRL_RX_GOOD_PKT_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_CTRL_RX_GOOD_PKT_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_CTRL_RX_GOOD_PKT_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig (read-only) */
+#define STATS_USER_CTRL_RX_BAD_PKT_CNT_ADDR 0xc024 /* 49188 */
+#define STATS_USER_CTRL_RX_BAD_PKT_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_CTRL_RX_BAD_PKT_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_CTRL_RX_BAD_PKT_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_CTRL_RX_BAD_PKT_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig (read-only) */
+#define STATS_USER_CTRL_RX_BAD_FCS_CNT_ADDR 0xc028 /* 49192 */
+#define STATS_USER_CTRL_RX_BAD_FCS_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_CTRL_RX_BAD_FCS_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_CTRL_RX_BAD_FCS_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_CTRL_RX_BAD_FCS_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig (read-only) */
+#define STATS_USER_DATA_RX_PKTS_RATE_ADDR 0xc02c /* 49196 */
+#define STATS_USER_DATA_RX_PKTS_RATE_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_DATA_RX_PKTS_RATE_OFFSET 0x0 /* 0 */
+#define STATS_USER_DATA_RX_PKTS_RATE_WIDTH 0x20 /* 32 */
+#define STATS_USER_DATA_RX_PKTS_RATE_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig (read-only) */
+#define STATS_USER_CTRL_RX_PKTS_RATE_ADDR 0xc030 /* 49200 */
+#define STATS_USER_CTRL_RX_PKTS_RATE_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_CTRL_RX_PKTS_RATE_OFFSET 0x0 /* 0 */
+#define STATS_USER_CTRL_RX_PKTS_RATE_WIDTH 0x20 /* 32 */
+#define STATS_USER_CTRL_RX_PKTS_RATE_DEFAULT 0x0 /* 0 */
+#define STATS_USER_CTRL_RX_PKTS_RATE_DEFAULT 0x0 /* 0 */
diff --git a/drivers/staging/xroeframer/sysfs_xroe.c b/drivers/staging/xroeframer/sysfs_xroe.c
new file mode 100644
index 000000000000..9caf5e50b02f
--- /dev/null
+++ b/drivers/staging/xroeframer/sysfs_xroe.c
@@ -0,0 +1,562 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include "xroe_framer.h"
+
enum { XROE_SIZE_MAX = 15 };
static int xroe_size;
/* One extra byte so a full XROE_SIZE_MAX-byte copy can still be
 * NUL-terminated before being handed to string functions.
 */
static char xroe_tmp[XROE_SIZE_MAX + 1];
+
+/**
+ * version_show - Returns the block's revision number
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the revision string
+ *
+ * Returns the block's major, minor & version revision numbers
+ * in a %d.%d.%d format
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t version_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 major_rev;
+ u32 minor_rev;
+ u32 version_rev;
+
+ major_rev = utils_sysfs_show_wrapper(CFG_MAJOR_REVISION_ADDR,
+ CFG_MAJOR_REVISION_OFFSET,
+ CFG_MAJOR_REVISION_MASK, kobj);
+ minor_rev = utils_sysfs_show_wrapper(CFG_MINOR_REVISION_ADDR,
+ CFG_MINOR_REVISION_OFFSET,
+ CFG_MINOR_REVISION_MASK, kobj);
+ version_rev = utils_sysfs_show_wrapper(CFG_VERSION_REVISION_ADDR,
+ CFG_VERSION_REVISION_OFFSET,
+ CFG_VERSION_REVISION_MASK, kobj);
+ sprintf(buff, "%d.%d.%d\n", major_rev, minor_rev, version_rev);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * version_store - Writes to the framer version sysfs entry (not permitted)
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the revision string
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the framer version sysfs entry (not permitted)
+ *
+ * Return: 0
+ */
+static ssize_t version_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ return 0;
+}
+
+/**
+ * enable_show - Returns the framer's enable status
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the enable status
+ *
+ * Reads and writes the framer's enable status to the sysfs entry
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t enable_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 enable;
+
+ enable = utils_sysfs_show_wrapper(CFG_MASTER_INT_ENABLE_ADDR,
+ CFG_MASTER_INT_ENABLE_OFFSET,
+ CFG_MASTER_INT_ENABLE_MASK, kobj);
+ if (enable)
+ sprintf(buff, "true\n");
+ else
+ sprintf(buff, "false\n");
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * version_store - Writes to the framer's enable status register
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the enable status
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the framer's enable status
+ * to the sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t enable_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ u32 enable = 0;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ strncpy(xroe_tmp, buff, xroe_size);
+ if (strncmp(xroe_tmp, "true", xroe_size) == 0)
+ enable = 1;
+ else if (strncmp(xroe_tmp, "false", xroe_size) == 0)
+ enable = 0;
+ utils_sysfs_store_wrapper(CFG_MASTER_INT_ENABLE_ADDR,
+ CFG_MASTER_INT_ENABLE_OFFSET,
+ CFG_MASTER_INT_ENABLE_MASK, enable, kobj);
+ return xroe_size;
+}
+
+/**
+ * framer_restart_show - Returns the framer's restart status
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the restart status
+ *
+ * Reads and writes the framer's restart status to the sysfs entry
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t framer_restart_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 restart;
+
+ restart = utils_sysfs_show_wrapper(FRAM_DISABLE_ADDR,
+ FRAM_DISABLE_OFFSET,
+ FRAM_DISABLE_MASK, kobj);
+ if (restart)
+ sprintf(buff, "true\n");
+
+ else
+ sprintf(buff, "false\n");
+
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * framer_restart_store - Writes to the framer's restart status register
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the restart status
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the framer's restart status
+ * to the sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t framer_restart_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ u32 restart = 0;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ strncpy(xroe_tmp, buff, xroe_size);
+ if (strncmp(xroe_tmp, "true", xroe_size) == 0)
+ restart = 0x01;
+ else if (strncmp(xroe_tmp, "false", xroe_size) == 0)
+ restart = 0x00;
+ utils_sysfs_store_wrapper(FRAM_DISABLE_ADDR, FRAM_DISABLE_OFFSET,
+ FRAM_DISABLE_MASK, restart, kobj);
+ return xroe_size;
+}
+
+/**
+ * deframer_restart_show - Returns the deframer's restart status
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the restart status
+ *
+ * Reads and writes the deframer's restart status to the sysfs entry
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t deframer_restart_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 offset = DEFM_RESTART_OFFSET;
+ u32 mask = DEFM_RESTART_MASK;
+ u32 buffer = 0;
+ u32 restart = 0;
+ void __iomem *working_address = ((u8 *)lp->base_addr
+ + DEFM_RESTART_ADDR);
+
+ buffer = ioread32(working_address);
+ restart = (buffer & mask) >> offset;
+
+ if (restart)
+ sprintf(buff, "true\n");
+
+ else
+ sprintf(buff, "false\n");
+
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * deframer_restart_store - Writes to the deframer's restart status register
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the restart status
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the deframer's restart status
+ * to the sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t deframer_restart_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ u32 offset = DEFM_RESTART_OFFSET;
+ u32 mask = DEFM_RESTART_MASK;
+ void __iomem *working_address = ((u8 *)lp->base_addr
+ + DEFM_RESTART_ADDR);
+ u32 restart = 0;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ strncpy(xroe_tmp, buff, xroe_size);
+ if (strncmp(xroe_tmp, "true", xroe_size) == 0) {
+ restart = 0x01;
+ utils_write32withmask(working_address, restart,
+ mask, offset);
+ } else if (strncmp(xroe_tmp, "false", xroe_size) == 0) {
+ restart = 0x00;
+ utils_write32withmask(working_address, restart,
+ mask, offset);
+ }
+
+ return xroe_size;
+}
+
+/**
+ * xxv_reset_show - Returns the XXV's reset status
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the reset status
+ *
+ * Reads and writes the XXV's reset status to the sysfs entry
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t xxv_reset_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 offset = CFG_USER_RW_OUT_OFFSET;
+ u32 mask = CFG_USER_RW_OUT_MASK;
+ u32 buffer = 0;
+ u32 restart = 0;
+ void __iomem *working_address = ((u8 *)lp->base_addr +
+ CFG_USER_RW_OUT_ADDR);
+
+ buffer = ioread32(working_address);
+ restart = (buffer & mask) >> offset;
+ if (restart)
+ sprintf(buff, "true\n");
+ else
+ sprintf(buff, "false\n");
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * xxv_reset_store - Writes to the XXV's reset register
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the reset status
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the XXV's reset status
+ * to the sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t xxv_reset_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ u32 offset = CFG_USER_RW_OUT_OFFSET;
+ u32 mask = CFG_USER_RW_OUT_MASK;
+ void __iomem *working_address = ((u8 *)lp->base_addr +
+ CFG_USER_RW_OUT_ADDR);
+ u32 restart = 0;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ strncpy(xroe_tmp, buff, xroe_size);
+
+ if (strncmp(xroe_tmp, "true", xroe_size) == 0) {
+ restart = 0x01;
+ utils_write32withmask(working_address, restart,
+ mask, offset);
+ } else if (strncmp(xroe_tmp, "false", xroe_size) == 0) {
+ restart = 0x00;
+ utils_write32withmask(working_address, restart,
+ mask, offset);
+ }
+ return xroe_size;
+}
+
+/**
+ * framing_show - Returns the current framing
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the reset status
+ *
+ * Reads and writes the current framing type to the sysfs entry
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t framing_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 offset = (DEFM_DATA_PKT_MESSAGE_TYPE_ADDR +
+ DEFM_DATA_PKT_MESSAGE_TYPE_OFFSET);
+ u8 buffer = 0;
+ u8 framing = 0xff;
+ void __iomem *working_address = ((u8 *)lp->base_addr + offset);
+
+ buffer = ioread8(working_address);
+ framing = buffer;
+ if (framing == 0)
+ sprintf(buff, "eCPRI\n");
+ else if (framing == 1)
+ sprintf(buff, "1914.3\n");
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * framing_store - Writes to the current framing register
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the reset status
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the current framing
+ * to the sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t framing_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ u32 offset = (DEFM_DATA_PKT_MESSAGE_TYPE_ADDR +
+ DEFM_DATA_PKT_MESSAGE_TYPE_OFFSET);
+ void __iomem *working_address = ((u8 *)lp->base_addr + offset);
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ strncpy(xroe_tmp, buff, xroe_size);
+ if (strncmp(xroe_tmp, "eCPRI", xroe_size) == 0)
+ iowrite8(0, working_address);
+ else if (strncmp(xroe_tmp, "1914.3", xroe_size) == 0)
+ iowrite8(1, working_address);
+ return xroe_size;
+}
+
+/* TODO Use DEVICE_ATTR/_RW/_RO macros */
+
+static struct kobj_attribute version_attribute =
+ __ATTR(version, 0444, version_show, version_store);
+
+static struct kobj_attribute enable_attribute =
+ __ATTR(enable, 0660, enable_show, enable_store);
+
+static struct kobj_attribute framer_restart =
+ __ATTR(framer_restart, 0660, framer_restart_show, framer_restart_store);
+
+static struct kobj_attribute deframer_restart =
+ __ATTR(deframer_restart, 0660, deframer_restart_show,
+ deframer_restart_store);
+
+static struct kobj_attribute xxv_reset =
+ __ATTR(xxv_reset, 0660, xxv_reset_show, xxv_reset_store);
+
+static struct kobj_attribute framing_attribute =
+ __ATTR(framing, 0660, framing_show, framing_store);
+
+static struct attribute *attrs[] = {
+ &version_attribute.attr,
+ &enable_attribute.attr,
+ &framer_restart.attr,
+ &deframer_restart.attr,
+ &xxv_reset.attr,
+ &framing_attribute.attr,
+ NULL,
+};
+
+static struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+struct kobject *root_xroe_kobj;
+
+/**
+ * xroe_sysfs_init - Creates the xroe sysfs directory and entries
+ *
+ * Return: 0 on success, negative value in case of failure to
+ * create the sysfs group
+ *
+ * Creates the xroe sysfs directory and entries, as well as the
+ * subdirectories for IPv4, IPv6 & UDP
+ */
+int xroe_sysfs_init(void)
+{
+ int ret;
+
+ root_xroe_kobj = kobject_create_and_add("xroe", kernel_kobj);
+ if (!root_xroe_kobj)
+ return -ENOMEM;
+ ret = sysfs_create_group(root_xroe_kobj, &attr_group);
+ if (ret)
+ kobject_put(root_xroe_kobj);
+ ret = xroe_sysfs_ipv4_init();
+ if (ret)
+ return ret;
+ ret = xroe_sysfs_ipv6_init();
+ if (ret)
+ return ret;
+ ret = xroe_sysfs_udp_init();
+ if (ret)
+ return ret;
+ ret = xroe_sysfs_stats_init();
+ return ret;
+}
+
+/**
+ * xroe_sysfs_exit - Deletes the xroe sysfs directory and entries
+ *
+ * Deletes the xroe sysfs directory and entries, as well as the
+ * subdirectories for IPv4, IPv6 & UDP
+ *
+ */
+void xroe_sysfs_exit(void)
+{
+ int i;
+
+ xroe_sysfs_ipv4_exit();
+ xroe_sysfs_ipv6_exit();
+ xroe_sysfs_udp_exit();
+ xroe_sysfs_stats_exit();
+ for (i = 0; i < MAX_NUM_ETH_PORTS; i++)
+ kobject_put(kobj_eth_ports[i]);
+ kobject_put(kobj_framer);
+ kobject_put(root_xroe_kobj);
+}
+
+/**
+ * utils_write32withmask - Writes a masked 32-bit value
+ * @working_address: The starting address to write
+ * @value: The value to be written
+ * @mask: The mask to be used
+ * @offset: The offset from the provided starting address
+ *
+ * Writes a 32-bit value to the provided address with the input mask
+ *
+ * Return: 0 on success
+ */
+int utils_write32withmask(void __iomem *working_address, u32 value,
+ u32 mask, u32 offset)
+{
+ u32 read_register_value = 0;
+ u32 register_value_to_write = 0;
+ u32 delta = 0, buffer = 0;
+
+ read_register_value = ioread32(working_address);
+ buffer = (value << offset);
+ register_value_to_write = read_register_value & ~mask;
+ delta = buffer & mask;
+ register_value_to_write |= delta;
+ iowrite32(register_value_to_write, working_address);
+ return 0;
+}
+
+/**
+ * utils_sysfs_path_to_eth_port_num - Get the current ethernet port
+ * @kobj: The kobject of the entry calling the function
+ *
+ * Extracts the number of the current ethernet port instance
+ *
+ * Return: The number of the ethernet port instance (0 - MAX_NUM_ETH_PORTS) on
+ * success, -1 otherwise
+ */
+static int utils_sysfs_path_to_eth_port_num(struct kobject *kobj)
+{
+ char *current_path = NULL;
+ int port;
+ int ret;
+
+ current_path = kobject_get_path(kobj, GFP_KERNEL);
+ ret = sscanf(current_path, "/kernel/xroe/framer/eth_port_%d/", &port);
+ /* if sscanf() returns 0, no fields were assigned, therefore no
+ * adjustments will be made for port number
+ */
+ if (ret == 0)
+ port = 0;
+// printk(KERN_ALERT "current_path: %s port: %d\n", current_path, port);
+ kfree(current_path);
+ return port;
+}
+
+/**
+ * utils_sysfs_store_wrapper - Wraps the storing function for sysfs entries
+ * @address: The address of the register to be written
+ * @offset: The offset from the address of the register
+ * @mask: The mask to be used on the value to be written
+ * @value: The value to be written to the register
+ * @kobj: The kobject of the entry calling the function
+ *
+ * Wraps the core functionality of all "store" functions of sysfs entries.
+ * After calculating the ethernet port number (in N/A cases, it's 0), the value
+ * is written to the designated register
+ *
+ */
+void utils_sysfs_store_wrapper(u32 address, u32 offset, u32 mask, u32 value,
+ struct kobject *kobj)
+{
+ int port;
+ void __iomem *working_address;
+
+ port = utils_sysfs_path_to_eth_port_num(kobj);
+ working_address = (void __iomem *)(lp->base_addr +
+ (address + (0x100 * port)));
+ utils_write32withmask(working_address, value, mask, offset);
+}
+
+/**
+ * utils_sysfs_store_wrapper - Wraps the storing function for sysfs entries
+ * @address: The address of the register to be read
+ * @offset: The offset from the address of the register
+ * @mask: The mask to be used on the value to be read
+ * @kobj: The kobject of the entry calling the function
+ *
+ * Wraps the core functionality of all "show" functions of sysfs entries.
+ * After calculating the ethernet port number (in N/A cases, it's 0), the value
+ * is read from the designated register and returned.
+ *
+ * Return: The value designated by the address, offset and mask
+ */
+u32 utils_sysfs_show_wrapper(u32 address, u32 offset, u32 mask,
+ struct kobject *kobj)
+{
+ int port;
+ void __iomem *working_address;
+ u32 buffer;
+
+ port = utils_sysfs_path_to_eth_port_num(kobj);
+ working_address = (void __iomem *)(lp->base_addr +
+ (address + (0x100 * port)));
+ buffer = ioread32(working_address);
+ return (buffer & mask) >> offset;
+}
diff --git a/drivers/staging/xroeframer/sysfs_xroe_framer_ipv4.c b/drivers/staging/xroeframer/sysfs_xroe_framer_ipv4.c
new file mode 100644
index 000000000000..aaaefb10c597
--- /dev/null
+++ b/drivers/staging/xroeframer/sysfs_xroe_framer_ipv4.c
@@ -0,0 +1,718 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include "xroe_framer.h"
+
+enum { XROE_SIZE_MAX = 15 };
+static int xroe_size;
+static char xroe_tmp[XROE_SIZE_MAX];
+
+static void utils_ipv4addr_hextochar(u32 ip, unsigned char *bytes);
+static int utils_ipv4addr_chartohex(char *ip_addr, uint32_t *p_ip_addr);
+
+/**
+ * ipv4_version_show - Returns the IPv4 version number
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 version number
+ *
+ * Returns the IPv4 version number
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_version_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 version;
+
+ version = utils_sysfs_show_wrapper(ETH_IPV4_VERSION_ADDR,
+ ETH_IPV4_VERSION_OFFSET,
+ ETH_IPV4_VERSION_MASK, kobj);
+ sprintf(buff, "%d\n", version);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_version_store - Writes to the IPv4 version number sysfs entry
+ * (not permitted)
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 version
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 version number sysfs entry (not permitted)
+ *
+ * Return: 0
+ */
+static ssize_t ipv4_version_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff,
+ size_t count)
+{
+ return 0;
+}
+
+/**
+ * ipv4_ihl_show - Returns the IPv4 IHL
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 IHL
+ *
+ * Returns the IPv4 IHL
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_ihl_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 ihl;
+
+ ihl = utils_sysfs_show_wrapper(ETH_IPV4_IHL_ADDR, ETH_IPV4_IHL_OFFSET,
+ ETH_IPV4_IHL_MASK, kobj);
+ sprintf(buff, "%d\n", ihl);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_ihl_store - Writes to the IPv4 IHL sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 IHL
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 IHL sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv4_ihl_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff,
+ size_t count)
+{
+ int ret;
+ u32 ihl;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &ihl);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_IHL_ADDR, ETH_IPV4_IHL_OFFSET,
+ ETH_IPV4_IHL_MASK, ihl, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_dscp_show - Returns the IPv4 DSCP
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 DSCP
+ *
+ * Returns the IPv4 DSCP
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_dscp_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 dscp;
+
+ dscp = utils_sysfs_show_wrapper(ETH_IPV4_DSCP_ADDR,
+ ETH_IPV4_DSCP_OFFSET,
+ ETH_IPV4_DSCP_MASK, kobj);
+ sprintf(buff, "%d\n", dscp);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_dscp_store - Writes to the IPv4 DSCP sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 DSCP
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 DSCP sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv4_dscp_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff,
+ size_t count)
+{
+ int ret;
+ u32 dscp;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &dscp);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_DSCP_ADDR, ETH_IPV4_DSCP_OFFSET,
+ ETH_IPV4_DSCP_MASK, dscp, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_ecn_show - Returns the IPv4 ECN
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 ECN
+ *
+ * Returns the IPv4 ECN
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_ecn_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 ecn;
+
+ ecn = utils_sysfs_show_wrapper(ETH_IPV4_ECN_ADDR, ETH_IPV4_ECN_OFFSET,
+ ETH_IPV4_ECN_MASK, kobj);
+ sprintf(buff, "%d\n", ecn);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_ecn_store - Writes to the IPv4 ECN sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 ECN
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 ECN sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv4_ecn_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff,
+ size_t count)
+{
+ int ret;
+ u32 ecn;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &ecn);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_ECN_ADDR, ETH_IPV4_ECN_OFFSET,
+ ETH_IPV4_ECN_MASK, ecn, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_id_show - Returns the IPv4 ID
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 ID
+ *
+ * Returns the IPv4 ID
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_id_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 id;
+
+ id = utils_sysfs_show_wrapper(ETH_IPV4_ID_ADDR, ETH_IPV4_ID_OFFSET,
+ ETH_IPV4_ID_MASK, kobj);
+ sprintf(buff, "%d\n", id);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_id_store - Writes to the IPv4 ID sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 ID
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 ID sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv4_id_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff,
+ size_t count)
+{
+ int ret;
+ u32 id;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &id);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_ID_ADDR, ETH_IPV4_ID_OFFSET,
+ ETH_IPV4_ID_MASK, id, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_flags_show - Returns the IPv4 flags
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 flags
+ *
+ * Returns the IPv4 flags
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_flags_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 flags;
+
+ flags = utils_sysfs_show_wrapper(ETH_IPV4_FLAGS_ADDR,
+ ETH_IPV4_FLAGS_OFFSET,
+ ETH_IPV4_FLAGS_MASK, kobj);
+ sprintf(buff, "%d\n", flags);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_flags_store - Writes to the IPv4 flags sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 flags
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 flags sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv4_flags_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff,
+ size_t count)
+{
+ int ret;
+ u32 flags;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &flags);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_FLAGS_ADDR, ETH_IPV4_FLAGS_OFFSET,
+ ETH_IPV4_FLAGS_MASK, flags, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_fragment_offset_show - Returns the IPv4 fragment offset
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 fragment offset
+ *
+ * Returns the IPv4 fragment offset
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_fragment_offset_show
+(struct kobject *kobj, struct kobj_attribute *attr, char *buff)
+{
+ u32 fragment;
+
+ fragment = utils_sysfs_show_wrapper(ETH_IPV4_FRAGMENT_OFFSET_ADDR,
+ ETH_IPV4_FRAGMENT_OFFSET_OFFSET,
+ ETH_IPV4_FRAGMENT_OFFSET_MASK,
+ kobj);
+ sprintf(buff, "%d\n", fragment);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_fragment_offset_store - Writes to the IPv4 fragment offset sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 fragment offset
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 fragment offset sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv4_fragment_offset_store
+(struct kobject *kobj, struct kobj_attribute *attr, const char *buff,
+size_t count)
+{
+ int ret;
+ u32 fragment;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &fragment);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_FRAGMENT_OFFSET_ADDR,
+ ETH_IPV4_FRAGMENT_OFFSET_OFFSET,
+ ETH_IPV4_FRAGMENT_OFFSET_MASK, fragment,
+ kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_ttl_show - Returns the IPv4 TTL
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 TTL
+ *
+ * Returns the IPv4 TTL
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_ttl_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 ttl;
+
+ ttl = utils_sysfs_show_wrapper(ETH_IPV4_TIME_TO_LIVE_ADDR,
+ ETH_IPV4_TIME_TO_LIVE_OFFSET,
+ ETH_IPV4_TIME_TO_LIVE_MASK, kobj);
+ sprintf(buff, "%d\n", ttl);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_ttl_store - Writes to the IPv4 TTL sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 TTL
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 TTL sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv4_ttl_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff,
+ size_t count)
+{
+ int ret;
+ u32 ttl;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &ttl);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_TIME_TO_LIVE_ADDR,
+ ETH_IPV4_TIME_TO_LIVE_OFFSET,
+ ETH_IPV4_TIME_TO_LIVE_MASK, ttl, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_protocol_show - Returns the IPv4 protocol
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 protocol
+ *
+ * Returns the IPv4 protocol
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_protocol_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 protocol;
+
+ protocol = utils_sysfs_show_wrapper(ETH_IPV4_PROTOCOL_ADDR,
+ ETH_IPV4_PROTOCOL_OFFSET,
+ ETH_IPV4_PROTOCOL_MASK, kobj);
+ sprintf(buff, "%d\n", protocol);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_protocol_store - Writes to the IPv4 protocol sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 protocol
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 protocol sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv4_protocol_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ int ret;
+ u32 protocol;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &protocol);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_PROTOCOL_ADDR,
+ ETH_IPV4_PROTOCOL_OFFSET,
+ ETH_IPV4_PROTOCOL_MASK, protocol, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_source_address_show - Returns the IPv4 source address
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 source address
+ *
+ * Returns the IPv4 source address in x.x.x.x format
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_source_address_show
+(struct kobject *kobj, struct kobj_attribute *attr, char *buff)
+{
+ u32 source_add = 0;
+ unsigned char ip_addr_char[4];
+
+ source_add = utils_sysfs_show_wrapper(ETH_IPV4_SOURCE_ADD_ADDR,
+ ETH_IPV4_SOURCE_ADD_OFFSET,
+ ETH_IPV4_SOURCE_ADD_MASK, kobj);
+ utils_ipv4addr_hextochar(source_add, ip_addr_char);
+ sprintf(buff, "%d.%d.%d.%d\n", ip_addr_char[3], ip_addr_char[2],
+ ip_addr_char[1], ip_addr_char[0]);
+
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_source_address_store - Writes to the IPv4 source address sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 source address
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 source address sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv4_source_address_store
+(struct kobject *kobj, struct kobj_attribute *attr, const char *buff,
+size_t count)
+{
+ u32 source_add = 0;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ strncpy(xroe_tmp, buff, xroe_size);
+ if (utils_ipv4addr_chartohex(xroe_tmp, &source_add) == 4)
+ utils_sysfs_store_wrapper(ETH_IPV4_SOURCE_ADD_ADDR,
+ ETH_IPV4_SOURCE_ADD_OFFSET,
+ ETH_IPV4_SOURCE_ADD_MASK, source_add,
+ kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_destination_address_show - Returns the IPv4 destination address
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 destination address
+ *
+ * Returns the IPv4 destination address in x.x.x.x format. The register
+ * stores the address with octets in reverse order, hence the reversed
+ * indexing when printing.
+ *
+ * Return: the number of characters printed on success
+ */
+static ssize_t ipv4_destination_address_show
+(struct kobject *kobj, struct kobj_attribute *attr, char *buff)
+{
+	u32 dest_add = 0;
+	unsigned char ip_addr_char[4];
+
+	dest_add = utils_sysfs_show_wrapper(ETH_IPV4_DESTINATION_ADD_ADDR,
+					    ETH_IPV4_DESTINATION_ADD_OFFSET,
+					    ETH_IPV4_DESTINATION_ADD_MASK,
+					    kobj);
+	utils_ipv4addr_hextochar(dest_add, ip_addr_char);
+
+	/*
+	 * Return the exact formatted length rather than XROE_SIZE_MAX, so
+	 * readers do not see stale bytes past the terminating newline.
+	 */
+	return sprintf(buff, "%d.%d.%d.%d\n", ip_addr_char[3], ip_addr_char[2],
+		       ip_addr_char[1], ip_addr_char[0]);
+}
+
+/**
+ * ipv4_destination_address_store - Writes to the IPv4 destination address
+ * sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 destination address
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 destination address sysfs entry. The input is parsed
+ * as a dotted-decimal address (x.x.x.x); malformed input is silently
+ * ignored and the register is left unchanged, matching the original
+ * behaviour.
+ *
+ * Return: the value of @count (the whole input is consumed)
+ */
+static ssize_t ipv4_destination_address_store
+(struct kobject *kobj, struct kobj_attribute *attr, const char *buff,
+size_t count)
+{
+	u32 dest_add = 0;
+
+	/*
+	 * Bound the copy and guarantee NUL termination; strncpy() does not
+	 * terminate when the source is longer than the copy length and the
+	 * parser below needs a terminated string.
+	 */
+	xroe_size = min_t(size_t, count, (size_t)(XROE_SIZE_MAX - 1));
+	strncpy(xroe_tmp, buff, xroe_size);
+	xroe_tmp[xroe_size] = '\0';
+	if (utils_ipv4addr_chartohex(xroe_tmp, &dest_add) == 4)
+		utils_sysfs_store_wrapper(ETH_IPV4_DESTINATION_ADD_ADDR,
+					  ETH_IPV4_DESTINATION_ADD_OFFSET,
+					  ETH_IPV4_DESTINATION_ADD_MASK,
+					  dest_add, kobj);
+	/* Consume the whole input so user space does not retry the write */
+	return count;
+}
+
+/* TODO Use DEVICE_ATTR/_RW/_RO macros */
+
+/*
+ * sysfs attributes for the per-port "ipv4" directory. Header fields are
+ * group read/write (0660); "version" is read-only (0444).
+ * NOTE(review): checkpatch warns about group-writable sysfs permissions -
+ * confirm 0660 is intentional here.
+ */
+static struct kobj_attribute version_attribute =
+	__ATTR(version, 0444, ipv4_version_show, ipv4_version_store);
+static struct kobj_attribute ihl_attribute =
+	__ATTR(ihl, 0660, ipv4_ihl_show, ipv4_ihl_store);
+static struct kobj_attribute dscp_attribute =
+	__ATTR(dscp, 0660, ipv4_dscp_show, ipv4_dscp_store);
+static struct kobj_attribute ecn_attribute =
+	__ATTR(ecn, 0660, ipv4_ecn_show, ipv4_ecn_store);
+static struct kobj_attribute id_attribute =
+	__ATTR(id, 0660, ipv4_id_show, ipv4_id_store);
+static struct kobj_attribute flags_attribute =
+	__ATTR(flags, 0660, ipv4_flags_show, ipv4_flags_store);
+static struct kobj_attribute fragment_offset_attribute =
+	__ATTR(fragment_offset, 0660, ipv4_fragment_offset_show,
+	       ipv4_fragment_offset_store);
+static struct kobj_attribute ttl_attribute =
+	__ATTR(ttl, 0660, ipv4_ttl_show, ipv4_ttl_store);
+static struct kobj_attribute protocol_attribute =
+	__ATTR(protocol, 0660, ipv4_protocol_show, ipv4_protocol_store);
+static struct kobj_attribute source_add_attribute =
+	__ATTR(source_add, 0660, ipv4_source_address_show,
+	       ipv4_source_address_store);
+static struct kobj_attribute destination_add_attribute =
+	__ATTR(dest_add, 0660, ipv4_destination_address_show,
+	       ipv4_destination_address_store);
+
+/* NULL-terminated list consumed by sysfs_create_group() below */
+static struct attribute *attrs[] = {
+	&version_attribute.attr,
+	&ihl_attribute.attr,
+	&dscp_attribute.attr,
+	&ecn_attribute.attr,
+	&id_attribute.attr,
+	&flags_attribute.attr,
+	&fragment_offset_attribute.attr,
+	&ttl_attribute.attr,
+	&protocol_attribute.attr,
+	&source_add_attribute.attr,
+	&destination_add_attribute.attr,
+	NULL,
+};
+
+static struct attribute_group attr_group = {
+	.attrs = attrs,
+};
+
+/*
+ * kobj_framer and kobj_eth_ports are non-static: presumably shared with the
+ * other xroe sysfs source files - TODO confirm they are declared in
+ * xroe_framer.h.
+ */
+struct kobject *kobj_framer;
+static struct kobject *kobj_ipv4[MAX_NUM_ETH_PORTS];
+struct kobject *kobj_eth_ports[MAX_NUM_ETH_PORTS];
+
+/**
+ * xroe_sysfs_ipv4_init - Creates the xroe sysfs "ipv4" subdirectory & entries
+ *
+ * Creates the xroe sysfs "framer/eth_port_<N>/ipv4" hierarchy and its
+ * attribute entries under "xroe".
+ *
+ * Return: 0 on success, negative value in case of failure to
+ * create a kobject or the sysfs group
+ */
+int xroe_sysfs_ipv4_init(void)
+{
+	int ret;
+	int i;
+	char eth_port_dir_name[11];
+
+	kobj_framer = kobject_create_and_add("framer", root_xroe_kobj);
+	if (!kobj_framer)
+		return -ENOMEM;
+	/*
+	 * Iterate over MAX_NUM_ETH_PORTS (not a hard-coded 4) so the loop
+	 * stays in sync with the kobject arrays and with
+	 * xroe_sysfs_ipv4_exit().
+	 */
+	for (i = 0; i < MAX_NUM_ETH_PORTS; i++) {
+		snprintf(eth_port_dir_name, sizeof(eth_port_dir_name),
+			 "eth_port_%d", i);
+		kobj_eth_ports[i] = kobject_create_and_add(eth_port_dir_name,
+							   kobj_framer);
+		if (!kobj_eth_ports[i])
+			return -ENOMEM;
+		kobj_ipv4[i] = kobject_create_and_add("ipv4",
+						      kobj_eth_ports[i]);
+		if (!kobj_ipv4[i])
+			return -ENOMEM;
+		ret = sysfs_create_group(kobj_ipv4[i], &attr_group);
+		if (ret) {
+			/* Propagate the error instead of silently looping on */
+			kobject_put(kobj_ipv4[i]);
+			return ret;
+		}
+	}
+	return 0;
+}
+
+/**
+ * xroe_sysfs_ipv4_exit - Deletes the xroe sysfs "ipv4" subdirectory & entries
+ *
+ * Deletes the xroe sysfs "ipv4" subdirectory and entries,
+ * under the "xroe" entry.
+ *
+ * NOTE(review): only the per-port "ipv4" kobjects are released here; the
+ * "framer" and "eth_port_N" kobjects created in xroe_sysfs_ipv4_init() are
+ * not put in this file - confirm they are released elsewhere on teardown.
+ */
+void xroe_sysfs_ipv4_exit(void)
+{
+	int i;
+
+	/* kobject_put() tolerates NULL, so ports never created are safe */
+	for (i = 0; i < MAX_NUM_ETH_PORTS; i++)
+		kobject_put(kobj_ipv4[i]);
+}
+
+/**
+ * utils_ipv4addr_hextochar - Integer to char array for IPv4 addresses
+ * @ip: The IP address in integer format
+ * @bytes: The IP address in a 4-byte array
+ *
+ * Converts an IPv4 address given in unsigned integer format to a character
+ * array: bytes[0] receives the least significant octet of @ip, bytes[3]
+ * the most significant.
+ */
+static void utils_ipv4addr_hextochar(u32 ip, unsigned char *bytes)
+{
+	int i;
+
+	for (i = 0; i < 4; i++)
+		bytes[i] = (ip >> (8 * i)) & 0xFF;
+}
+
+/**
+ * utils_ipv4addr_chartohex - Converts an IPv4 address string to an integer
+ * @ip_addr: The character array containing the IP address
+ * @p_ip_addr: The converted IPv4 address
+ *
+ * Converts an IPv4 address given as a dotted-decimal character array
+ * ("a.b.c.d") to integer format; the first field ends up in the most
+ * significant byte of *@p_ip_addr.
+ *
+ * Return: 4 (the number of address fields parsed) on success, a negative
+ * errno for a malformed field, otherwise a value != 4 which the callers
+ * treat as invalid input
+ */
+static int utils_ipv4addr_chartohex(char *ip_addr, uint32_t *p_ip_addr)
+{
+	int count = 0, ret = -1;
+	char *string;
+	char *found;
+	u32 byte_array[4];
+	u32 byte = 0;
+
+	string = ip_addr;
+	while ((found = strsep(&string, ".")) != NULL) {
+		/*
+		 * Only 4 fields fit in byte_array; the previous "count <= 4"
+		 * bound allowed an out-of-bounds write at byte_array[4].
+		 */
+		if (count >= 4) {
+			count++;	/* too many fields: rejected below */
+			break;
+		}
+		ret = kstrtouint(found, 10, &byte);
+		if (ret)
+			return ret;
+		if (byte > 255)	/* each dotted field is a single octet */
+			return -EINVAL;
+		byte_array[count] = byte;
+		count++;
+	}
+
+	if (count == 4) {
+		ret = count;
+		*p_ip_addr = byte_array[3] | (byte_array[2] << 8)
+			| (byte_array[1] << 16) | (byte_array[0] << 24);
+	}
+	return ret;
+}
diff --git a/drivers/staging/xroeframer/sysfs_xroe_framer_ipv6.c b/drivers/staging/xroeframer/sysfs_xroe_framer_ipv6.c
new file mode 100644
index 000000000000..c26eae426cc1
--- /dev/null
+++ b/drivers/staging/xroeframer/sysfs_xroe_framer_ipv6.c
@@ -0,0 +1,571 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include "xroe_framer.h"
+
+enum { XROE_SIZE_MAX = 60 };
+static int xroe_size;
+static char xroe_tmp[XROE_SIZE_MAX];
+
+static void utils_ipv6addr_32to16(u32 *ip32, uint16_t *ip16);
+static int utils_ipv6addr_chartohex(char *ip_addr, uint32_t *p_ip_addr);
+
+/**
+ * ipv6_version_show - Returns the IPv6 version number
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 version number
+ *
+ * Returns the IPv6 version number
+ *
+ * Return: the number of characters printed on success
+ */
+static ssize_t ipv6_version_show(struct kobject *kobj,
+				 struct kobj_attribute *attr, char *buff)
+{
+	u32 version;
+
+	version = utils_sysfs_show_wrapper(ETH_IPV6_V_ADDR, ETH_IPV6_V_OFFSET,
+					   ETH_IPV6_V_MASK, kobj);
+	/* Return the exact formatted length, not XROE_SIZE_MAX */
+	return sprintf(buff, "%u\n", version);
+}
+
+/**
+ * ipv6_version_store - Writes to the IPv6 version number sysfs entry
+ * (not permitted)
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 version
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv6 version number sysfs entry (not permitted: the
+ * attribute is registered 0444, so this handler accepts and discards any
+ * input that somehow reaches it).
+ *
+ * Return: the value of @count. Returning 0 from a sysfs store callback
+ * would make user space retry the write indefinitely.
+ */
+static ssize_t ipv6_version_store(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  const char *buff, size_t count)
+{
+	return count;
+}
+
+/**
+ * ipv6_traffic_class_show - Returns the IPv6 traffic class
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 traffic class
+ *
+ * Returns the IPv6 traffic class
+ *
+ * Return: the number of characters printed on success
+ */
+static ssize_t ipv6_traffic_class_show(struct kobject *kobj,
+				       struct kobj_attribute *attr, char *buff)
+{
+	u32 traffic_class;
+
+	traffic_class = utils_sysfs_show_wrapper(ETH_IPV6_TRAFFIC_CLASS_ADDR,
+						 ETH_IPV6_TRAFFIC_CLASS_OFFSET,
+						 ETH_IPV6_TRAFFIC_CLASS_MASK,
+						 kobj);
+	/* Return the exact formatted length, not XROE_SIZE_MAX */
+	return sprintf(buff, "%u\n", traffic_class);
+}
+
+/**
+ * ipv6_traffic_class_store - Writes to the IPv6 traffic class
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 traffic class
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv6 traffic class sysfs entry
+ *
+ * Return: the value of @count on success, negative error code otherwise
+ */
+static ssize_t ipv6_traffic_class_store(struct kobject *kobj,
+					struct kobj_attribute *attr,
+					const char *buff, size_t count)
+{
+	int ret;
+	u32 traffic_class;
+
+	ret = kstrtouint(buff, 10, &traffic_class);
+	if (ret)
+		return ret;
+	utils_sysfs_store_wrapper(ETH_IPV6_TRAFFIC_CLASS_ADDR,
+				  ETH_IPV6_TRAFFIC_CLASS_OFFSET,
+				  ETH_IPV6_TRAFFIC_CLASS_MASK, traffic_class,
+				  kobj);
+	/* Consume the whole input so user space does not retry the write */
+	return count;
+}
+
+/**
+ * ipv6_flow_label_show - Returns the IPv6 flow label
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 flow label
+ *
+ * Returns the IPv6 flow label
+ *
+ * Return: the number of characters printed on success
+ */
+static ssize_t ipv6_flow_label_show(struct kobject *kobj,
+				    struct kobj_attribute *attr, char *buff)
+{
+	u32 flow_label;
+
+	flow_label = utils_sysfs_show_wrapper(ETH_IPV6_FLOW_LABEL_ADDR,
+					      ETH_IPV6_FLOW_LABEL_OFFSET,
+					      ETH_IPV6_FLOW_LABEL_MASK, kobj);
+	/* Return the exact formatted length, not XROE_SIZE_MAX */
+	return sprintf(buff, "%u\n", flow_label);
+}
+
+/**
+ * ipv6_flow_label_store - Writes to the IPv6 flow label
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 flow label
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv6 flow label sysfs entry
+ *
+ * Return: the value of @count on success, negative error code otherwise
+ */
+static ssize_t ipv6_flow_label_store(struct kobject *kobj,
+				     struct kobj_attribute *attr,
+				     const char *buff, size_t count)
+{
+	int ret;
+	u32 flow_label;
+
+	ret = kstrtouint(buff, 10, &flow_label);
+	if (ret)
+		return ret;
+	utils_sysfs_store_wrapper(ETH_IPV6_FLOW_LABEL_ADDR,
+				  ETH_IPV6_FLOW_LABEL_OFFSET,
+				  ETH_IPV6_FLOW_LABEL_MASK, flow_label, kobj);
+	/* Consume the whole input so user space does not retry the write */
+	return count;
+}
+
+/**
+ * ipv6_next_header_show - Returns the IPv6 next header
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 next header
+ *
+ * Returns the IPv6 next header
+ *
+ * Return: the number of characters printed on success
+ */
+static ssize_t ipv6_next_header_show(struct kobject *kobj,
+				     struct kobj_attribute *attr,
+				     char *buff)
+{
+	u32 next_header;
+
+	next_header = utils_sysfs_show_wrapper(ETH_IPV6_NEXT_HEADER_ADDR,
+					       ETH_IPV6_NEXT_HEADER_OFFSET,
+					       ETH_IPV6_NEXT_HEADER_MASK, kobj);
+	/* Return the exact formatted length, not XROE_SIZE_MAX */
+	return sprintf(buff, "%u\n", next_header);
+}
+
+/**
+ * ipv6_next_header_store - Writes to the IPv6 next header
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 next header
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv6 next header sysfs entry
+ *
+ * Return: the value of @count on success, negative error code otherwise
+ */
+static ssize_t ipv6_next_header_store(struct kobject *kobj,
+				      struct kobj_attribute *attr,
+				      const char *buff, size_t count)
+{
+	int ret;
+	u32 next_header;
+
+	ret = kstrtouint(buff, 10, &next_header);
+	if (ret)
+		return ret;
+	utils_sysfs_store_wrapper(ETH_IPV6_NEXT_HEADER_ADDR,
+				  ETH_IPV6_NEXT_HEADER_OFFSET,
+				  ETH_IPV6_NEXT_HEADER_MASK, next_header, kobj);
+	/* Consume the whole input so user space does not retry the write */
+	return count;
+}
+
+/**
+ * ipv6_hop_limit_show - Returns the IPv6 hop limit
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 hop limit
+ *
+ * Returns the IPv6 hop limit
+ *
+ * Return: the number of characters printed on success
+ */
+static ssize_t ipv6_hop_limit_show(struct kobject *kobj,
+				   struct kobj_attribute *attr, char *buff)
+{
+	u32 hop_limit;
+
+	hop_limit = utils_sysfs_show_wrapper(ETH_IPV6_HOP_LIMIT_ADDR,
+					     ETH_IPV6_HOP_LIMIT_OFFSET,
+					     ETH_IPV6_HOP_LIMIT_MASK, kobj);
+	/* Return the exact formatted length, not XROE_SIZE_MAX */
+	return sprintf(buff, "%u\n", hop_limit);
+}
+
+/**
+ * ipv6_hop_limit_store - Writes to the IPv6 hop limit
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 hop limit
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv6 hop limit sysfs entry
+ *
+ * Return: the value of @count on success, negative error code otherwise
+ */
+static ssize_t ipv6_hop_limit_store(struct kobject *kobj,
+				    struct kobj_attribute *attr,
+				    const char *buff, size_t count)
+{
+	int ret;
+	u32 hop_limit;
+
+	ret = kstrtouint(buff, 10, &hop_limit);
+	if (ret)
+		return ret;
+	utils_sysfs_store_wrapper(ETH_IPV6_HOP_LIMIT_ADDR,
+				  ETH_IPV6_HOP_LIMIT_OFFSET,
+				  ETH_IPV6_HOP_LIMIT_MASK, hop_limit, kobj);
+	/* Consume the whole input so user space does not retry the write */
+	return count;
+}
+
+/**
+ * ipv6_source_address_show - Returns the IPv6 source address
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 source address
+ *
+ * Returns the IPv6 source address in xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx
+ * format
+ *
+ * Return: the number of characters printed on success
+ */
+static ssize_t ipv6_source_address_show
+(struct kobject *kobj, struct kobj_attribute *attr, char *buff)
+{
+	u32 source[4];
+	u16 source_add16[8];
+
+	source[0] = utils_sysfs_show_wrapper(ETH_IPV6_SOURCE_ADD_31_0_ADDR,
+					     ETH_IPV6_SOURCE_ADD_31_0_OFFSET,
+					     ETH_IPV6_SOURCE_ADD_31_0_MASK,
+					     kobj);
+	source[1] = utils_sysfs_show_wrapper(ETH_IPV6_SOURCE_ADD_63_32_ADDR,
+					     ETH_IPV6_SOURCE_ADD_63_32_OFFSET,
+					     ETH_IPV6_SOURCE_ADD_63_32_MASK,
+					     kobj);
+	source[2] = utils_sysfs_show_wrapper(ETH_IPV6_SOURCE_ADD_95_64_ADDR,
+					     ETH_IPV6_SOURCE_ADD_95_64_OFFSET,
+					     ETH_IPV6_SOURCE_ADD_95_64_MASK,
+					     kobj);
+	source[3] = utils_sysfs_show_wrapper(ETH_IPV6_SOURCE_ADD_127_96_ADDR,
+					     ETH_IPV6_SOURCE_ADD_127_96_OFFSET,
+					     ETH_IPV6_SOURCE_ADD_127_96_MASK,
+					     kobj);
+
+	utils_ipv6addr_32to16(source, source_add16);
+	/* Return the exact formatted length, not XROE_SIZE_MAX */
+	return sprintf(buff, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+		       source_add16[0], source_add16[1], source_add16[2],
+		       source_add16[3],
+		       source_add16[4], source_add16[5], source_add16[6],
+		       source_add16[7]);
+}
+
+/**
+ * ipv6_source_address_store - Writes to the IPv6 source address sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 source address
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv6 source address sysfs entry. The input is parsed as
+ * eight colon-separated hexadecimal fields; malformed input is silently
+ * ignored and the registers are left unchanged, matching the original
+ * behaviour.
+ *
+ * Return: the value of @count (the whole input is consumed)
+ */
+static ssize_t ipv6_source_address_store
+(struct kobject *kobj, struct kobj_attribute *attr, const char *buff,
+size_t count)
+{
+	u32 source_add[4];
+
+	/*
+	 * Bound the copy and guarantee NUL termination: strncpy() does not
+	 * terminate when the source is longer than the copy length, and
+	 * utils_ipv6addr_chartohex() relies on a terminated string for
+	 * strsep() parsing.
+	 */
+	xroe_size = min_t(size_t, count, (size_t)(XROE_SIZE_MAX - 1));
+	strncpy(xroe_tmp, buff, xroe_size);
+	xroe_tmp[xroe_size] = '\0';
+	if (utils_ipv6addr_chartohex(xroe_tmp, source_add) == 8) {
+		utils_sysfs_store_wrapper(ETH_IPV6_SOURCE_ADD_31_0_ADDR,
+					  ETH_IPV6_SOURCE_ADD_31_0_OFFSET,
+					  ETH_IPV6_SOURCE_ADD_31_0_MASK,
+					  source_add[0], kobj);
+		utils_sysfs_store_wrapper(ETH_IPV6_SOURCE_ADD_63_32_ADDR,
+					  ETH_IPV6_SOURCE_ADD_63_32_OFFSET,
+					  ETH_IPV6_SOURCE_ADD_63_32_MASK,
+					  source_add[1], kobj);
+		utils_sysfs_store_wrapper(ETH_IPV6_SOURCE_ADD_95_64_ADDR,
+					  ETH_IPV6_SOURCE_ADD_95_64_OFFSET,
+					  ETH_IPV6_SOURCE_ADD_95_64_MASK,
+					  source_add[2], kobj);
+		utils_sysfs_store_wrapper(ETH_IPV6_SOURCE_ADD_127_96_ADDR,
+					  ETH_IPV6_SOURCE_ADD_127_96_OFFSET,
+					  ETH_IPV6_SOURCE_ADD_127_96_MASK,
+					  source_add[3], kobj);
+	}
+	/* Consume the whole input so user space does not retry the write */
+	return count;
+}
+
+/**
+ * ipv6_destination_address_show - Returns the IPv6 destination address
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 destination address
+ *
+ * Returns the IPv6 destination address in
+ * xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx format
+ *
+ * Return: the number of characters printed on success
+ */
+static ssize_t ipv6_destination_address_show
+(struct kobject *kobj, struct kobj_attribute *attr, char *buff)
+{
+	u32 dest[4];
+	u16 dest_add16[8];
+
+	dest[0] = utils_sysfs_show_wrapper(ETH_IPV6_DEST_ADD_31_0_ADDR,
+					   ETH_IPV6_DEST_ADD_31_0_OFFSET,
+					   ETH_IPV6_DEST_ADD_31_0_MASK,
+					   kobj);
+	dest[1] = utils_sysfs_show_wrapper(ETH_IPV6_DEST_ADD_63_32_ADDR,
+					   ETH_IPV6_DEST_ADD_63_32_OFFSET,
+					   ETH_IPV6_DEST_ADD_63_32_MASK,
+					   kobj);
+	dest[2] = utils_sysfs_show_wrapper(ETH_IPV6_DEST_ADD_95_64_ADDR,
+					   ETH_IPV6_DEST_ADD_95_64_OFFSET,
+					   ETH_IPV6_DEST_ADD_95_64_MASK,
+					   kobj);
+	dest[3] = utils_sysfs_show_wrapper(ETH_IPV6_DEST_ADD_127_96_ADDR,
+					   ETH_IPV6_DEST_ADD_127_96_OFFSET,
+					   ETH_IPV6_DEST_ADD_127_96_MASK,
+					   kobj);
+
+	utils_ipv6addr_32to16(dest, dest_add16);
+	/* Return the exact formatted length, not XROE_SIZE_MAX */
+	return sprintf(buff, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+		       dest_add16[0], dest_add16[1], dest_add16[2],
+		       dest_add16[3],
+		       dest_add16[4], dest_add16[5], dest_add16[6],
+		       dest_add16[7]);
+}
+
+/**
+ * ipv6_destination_address_store - Writes to the IPv6 destination address
+ * sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 destination address
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv6 destination address sysfs entry. The input is parsed
+ * as eight colon-separated hexadecimal fields; malformed input is silently
+ * ignored and the registers are left unchanged, matching the original
+ * behaviour.
+ *
+ * Return: the value of @count (the whole input is consumed)
+ */
+static ssize_t ipv6_destination_address_store
+(struct kobject *kobj, struct kobj_attribute *attr, const char *buff,
+size_t count)
+{
+	u32 dest_add[4];
+
+	/*
+	 * Bound the copy and guarantee NUL termination; strncpy() does not
+	 * terminate when the source is longer than the copy length and the
+	 * parser below needs a terminated string.
+	 */
+	xroe_size = min_t(size_t, count, (size_t)(XROE_SIZE_MAX - 1));
+	strncpy(xroe_tmp, buff, xroe_size);
+	xroe_tmp[xroe_size] = '\0';
+	if (utils_ipv6addr_chartohex(xroe_tmp, dest_add) == 8) {
+		utils_sysfs_store_wrapper(ETH_IPV6_DEST_ADD_31_0_ADDR,
+					  ETH_IPV6_DEST_ADD_31_0_OFFSET,
+					  ETH_IPV6_DEST_ADD_31_0_MASK,
+					  dest_add[0], kobj);
+		utils_sysfs_store_wrapper(ETH_IPV6_DEST_ADD_63_32_ADDR,
+					  ETH_IPV6_DEST_ADD_63_32_OFFSET,
+					  ETH_IPV6_DEST_ADD_63_32_MASK,
+					  dest_add[1], kobj);
+		utils_sysfs_store_wrapper(ETH_IPV6_DEST_ADD_95_64_ADDR,
+					  ETH_IPV6_DEST_ADD_95_64_OFFSET,
+					  ETH_IPV6_DEST_ADD_95_64_MASK,
+					  dest_add[2], kobj);
+		utils_sysfs_store_wrapper(ETH_IPV6_DEST_ADD_127_96_ADDR,
+					  ETH_IPV6_DEST_ADD_127_96_OFFSET,
+					  ETH_IPV6_DEST_ADD_127_96_MASK,
+					  dest_add[3], kobj);
+	}
+	/* Consume the whole input so user space does not retry the write */
+	return count;
+}
+
+/* TODO Use DEVICE_ATTR/_RW/_RO macros */
+
+/*
+ * sysfs attributes for the per-port "ipv6" directory. Header fields are
+ * group read/write (0660); "version" is read-only (0444).
+ * NOTE(review): checkpatch warns about group-writable sysfs permissions -
+ * confirm 0660 is intentional here.
+ */
+static struct kobj_attribute version_attribute =
+	__ATTR(version, 0444, ipv6_version_show, ipv6_version_store);
+static struct kobj_attribute traffic_class =
+	__ATTR(traffic_class, 0660, ipv6_traffic_class_show,
+	       ipv6_traffic_class_store);
+static struct kobj_attribute flow_label =
+	__ATTR(flow_label, 0660, ipv6_flow_label_show, ipv6_flow_label_store);
+static struct kobj_attribute next_header =
+	__ATTR(next_header, 0660, ipv6_next_header_show,
+	       ipv6_next_header_store);
+static struct kobj_attribute hop_limit =
+	__ATTR(hop_limit, 0660, ipv6_hop_limit_show, ipv6_hop_limit_store);
+static struct kobj_attribute source_add_attribute =
+	__ATTR(source_add, 0660, ipv6_source_address_show,
+	       ipv6_source_address_store);
+static struct kobj_attribute dest_add_attribute =
+	__ATTR(dest_add, 0660, ipv6_destination_address_show,
+	       ipv6_destination_address_store);
+
+/* NULL-terminated list consumed by sysfs_create_group() below */
+static struct attribute *attrs[] = {
+	&version_attribute.attr,
+	&traffic_class.attr,
+	&flow_label.attr,
+	&next_header.attr,
+	&hop_limit.attr,
+	&source_add_attribute.attr,
+	&dest_add_attribute.attr,
+	NULL,
+};
+
+static struct attribute_group attr_group = {
+	.attrs = attrs,
+};
+
+static struct kobject *kobj_ipv6[MAX_NUM_ETH_PORTS];
+
+/**
+ * xroe_sysfs_ipv6_init - Creates the xroe sysfs "ipv6" subdirectory & entries
+ *
+ * Creates the xroe sysfs "ipv6" subdirectory and entries under the per-port
+ * "eth_port_<N>" directories created by xroe_sysfs_ipv4_init().
+ *
+ * Return: 0 on success, negative value in case of failure to
+ * create a kobject or the sysfs group
+ */
+int xroe_sysfs_ipv6_init(void)
+{
+	int ret;
+	int i;
+
+	/*
+	 * Iterate over MAX_NUM_ETH_PORTS (not a hard-coded 4) so the loop
+	 * stays in sync with kobj_eth_ports[] and xroe_sysfs_ipv6_exit().
+	 */
+	for (i = 0; i < MAX_NUM_ETH_PORTS; i++) {
+		kobj_ipv6[i] = kobject_create_and_add("ipv6",
+						      kobj_eth_ports[i]);
+		if (!kobj_ipv6[i])
+			return -ENOMEM;
+		ret = sysfs_create_group(kobj_ipv6[i], &attr_group);
+		if (ret) {
+			/* Propagate the error instead of silently looping on */
+			kobject_put(kobj_ipv6[i]);
+			return ret;
+		}
+	}
+	return 0;
+}
+
+/**
+ * xroe_sysfs_ipv6_exit - Deletes the xroe sysfs "ipv6" subdirectory & entries
+ *
+ * Deletes the xroe sysfs "ipv6" subdirectory and entries,
+ * under the "xroe" entry
+ *
+ */
+void xroe_sysfs_ipv6_exit(void)
+{
+	int i;
+
+	/* kobject_put() tolerates NULL, so ports never created are safe */
+	for (i = 0; i < MAX_NUM_ETH_PORTS; i++)
+		kobject_put(kobj_ipv6[i]);
+}
+
+/**
+ * utils_ipv6addr_32to16 - uint32_t to uint16_t for IPv6 addresses
+ * @ip32: The IPv6 address in uint32_t format
+ * @ip16: The IPv6 address in uint16_t format
+ *
+ * Converts an IPv6 address given as four 32-bit words into eight 16-bit
+ * fields: each word is split into its high half (even index) and low half
+ * (odd index).
+ */
+static void utils_ipv6addr_32to16(u32 *ip32, uint16_t *ip16)
+{
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		ip16[2 * i] = ip32[i] >> 16;
+		ip16[2 * i + 1] = ip32[i] & 0x0000FFFF;
+	}
+}
+
+/**
+ * utils_ipv6addr_chartohex - Converts an IPv6 address string to integers
+ * @ip_addr: The character array containing the IP address
+ * @p_ip_addr: The converted IPv6 address (four 32-bit words)
+ *
+ * Converts an IPv6 address given as eight colon-separated full hexadecimal
+ * fields to integer format. NOTE(review): the "::" shorthand and compressed
+ * forms are not supported (an empty field fails kstrtouint()), same as the
+ * original implementation.
+ *
+ * Return: 8 (the number of address fields parsed) on success, a negative
+ * errno for a malformed field, otherwise a value != 8 which the callers
+ * treat as invalid input
+ */
+static int utils_ipv6addr_chartohex(char *ip_addr, uint32_t *p_ip_addr)
+{
+	int ret = -1;
+	int count = 0;
+	char *string = ip_addr;
+	char *found;
+	u16 ip_array_16[8];
+	u32 field;
+
+	while ((found = strsep(&string, ":")) != NULL) {
+		/*
+		 * Only 8 fields fit in ip_array_16; the previous "count <= 8"
+		 * bound allowed an out-of-bounds write at ip_array_16[8].
+		 */
+		if (count >= 8) {
+			count++;	/* too many fields: rejected below */
+			break;
+		}
+		ret = kstrtouint(found, 16, &field);
+		if (ret)
+			return ret;
+		if (field > 0xFFFF)	/* each field is one 16-bit group */
+			return -EINVAL;
+		ip_array_16[count] = (u16)field;
+		count++;
+	}
+	if (count == 8) {
+		p_ip_addr[0] = ip_array_16[1] | (ip_array_16[0] << 16);
+		p_ip_addr[1] = ip_array_16[3] | (ip_array_16[2] << 16);
+		p_ip_addr[2] = ip_array_16[5] | (ip_array_16[4] << 16);
+		p_ip_addr[3] = ip_array_16[7] | (ip_array_16[6] << 16);
+		ret = count;
+	}
+	return ret;
+}
diff --git a/drivers/staging/xroeframer/sysfs_xroe_framer_stats.c b/drivers/staging/xroeframer/sysfs_xroe_framer_stats.c
new file mode 100644
index 000000000000..063664bb987a
--- /dev/null
+++ b/drivers/staging/xroeframer/sysfs_xroe_framer_stats.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include "xroe_framer.h"
+
+/**
+ * total_rx_good_pkt_show - Returns the total good rx packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total good rx packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_good_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_TOTAL_RX_GOOD_PKT_CNT_ADDR,
+ STATS_TOTAL_RX_GOOD_PKT_CNT_OFFSET,
+ STATS_TOTAL_RX_GOOD_PKT_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_bad_pkt_show - Returns the total bad rx packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total bad rx packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_bad_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_TOTAL_RX_BAD_PKT_CNT_ADDR,
+ STATS_TOTAL_RX_BAD_PKT_CNT_OFFSET,
+ STATS_TOTAL_RX_BAD_PKT_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_bad_fcs_show - Returns the total bad fcs count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total bad frame check sequences count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_bad_fcs_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_TOTAL_RX_BAD_FCS_CNT_ADDR,
+ STATS_TOTAL_RX_BAD_FCS_CNT_OFFSET,
+ STATS_TOTAL_RX_BAD_FCS_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_user_pkt_show - Returns the total user rx packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total user rx packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_user_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_DATA_RX_PACKETS_CNT_ADDR,
+ STATS_USER_DATA_RX_PACKETS_CNT_OFFSET,
+ STATS_USER_DATA_RX_PACKETS_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_good_user_pkt_show - Returns the total good user rx packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total good user rx packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_good_user_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_DATA_RX_GOOD_PKT_CNT_ADDR,
+ STATS_USER_DATA_RX_GOOD_PKT_CNT_OFFSET,
+ STATS_USER_DATA_RX_GOOD_PKT_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_bad_user_pkt_show - Returns the total bad user rx packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total bad user rx packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_bad_user_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_DATA_RX_BAD_PKT_CNT_ADDR,
+ STATS_USER_DATA_RX_BAD_PKT_CNT_OFFSET,
+ STATS_USER_DATA_RX_BAD_PKT_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_bad_user_fcs_show - Returns the total bad user rx fcs count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total bad user frame check sequences count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_bad_user_fcs_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_DATA_RX_BAD_FCS_CNT_ADDR,
+ STATS_USER_DATA_RX_BAD_FCS_CNT_OFFSET,
+ STATS_USER_DATA_RX_BAD_FCS_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_user_ctrl_pkt_show - Returns the total user rx control packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total user rx control packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_user_ctrl_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_CTRL_RX_PACKETS_CNT_ADDR,
+ STATS_USER_CTRL_RX_PACKETS_CNT_OFFSET,
+ STATS_USER_CTRL_RX_PACKETS_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_good_user_ctrl_pkt_show - Returns the total good user rx
+ * control packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total good user rx control packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_good_user_ctrl_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_CTRL_RX_GOOD_PKT_CNT_ADDR,
+ STATS_USER_CTRL_RX_GOOD_PKT_CNT_OFFSET,
+ STATS_USER_CTRL_RX_GOOD_PKT_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_bad_user_ctrl_pkt_show - Returns the total bad user rx
+ * control packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total bad user rx control packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_bad_user_ctrl_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_CTRL_RX_BAD_PKT_CNT_ADDR,
+ STATS_USER_CTRL_RX_BAD_PKT_CNT_OFFSET,
+ STATS_USER_CTRL_RX_BAD_PKT_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_bad_user_ctrl_fcs_show - Returns the total bad user rx
+ * control fcs count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total bad user control frame check sequences count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_bad_user_ctrl_fcs_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_CTRL_RX_BAD_FCS_CNT_ADDR,
+ STATS_USER_CTRL_RX_BAD_FCS_CNT_OFFSET,
+ STATS_USER_CTRL_RX_BAD_FCS_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * rx_user_pkt_rate_show - Returns the rate of user packets
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the rate of user rx packets
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t rx_user_pkt_rate_show(struct kobject *kobj,
+				     struct kobj_attribute *attr, char *buff)
+{
+	u32 rate;
+
+	rate = utils_sysfs_show_wrapper(STATS_USER_DATA_RX_PKTS_RATE_ADDR,
+					STATS_USER_DATA_RX_PKTS_RATE_OFFSET,
+					STATS_USER_DATA_RX_PKTS_RATE_MASK,
+					kobj);
+	return sprintf(buff, "%d\n", rate);
+}
+
+/**
+ * rx_user_ctrl_pkt_rate_show - Returns the rate of user control packets
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the rate of user rx control packets
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t rx_user_ctrl_pkt_rate_show(struct kobject *kobj,
+					  struct kobj_attribute *attr,
+					  char *buff)
+{
+	u32 rate;
+
+	rate = utils_sysfs_show_wrapper(STATS_USER_CTRL_RX_PKTS_RATE_ADDR,
+					STATS_USER_CTRL_RX_PKTS_RATE_OFFSET,
+					STATS_USER_CTRL_RX_PKTS_RATE_MASK,
+					kobj);
+	return sprintf(buff, "%d\n", rate);
+}
+
+/* TODO Use DEVICE_ATTR/_RW/_RO macros */
+static struct kobj_attribute total_rx_good_pkt_attribute =
+ __ATTR(total_rx_good_pkt, 0444, total_rx_good_pkt_show, NULL);
+static struct kobj_attribute total_rx_bad_pkt_attribute =
+ __ATTR(total_rx_bad_pkt, 0444, total_rx_bad_pkt_show, NULL);
+static struct kobj_attribute total_rx_bad_fcs_attribute =
+ __ATTR(total_rx_bad_fcs, 0444, total_rx_bad_fcs_show, NULL);
+static struct kobj_attribute total_rx_user_pkt_attribute =
+ __ATTR(total_rx_user_pkt, 0444, total_rx_user_pkt_show, NULL);
+static struct kobj_attribute total_rx_good_user_pkt_attribute =
+ __ATTR(total_rx_good_user_pkt, 0444, total_rx_good_user_pkt_show, NULL);
+static struct kobj_attribute total_rx_bad_user_pkt_attribute =
+ __ATTR(total_rx_bad_user_pkt, 0444, total_rx_bad_user_pkt_show, NULL);
+static struct kobj_attribute total_rx_bad_user_fcs_attribute =
+ __ATTR(total_rx_bad_user_fcs, 0444, total_rx_bad_user_fcs_show, NULL);
+static struct kobj_attribute total_rx_user_ctrl_pkt_attribute =
+ __ATTR(total_rx_user_ctrl_pkt, 0444, total_rx_user_ctrl_pkt_show, NULL);
+static struct kobj_attribute total_rx_good_user_ctrl_pkt_attribute =
+ __ATTR(total_rx_good_user_ctrl_pkt, 0444,
+ total_rx_good_user_ctrl_pkt_show, NULL);
+static struct kobj_attribute total_rx_bad_user_ctrl_pkt_attribute =
+ __ATTR(total_rx_bad_user_ctrl_pkt, 0444,
+ total_rx_bad_user_ctrl_pkt_show, NULL);
+static struct kobj_attribute total_rx_bad_user_ctrl_fcs_attribute =
+ __ATTR(total_rx_bad_user_ctrl_fcs, 0444,
+ total_rx_bad_user_ctrl_fcs_show, NULL);
+static struct kobj_attribute rx_user_pkt_rate_attribute =
+ __ATTR(rx_user_pkt_rate, 0444, rx_user_pkt_rate_show, NULL);
+static struct kobj_attribute rx_user_ctrl_pkt_rate_attribute =
+ __ATTR(rx_user_ctrl_pkt_rate, 0444, rx_user_ctrl_pkt_rate_show, NULL);
+
+static struct attribute *attrs[] = {
+ &total_rx_good_pkt_attribute.attr,
+ &total_rx_bad_pkt_attribute.attr,
+ &total_rx_bad_fcs_attribute.attr,
+ &total_rx_user_pkt_attribute.attr,
+ &total_rx_good_user_pkt_attribute.attr,
+ &total_rx_bad_user_pkt_attribute.attr,
+ &total_rx_bad_user_fcs_attribute.attr,
+ &total_rx_user_ctrl_pkt_attribute.attr,
+ &total_rx_good_user_ctrl_pkt_attribute.attr,
+ &total_rx_bad_user_ctrl_pkt_attribute.attr,
+ &total_rx_bad_user_ctrl_fcs_attribute.attr,
+ &rx_user_pkt_rate_attribute.attr,
+ &rx_user_ctrl_pkt_rate_attribute.attr,
+ NULL,
+};
+
+static struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+/* Root of the "stats" sysfs subdirectory; non-static, presumably shared
+ * with other xroe sysfs source files - TODO confirm declared in
+ * xroe_framer.h.
+ */
+struct kobject *kobj_stats;
+
+/**
+ * xroe_sysfs_stats_init - Creates the xroe sysfs "stats" subdirectory & entries
+ *
+ * Return: 0 on success, negative value in case of failure to
+ * create the sysfs group
+ *
+ * Creates the xroe sysfs "stats" subdirectory and entries under "xroe"
+ */
+int xroe_sysfs_stats_init(void)
+{
+	int ret;
+
+	kobj_stats = kobject_create_and_add("stats", root_xroe_kobj);
+	if (!kobj_stats)
+		return -ENOMEM;
+
+	ret = sysfs_create_group(kobj_stats, &attr_group);
+	if (ret)
+		/* Release the directory again if populating it failed */
+		kobject_put(kobj_stats);
+
+	return ret;
+}
+
+/**
+ * xroe_sysfs_stats_exit - Deletes the xroe sysfs "stats" subdirectory & entries
+ *
+ * Deletes the xroe sysfs "stats" subdirectory and entries,
+ * under the "xroe" entry
+ */
+void xroe_sysfs_stats_exit(void)
+{
+	kobject_put(kobj_stats);
+}
diff --git a/drivers/staging/xroeframer/sysfs_xroe_framer_udp.c b/drivers/staging/xroeframer/sysfs_xroe_framer_udp.c
new file mode 100644
index 000000000000..8f8a77b25da7
--- /dev/null
+++ b/drivers/staging/xroeframer/sysfs_xroe_framer_udp.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include "xroe_framer.h"
+
+enum { XROE_SIZE_MAX = 15 };
+static int xroe_size;
+
+/**
+ * udp_source_port_show - Returns the UDP source port
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the UDP source port
+ *
+ * Returns the UDP source port
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t udp_source_port_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 source_port;
+
+ source_port = utils_sysfs_show_wrapper(ETH_UDP_SOURCE_PORT_ADDR,
+ ETH_UDP_SOURCE_PORT_OFFSET,
+ ETH_UDP_SOURCE_PORT_MASK, kobj);
+ /*
+  * Return the number of bytes actually written (sysfs convention),
+  * not the fixed XROE_SIZE_MAX, and print the u32 as unsigned.
+  */
+ return sprintf(buff, "%u\n", source_port);
+}
+
+/**
+ * udp_source_port_store - Writes to the UDP source port sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the UDP source port
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the UDP source port sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t udp_source_port_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ int ret;
+ u32 source_port;
+
+ ret = kstrtouint(buff, 10, &source_port);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_UDP_SOURCE_PORT_ADDR,
+ ETH_UDP_SOURCE_PORT_OFFSET,
+ ETH_UDP_SOURCE_PORT_MASK, source_port, kobj);
+ /*
+  * Consume the whole write: returning less than count makes
+  * userspace retry with the remainder. This also drops the
+  * racy write to the file-scope xroe_size static.
+  */
+ return count;
+}
+
+/**
+ * udp_destination_port_show - Returns the UDP destination port
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the UDP destination port
+ *
+ * Returns the UDP destination port
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t udp_destination_port_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 dest_port;
+
+ dest_port = utils_sysfs_show_wrapper(ETH_UDP_DESTINATION_PORT_ADDR,
+ ETH_UDP_DESTINATION_PORT_OFFSET,
+ ETH_UDP_DESTINATION_PORT_MASK,
+ kobj);
+ /*
+  * Return the number of bytes actually written (sysfs convention),
+  * not the fixed XROE_SIZE_MAX, and print the u32 as unsigned.
+  */
+ return sprintf(buff, "%u\n", dest_port);
+}
+
+/**
+ * udp_destination_port_store - Writes to the UDP destination port sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the UDP destination port
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the UDP destination port sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t udp_destination_port_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ int ret;
+ u32 dest_port;
+
+ ret = kstrtouint(buff, 10, &dest_port);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_UDP_DESTINATION_PORT_ADDR,
+ ETH_UDP_DESTINATION_PORT_OFFSET,
+ ETH_UDP_DESTINATION_PORT_MASK, dest_port,
+ kobj);
+ /*
+  * Consume the whole write: returning less than count makes
+  * userspace retry with the remainder. This also drops the
+  * racy write to the file-scope xroe_size static.
+  */
+ return count;
+}
+
+/* TODO Use DEVICE_ATTR/_RW/_RO macros */
+
+static struct kobj_attribute source_port =
+ __ATTR(source_port, 0660, udp_source_port_show,
+ udp_source_port_store);
+static struct kobj_attribute dest_port =
+ __ATTR(dest_port, 0660, udp_destination_port_show,
+ udp_destination_port_store);
+
+static struct attribute *attrs[] = {
+ &source_port.attr,
+ &dest_port.attr,
+ NULL,
+};
+
+static struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+static struct kobject *kobj_udp[MAX_NUM_ETH_PORTS];
+
+/**
+ * xroe_sysfs_udp_init - Creates the xroe sysfs "udp" subdirectory and entries
+ *
+ * Return: 0 on success, negative value in case of failure to
+ * create the sysfs group
+ *
+ * Creates the xroe sysfs "udp" subdirectory and entries under "xroe"
+ */
+int xroe_sysfs_udp_init(void)
+{
+ int ret;
+ int i;
+
+ /*
+  * Iterate over MAX_NUM_ETH_PORTS rather than a magic "4", and
+  * fail fast: the old code kept looping after sysfs_create_group()
+  * failed and only reported the status of the *last* iteration.
+  */
+ for (i = 0; i < MAX_NUM_ETH_PORTS; i++) {
+ kobj_udp[i] = kobject_create_and_add("udp", kobj_eth_ports[i]);
+ if (!kobj_udp[i])
+ return -ENOMEM;
+ ret = sysfs_create_group(kobj_udp[i], &attr_group);
+ if (ret) {
+ kobject_put(kobj_udp[i]);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/**
+ * xroe_sysfs_udp_exit - Deletes the xroe sysfs "udp" subdirectory & entries
+ *
+ * Deletes the xroe sysfs "udp" subdirectory and entries,
+ * under the "xroe" entry
+ *
+ */
+void xroe_sysfs_udp_exit(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_NUM_ETH_PORTS; i++)
+ kobject_put(kobj_udp[i]);
+}
diff --git a/drivers/staging/xroeframer/xroe_framer.c b/drivers/staging/xroeframer/xroe_framer.c
new file mode 100644
index 000000000000..dba7c69b010f
--- /dev/null
+++ b/drivers/staging/xroeframer/xroe_framer.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include "xroe_framer.h"
+
+#define DRIVER_NAME "framer"
+
+/*
+ * TODO: to be made static as well, so that multiple instances can be used. As
+ * of now, the "lp" structure is shared among the multiple source files
+ */
+struct framer_local *lp;
+static struct platform_driver framer_driver;
+/*
+ * TODO: placeholder for the IRQ once it's been implemented
+ * in the framer block
+ */
+static irqreturn_t framer_irq(int irq, void *lp)
+{
+ return IRQ_HANDLED;
+}
+
+/**
+ * framer_probe - Probes the device tree to locate the framer block
+ * @pdev: The structure containing the device's details
+ *
+ * Probes the device tree to locate the framer block and maps it to
+ * the kernel virtual memory space
+ *
+ * Return: 0 on success or a negative errno on error.
+ */
+static int framer_probe(struct platform_device *pdev)
+{
+ struct resource *r_mem; /* IO mem resources */
+ struct resource *r_irq;
+ struct device *dev = &pdev->dev;
+ int rc = 0;
+
+ dev_dbg(dev, "Device Tree Probing\n");
+ lp = devm_kzalloc(&pdev->dev, sizeof(*lp), GFP_KERNEL);
+ if (!lp)
+ return -ENOMEM;
+
+ /* Get iospace for the device */
+ r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lp->base_addr = devm_ioremap_resource(&pdev->dev, r_mem);
+ if (IS_ERR(lp->base_addr))
+ return PTR_ERR(lp->base_addr);
+
+ dev_set_drvdata(dev, lp);
+ xroe_sysfs_init();
+ /* Get IRQ for the device */
+ /*
+  * TODO: No IRQ *yet* in the DT from the framer block, as it's still
+  * under development. To be added once it's in the block, and also
+  * replace with platform_get_irq_byname()
+  */
+ r_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ /*
+  * platform_get_resource() returns NULL on failure, never an
+  * ERR_PTR, so the old IS_ERR() check could not detect a missing
+  * IRQ resource.
+  */
+ if (!r_irq) {
+ dev_info(dev, "no IRQ found\n");
+ /*
+  * TODO: Return non-zero (error) code on no IRQ found.
+  * To be implemented once the IRQ is in the block
+  */
+ return 0;
+ }
+ /* lp->irq was previously left at 0 and never taken from the DT */
+ lp->irq = r_irq->start;
+ rc = devm_request_irq(dev, lp->irq, &framer_irq, 0, DRIVER_NAME, lp);
+ if (rc) {
+ dev_err(dev, "testmodule: Could not allocate interrupt %d.\n",
+ lp->irq);
+ /*
+  * TODO: Return non-zero (error) code on no IRQ found.
+  * To be implemented once the IRQ is in the block
+  */
+ return 0;
+ }
+
+ return rc;
+}
+
+/**
+ * framer_init - Registers the driver
+ *
+ * Return: 0 on success, -1 on allocation error
+ *
+ * Registers the framer driver and creates character device drivers
+ * for the whole block, as well as separate ones for stats and
+ * radio control.
+ */
+static int __init framer_init(void)
+{
+ int ret;
+
+ pr_debug("XROE framer driver init\n");
+
+ ret = platform_driver_register(&framer_driver);
+
+ return ret;
+}
+
+/**
+ * framer_exit - Destroys the driver
+ *
+ * Unregisters the framer driver and destroys the character
+ * device driver for the whole block, as well as the separate ones
+ * for stats and radio control. Returns 0 upon successful execution
+ */
+static void __exit framer_exit(void)
+{
+ xroe_sysfs_exit();
+ platform_driver_unregister(&framer_driver);
+ pr_info("XROE Framer exit\n");
+}
+
+module_init(framer_init);
+module_exit(framer_exit);
+
+static const struct of_device_id framer_of_match[] = {
+ { .compatible = "xlnx,roe-framer-1.0", },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, framer_of_match);
+
+static struct platform_driver framer_driver = {
+ .driver = {
+ /*
+ * TODO: .name shouldn't be necessary, though removing
+ * it results in kernel panic. To investigate further
+ */
+ .name = DRIVER_NAME,
+ .of_match_table = framer_of_match,
+ },
+ .probe = framer_probe,
+};
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_DESCRIPTION("framer - Xilinx Radio over Ethernet Framer driver");
diff --git a/drivers/staging/xroeframer/xroe_framer.h b/drivers/staging/xroeframer/xroe_framer.h
new file mode 100644
index 000000000000..03b8bb39095c
--- /dev/null
+++ b/drivers/staging/xroeframer/xroe_framer.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+#include "roe_framer_ctrl.h"
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioctl.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/stat.h> /* S_IRUSR, S_IWUSR */
+
+/* TODO: Remove hardcoded value of number of Ethernet ports and read the value
+ * from the device tree.
+ */
+#define MAX_NUM_ETH_PORTS 0x4
+/* TODO: to be made static as well, so that multiple instances can be used. As
+ * of now, the following 3 structures are shared among the multiple
+ * source files
+ */
+extern struct framer_local *lp;
+extern struct kobject *root_xroe_kobj;
+extern struct kobject *kobj_framer;
+extern struct kobject *kobj_eth_ports[MAX_NUM_ETH_PORTS];
+struct framer_local {
+ int irq;
+ unsigned long mem_start;
+ unsigned long mem_end;
+ void __iomem *base_addr;
+};
+
+int xroe_sysfs_init(void);
+int xroe_sysfs_ipv4_init(void);
+int xroe_sysfs_ipv6_init(void);
+int xroe_sysfs_udp_init(void);
+int xroe_sysfs_stats_init(void);
+void xroe_sysfs_exit(void);
+void xroe_sysfs_ipv4_exit(void);
+void xroe_sysfs_ipv6_exit(void);
+void xroe_sysfs_udp_exit(void);
+void xroe_sysfs_stats_exit(void);
+int utils_write32withmask(void __iomem *working_address, u32 value,
+ u32 mask, u32 offset);
+int utils_check_address_offset(u32 offset, size_t device_size);
+void utils_sysfs_store_wrapper(u32 address, u32 offset, u32 mask, u32 value,
+ struct kobject *kobj);
+u32 utils_sysfs_show_wrapper(u32 address, u32 offset, u32 mask,
+ struct kobject *kobj);
diff --git a/drivers/staging/xroetrafficgen/Kconfig b/drivers/staging/xroetrafficgen/Kconfig
new file mode 100644
index 000000000000..d2ead1483408
--- /dev/null
+++ b/drivers/staging/xroetrafficgen/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+
+#
+# Xilinx Radio over Ethernet Traffic Generator driver
+#
+
+config XROE_TRAFFIC_GEN
+ tristate "Xilinx Radio over Ethernet Traffic Generator driver"
+ help
+ The Traffic Generator is used in the testing of other RoE IP Blocks
+ (currently the XRoE Framer) and simulates a radio antenna interface.
+ It generates rolling ramp data for eCPRI antenna paths.
+ Each path is tagged with the antenna number. The sink locks to this
+ ramp data, then checks the next value is as expected.
diff --git a/drivers/staging/xroetrafficgen/Makefile b/drivers/staging/xroetrafficgen/Makefile
new file mode 100644
index 000000000000..e180a9bbc589
--- /dev/null
+++ b/drivers/staging/xroetrafficgen/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Radio over Ethernet Traffic Generator driver
+#
+# The Kconfig symbol must be referenced with the CONFIG_ prefix,
+# otherwise obj-$(...) never expands to obj-y/obj-m and the module
+# is silently not built.
+obj-$(CONFIG_XROE_TRAFFIC_GEN) := xroe_traffic_gen.o
+
+# The composite-object variable must match the target name
+# (xroe_traffic_gen.o), not the framer driver's "framer-objs".
+xroe_traffic_gen-objs := xroe-traffic-gen.o \
+		xroe-traffic-gen-sysfs.o
diff --git a/drivers/staging/xroetrafficgen/README b/drivers/staging/xroetrafficgen/README
new file mode 100644
index 000000000000..1828426af847
--- /dev/null
+++ b/drivers/staging/xroetrafficgen/README
@@ -0,0 +1,19 @@
+Xilinx Radio over Ethernet Traffic Generator driver
+===================================================
+
+About the RoE Framer Traffic Generator
+
+The Traffic Generator is used in the testing of other RoE IP Blocks (currently
+the XRoE Framer) and simulates a radio antenna interface. It generates rolling
+ramp data for eCPRI antenna paths. Each path is tagged with the antenna number.
+The sink locks to this ramp data, then checks the next value is as expected.
+
+
+About the Linux Driver
+
+The RoE Traffic Generator Linux Driver provides sysfs access to control a
+simulated radio antenna interface.
+The loading of the driver to the hardware is possible using Device Tree binding
+(see "dt-binding.txt" for more information). When the driver is loaded, the
+general controls (such as sink lock, enable, loopback etc) are exposed
+under /sys/kernel/xroetrafficgen.
diff --git a/drivers/staging/xroetrafficgen/dt-binding.txt b/drivers/staging/xroetrafficgen/dt-binding.txt
new file mode 100644
index 000000000000..3516d3ff8009
--- /dev/null
+++ b/drivers/staging/xroetrafficgen/dt-binding.txt
@@ -0,0 +1,15 @@
+* Xilinx Radio over Ethernet Traffic Generator driver
+
+Required properties:
+- compatible: must be "xlnx,roe-traffic-gen-1.0"
+- reg: physical base address of the framer and length of memory mapped region
+- clock-names: list of clock names
+- clocks: list of clock sources corresponding to the clock names
+
+Example:
+ roe_radio_ctrl@a0060000 {
+ compatible = "xlnx,roe-traffic-gen-1.0";
+ reg = <0x0 0xa0060000 0x0 0x10000>;
+ clock-names = "s_axis_fram_aclk", "s_axi_aclk";
+ clocks = <0x44 0x43>;
+ };
diff --git a/drivers/staging/xroetrafficgen/roe_radio_ctrl.h b/drivers/staging/xroetrafficgen/roe_radio_ctrl.h
new file mode 100644
index 000000000000..e093386f3e94
--- /dev/null
+++ b/drivers/staging/xroetrafficgen/roe_radio_ctrl.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+/*-----------------------------------------------------------------------------
+ * C Header bank BASE definitions
+ *-----------------------------------------------------------------------------
+ */
+#define ROE_RADIO_CFG_BASE_ADDR 0x0
+#define ROE_RADIO_SOURCE_BASE_ADDR 0x1000
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_radio_cfg
+ * with prefix radio_ @ address 0x0
+ *-----------------------------------------------------------------------------
+ */
+/* Type = roInt */
+#define RADIO_ID_ADDR 0x0
+#define RADIO_ID_MASK 0xffffffff
+#define RADIO_ID_OFFSET 0x0
+#define RADIO_ID_WIDTH 0x20
+#define RADIO_ID_DEFAULT 0x120001
+
+/* Type = rw */
+#define RADIO_TIMEOUT_ENABLE_ADDR 0x4
+#define RADIO_TIMEOUT_ENABLE_MASK 0x1
+#define RADIO_TIMEOUT_ENABLE_OFFSET 0x0
+#define RADIO_TIMEOUT_ENABLE_WIDTH 0x1
+#define RADIO_TIMEOUT_ENABLE_DEFAULT 0x0
+
+/* Type = ro */
+#define RADIO_TIMEOUT_STATUS_ADDR 0x8
+#define RADIO_TIMEOUT_STATUS_MASK 0x1
+#define RADIO_TIMEOUT_STATUS_OFFSET 0x0
+#define RADIO_TIMEOUT_STATUS_WIDTH 0x1
+#define RADIO_TIMEOUT_STATUS_DEFAULT 0x1
+
+/* Type = rw */
+#define RADIO_TIMEOUT_VALUE_ADDR 0xc
+#define RADIO_TIMEOUT_VALUE_MASK 0xfff
+#define RADIO_TIMEOUT_VALUE_OFFSET 0x0
+#define RADIO_TIMEOUT_VALUE_WIDTH 0xc
+#define RADIO_TIMEOUT_VALUE_DEFAULT 0x80
+
+/* Type = rw */
+#define RADIO_GPIO_CDC_LEDMODE2_ADDR 0x10
+#define RADIO_GPIO_CDC_LEDMODE2_MASK 0x1
+#define RADIO_GPIO_CDC_LEDMODE2_OFFSET 0x0
+#define RADIO_GPIO_CDC_LEDMODE2_WIDTH 0x1
+#define RADIO_GPIO_CDC_LEDMODE2_DEFAULT 0x0
+
+/* Type = rw */
+#define RADIO_GPIO_CDC_LEDGPIO_ADDR 0x10
+#define RADIO_GPIO_CDC_LEDGPIO_MASK 0x30
+#define RADIO_GPIO_CDC_LEDGPIO_OFFSET 0x4
+#define RADIO_GPIO_CDC_LEDGPIO_WIDTH 0x2
+#define RADIO_GPIO_CDC_LEDGPIO_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_GPIO_CDC_DIPSTATUS_ADDR 0x14
+#define RADIO_GPIO_CDC_DIPSTATUS_MASK 0xff
+#define RADIO_GPIO_CDC_DIPSTATUS_OFFSET 0x0
+#define RADIO_GPIO_CDC_DIPSTATUS_WIDTH 0x8
+#define RADIO_GPIO_CDC_DIPSTATUS_DEFAULT 0x0
+
+/* Type = wPlsH */
+#define RADIO_SW_TRIGGER_ADDR 0x20
+#define RADIO_SW_TRIGGER_MASK 0x1
+#define RADIO_SW_TRIGGER_OFFSET 0x0
+#define RADIO_SW_TRIGGER_WIDTH 0x1
+#define RADIO_SW_TRIGGER_DEFAULT 0x0
+
+/* Type = rw */
+#define RADIO_CDC_ENABLE_ADDR 0x24
+#define RADIO_CDC_ENABLE_MASK 0x1
+#define RADIO_CDC_ENABLE_OFFSET 0x0
+#define RADIO_CDC_ENABLE_WIDTH 0x1
+#define RADIO_CDC_ENABLE_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_ERROR_ADDR 0x24
+#define RADIO_CDC_ERROR_MASK 0x2
+#define RADIO_CDC_ERROR_OFFSET 0x1
+#define RADIO_CDC_ERROR_WIDTH 0x1
+#define RADIO_CDC_ERROR_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_STATUS_ADDR 0x24
+#define RADIO_CDC_STATUS_MASK 0x4
+#define RADIO_CDC_STATUS_OFFSET 0x2
+#define RADIO_CDC_STATUS_WIDTH 0x1
+#define RADIO_CDC_STATUS_DEFAULT 0x0
+
+/* Type = rw */
+#define RADIO_CDC_LOOPBACK_ADDR 0x28
+#define RADIO_CDC_LOOPBACK_MASK 0x1
+#define RADIO_CDC_LOOPBACK_OFFSET 0x0
+#define RADIO_CDC_LOOPBACK_WIDTH 0x1
+#define RADIO_CDC_LOOPBACK_DEFAULT 0x0
+
+/* Type = rw */
+#define RADIO_SINK_ENABLE_ADDR 0x2c
+#define RADIO_SINK_ENABLE_MASK 0x1
+#define RADIO_SINK_ENABLE_OFFSET 0x0
+#define RADIO_SINK_ENABLE_WIDTH 0x1
+#define RADIO_SINK_ENABLE_DEFAULT 0x1
+
+/* Type = roSig */
+#define RADIO_CDC_ERROR_31_0_ADDR 0x30
+#define RADIO_CDC_ERROR_31_0_MASK 0xffffffff
+#define RADIO_CDC_ERROR_31_0_OFFSET 0x0
+#define RADIO_CDC_ERROR_31_0_WIDTH 0x20
+#define RADIO_CDC_ERROR_31_0_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_ERROR_63_32_ADDR 0x34
+#define RADIO_CDC_ERROR_63_32_MASK 0xffffffff
+#define RADIO_CDC_ERROR_63_32_OFFSET 0x0
+#define RADIO_CDC_ERROR_63_32_WIDTH 0x20
+#define RADIO_CDC_ERROR_63_32_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_ERROR_95_64_ADDR 0x38
+#define RADIO_CDC_ERROR_95_64_MASK 0xffffffff
+#define RADIO_CDC_ERROR_95_64_OFFSET 0x0
+#define RADIO_CDC_ERROR_95_64_WIDTH 0x20
+#define RADIO_CDC_ERROR_95_64_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_ERROR_127_96_ADDR 0x3c
+#define RADIO_CDC_ERROR_127_96_MASK 0xffffffff
+#define RADIO_CDC_ERROR_127_96_OFFSET 0x0
+#define RADIO_CDC_ERROR_127_96_WIDTH 0x20
+#define RADIO_CDC_ERROR_127_96_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_STATUS_31_0_ADDR 0x40
+#define RADIO_CDC_STATUS_31_0_MASK 0xffffffff
+#define RADIO_CDC_STATUS_31_0_OFFSET 0x0
+#define RADIO_CDC_STATUS_31_0_WIDTH 0x20
+#define RADIO_CDC_STATUS_31_0_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_STATUS_63_32_ADDR 0x44
+#define RADIO_CDC_STATUS_63_32_MASK 0xffffffff
+#define RADIO_CDC_STATUS_63_32_OFFSET 0x0
+#define RADIO_CDC_STATUS_63_32_WIDTH 0x20
+#define RADIO_CDC_STATUS_63_32_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_STATUS_95_64_ADDR 0x48
+#define RADIO_CDC_STATUS_95_64_MASK 0xffffffff
+#define RADIO_CDC_STATUS_95_64_OFFSET 0x0
+#define RADIO_CDC_STATUS_95_64_WIDTH 0x20
+#define RADIO_CDC_STATUS_95_64_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_STATUS_127_96_ADDR 0x4c
+#define RADIO_CDC_STATUS_127_96_MASK 0xffffffff
+#define RADIO_CDC_STATUS_127_96_OFFSET 0x0
+#define RADIO_CDC_STATUS_127_96_WIDTH 0x20
+#define RADIO_CDC_STATUS_127_96_DEFAULT 0x0
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_radio_source
+ * with prefix fram_ @ address 0x1000
+ *-----------------------------------------------------------------------------
+ */
+/* Type = rwpdef */
+#define FRAM_PACKET_DATA_SIZE_ADDR 0x1000
+#define FRAM_PACKET_DATA_SIZE_MASK 0x7f
+#define FRAM_PACKET_DATA_SIZE_OFFSET 0x0
+#define FRAM_PACKET_DATA_SIZE_WIDTH 0x7
+#define FRAM_PACKET_DATA_SIZE_DEFAULT 0x0
+
+/* Type = rwpdef */
+#define FRAM_PAUSE_DATA_SIZE_ADDR 0x1004
+#define FRAM_PAUSE_DATA_SIZE_MASK 0x7f
+#define FRAM_PAUSE_DATA_SIZE_OFFSET 0x0
+#define FRAM_PAUSE_DATA_SIZE_WIDTH 0x7
+#define FRAM_PAUSE_DATA_SIZE_DEFAULT 0x0
diff --git a/drivers/staging/xroetrafficgen/xroe-traffic-gen-sysfs.c b/drivers/staging/xroetrafficgen/xroe-traffic-gen-sysfs.c
new file mode 100644
index 000000000000..c9b05866fd78
--- /dev/null
+++ b/drivers/staging/xroetrafficgen/xroe-traffic-gen-sysfs.c
@@ -0,0 +1,824 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include "roe_radio_ctrl.h"
+#include "xroe-traffic-gen.h"
+
+static int xroe_size;
+static char xroe_tmp[XROE_SIZE_MAX];
+
+/**
+ * utils_sysfs_store_wrapper - Wraps the storing function for sysfs entries
+ * @dev: The structure containing the device's information
+ * @address: The address of the register to be written
+ * @offset: The offset from the address of the register
+ * @mask: The mask to be used on the value to be written
+ * @value: The value to be written to the register
+ *
+ * Wraps the core functionality of all "store" functions of sysfs entries.
+ */
+static void utils_sysfs_store_wrapper(struct device *dev, u32 address,
+ u32 offset, u32 mask, u32 value)
+{
+ void __iomem *working_address;
+ u32 read_register_value = 0;
+ u32 register_value_to_write = 0;
+ u32 delta = 0;
+ u32 buffer = 0;
+ struct xroe_traffic_gen_local *lp = dev_get_drvdata(dev);
+
+ working_address = (void __iomem *)(lp->base_addr + address);
+ read_register_value = ioread32(working_address);
+ buffer = (value << offset);
+ register_value_to_write = read_register_value & ~mask;
+ delta = buffer & mask;
+ register_value_to_write |= delta;
+ iowrite32(register_value_to_write, working_address);
+}
+
+/**
+ * utils_sysfs_show_wrapper - Wraps the "show" function for sysfs entries
+ * @dev: The structure containing the device's information
+ * @address: The address of the register to be read
+ * @offset: The offset from the address of the register
+ * @mask: The mask to be used on the value to be read
+ *
+ * Wraps the core functionality of all "show" functions of sysfs entries.
+ *
+ * Return: The value designated by the address, offset and mask
+ */
+static u32 utils_sysfs_show_wrapper(struct device *dev, u32 address, u32 offset,
+ u32 mask)
+{
+ void __iomem *working_address;
+ u32 buffer;
+ struct xroe_traffic_gen_local *lp = dev_get_drvdata(dev);
+
+ working_address = (void __iomem *)(lp->base_addr + address);
+ buffer = ioread32(working_address);
+ return (buffer & mask) >> offset;
+}
+
+/**
+ * radio_id_show - Returns the block's ID number
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the ID number string
+ *
+ * Returns the traffic gen's ID (0x1179649 by default)
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t radio_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 radio_id;
+
+ radio_id = utils_sysfs_show_wrapper(dev, RADIO_ID_ADDR,
+ RADIO_ID_OFFSET,
+ RADIO_ID_MASK);
+ return sprintf(buf, "%d\n", radio_id);
+}
+static DEVICE_ATTR_RO(radio_id);
+
+/**
+ * timeout_enable_show - Returns the traffic gen's timeout enable status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the enable status
+ *
+ * Reads and writes the traffic gen's timeout enable status to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t timeout_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 timeout_enable;
+
+ timeout_enable = utils_sysfs_show_wrapper(dev,
+ RADIO_TIMEOUT_ENABLE_ADDR,
+ RADIO_TIMEOUT_ENABLE_OFFSET,
+ RADIO_TIMEOUT_ENABLE_MASK);
+ return sprintf(buf, "%d\n", timeout_enable);
+}
+
+/**
+ * timeout_enable_store - Writes to the traffic gens's timeout enable
+ * status register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the enable status
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gens's timeout enable
+ * status to the sysfs entry
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t timeout_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 enable = 0;
+
+ /*
+  * xroe_size is never assigned in this file, so the old
+  * strncmp(xroe_tmp, "true", xroe_size) compared zero bytes and
+  * matched unconditionally, forcing enable = 1 for any input.
+  * sysfs_streq() compares the whole token and also tolerates the
+  * trailing newline that "echo" appends.
+  */
+ if (sysfs_streq(buf, "true"))
+ enable = 1;
+ utils_sysfs_store_wrapper(dev, RADIO_TIMEOUT_ENABLE_ADDR,
+ RADIO_TIMEOUT_ENABLE_OFFSET,
+ RADIO_TIMEOUT_ENABLE_MASK, enable);
+ return count;
+}
+static DEVICE_ATTR_RW(timeout_enable);
+
+/**
+ * timeout_status_show - Returns the timeout status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the ID number string
+ *
+ * Returns the traffic gen's timeout status (0x1 by default)
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t timeout_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 timeout;
+
+ timeout = utils_sysfs_show_wrapper(dev, RADIO_TIMEOUT_STATUS_ADDR,
+ RADIO_TIMEOUT_STATUS_OFFSET,
+ RADIO_TIMEOUT_STATUS_MASK);
+ return sprintf(buf, "%d\n", timeout);
+}
+static DEVICE_ATTR_RO(timeout_status);
+
+/**
+ * timeout_value_show - Returns the traffic gen's timeout value
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the enable status
+ *
+ * Reads and writes the traffic gen's timeout value to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t timeout_value_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 timeout_value;
+
+ timeout_value = utils_sysfs_show_wrapper(dev, RADIO_TIMEOUT_VALUE_ADDR,
+ RADIO_TIMEOUT_VALUE_OFFSET,
+ RADIO_TIMEOUT_VALUE_MASK);
+ return sprintf(buf, "%d\n", timeout_value);
+}
+
+/**
+ * timeout_value_store - Writes to the traffic gen's timeout value
+ * register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the timeout value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gens's timeout value
+ * to the sysfs entry
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t timeout_value_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 timeout_value;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &timeout_value);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, RADIO_TIMEOUT_VALUE_ADDR,
+ RADIO_TIMEOUT_VALUE_OFFSET,
+ RADIO_TIMEOUT_VALUE_MASK, timeout_value);
+ return count;
+}
+static DEVICE_ATTR_RW(timeout_value);
+
+/**
+ * ledmode_show - Returns the current LED mode
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the enable status
+ *
+ * Reads and writes the traffic gen's LED mode value to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t ledmode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ u32 ledmode;
+
+ ledmode = utils_sysfs_show_wrapper(dev, RADIO_GPIO_CDC_LEDMODE2_ADDR,
+ RADIO_GPIO_CDC_LEDMODE2_OFFSET,
+ RADIO_GPIO_CDC_LEDMODE2_MASK);
+ return sprintf(buf, "%d\n", ledmode);
+}
+
+/**
+ * ledmode_store - Writes to the current LED mode register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the timeout value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gens's LED mode value
+ * to the sysfs entry
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t ledmode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 ledmode;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &ledmode);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, RADIO_GPIO_CDC_LEDMODE2_ADDR,
+ RADIO_GPIO_CDC_LEDMODE2_OFFSET,
+ RADIO_GPIO_CDC_LEDMODE2_MASK, ledmode);
+ return count;
+}
+static DEVICE_ATTR_RW(ledmode);
+
+/**
+ * ledgpio_show - Returns the current LED gpio
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the enable status
+ *
+ * Reads and writes the traffic gen's LED gpio value to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t ledgpio_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ u32 ledgpio;
+
+ ledgpio = utils_sysfs_show_wrapper(dev, RADIO_GPIO_CDC_LEDGPIO_ADDR,
+ RADIO_GPIO_CDC_LEDGPIO_OFFSET,
+ RADIO_GPIO_CDC_LEDGPIO_MASK);
+ return sprintf(buf, "%d\n", ledgpio);
+}
+
+/**
+ * ledgpio_store - Writes to the current LED gpio register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the timeout value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gens's LED gpio value
+ * to the sysfs entry
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t ledgpio_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 ledgpio;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &ledgpio);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, RADIO_GPIO_CDC_LEDGPIO_ADDR,
+ RADIO_GPIO_CDC_LEDGPIO_OFFSET,
+ RADIO_GPIO_CDC_LEDGPIO_MASK, ledgpio);
+ return count;
+}
+static DEVICE_ATTR_RW(ledgpio);
+
+/**
+ * dip_status_show - Returns the current DIP switch value
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the enable status
+ *
+ * Reads and writes the GPIO DIP switch value to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t dip_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 dip_status;
+
+ /*
+  * Read the DIP-switch register (RADIO_GPIO_CDC_DIPSTATUS_*,
+  * offset 0x14); the old code read the LED GPIO register instead
+  * and therefore never reported the actual switch state.
+  */
+ dip_status = utils_sysfs_show_wrapper(dev,
+ RADIO_GPIO_CDC_DIPSTATUS_ADDR,
+ RADIO_GPIO_CDC_DIPSTATUS_OFFSET,
+ RADIO_GPIO_CDC_DIPSTATUS_MASK);
+ return sprintf(buf, "0x%08x\n", dip_status);
+}
+static DEVICE_ATTR_RO(dip_status);
+
+/**
+ * sw_trigger_show - Returns the current SW trigger status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the enable status
+ *
+ * Reads and writes the traffic gen's SW trigger status value to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t sw_trigger_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 sw_trigger;
+
+ sw_trigger = utils_sysfs_show_wrapper(dev, RADIO_SW_TRIGGER_ADDR,
+ RADIO_SW_TRIGGER_OFFSET,
+ RADIO_SW_TRIGGER_MASK);
+ return sprintf(buf, "%d\n", sw_trigger);
+}
+
+/**
+ * sw_trigger_store - Writes to the SW trigger status register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the SW trigger value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's SW trigger
+ * value to the sysfs entry
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t sw_trigger_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 sw_trigger;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &sw_trigger);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, RADIO_SW_TRIGGER_ADDR,
+ RADIO_SW_TRIGGER_OFFSET,
+ RADIO_SW_TRIGGER_MASK, sw_trigger);
+ return count;
+}
+static DEVICE_ATTR_RW(sw_trigger);
+
+/**
+ * radio_enable_show - Returns the current radio enable status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the enable status
+ *
+ * Reads and writes the traffic gen's radio enable value to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t radio_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 radio_enable;
+
+ radio_enable = utils_sysfs_show_wrapper(dev, RADIO_CDC_ENABLE_ADDR,
+ RADIO_CDC_ENABLE_OFFSET,
+ RADIO_CDC_ENABLE_MASK);
+ return sprintf(buf, "%d\n", radio_enable);
+}
+
+/**
+ * radio_enable_store - Writes to the radio enable register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the radio enable value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's radio enable
+ * value to the sysfs entry
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t radio_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 radio_enable;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &radio_enable);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, RADIO_CDC_ENABLE_ADDR,
+ RADIO_CDC_ENABLE_OFFSET,
+ RADIO_CDC_ENABLE_MASK,
+ radio_enable);
+ return count;
+}
+static DEVICE_ATTR_RW(radio_enable);
+
+/**
+ * radio_error_show - Returns the current radio error status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the error status
+ *
+ * Reads and writes the traffic gen's radio error value to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t radio_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 radio_error;
+
+ radio_error = utils_sysfs_show_wrapper(dev, RADIO_CDC_ERROR_ADDR,
+ RADIO_CDC_STATUS_OFFSET,
+ RADIO_CDC_STATUS_MASK);
+ return sprintf(buf, "%d\n", radio_error);
+}
+static DEVICE_ATTR_RO(radio_error);
+
+/**
+ * radio_status_show - Returns the current radio status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the status
+ *
+ * Reads and writes the traffic gen's radio status value to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t radio_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 radio_status;
+
+ radio_status = utils_sysfs_show_wrapper(dev, RADIO_CDC_STATUS_ADDR,
+ RADIO_CDC_STATUS_OFFSET,
+ RADIO_CDC_STATUS_MASK);
+ return sprintf(buf, "%d\n", radio_status);
+}
+static DEVICE_ATTR_RO(radio_status);
+
+/**
+ * radio_loopback_show - Returns the current radio loopback status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the loopback status
+ *
+ * Reads and writes the traffic gen's radio loopback value to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t radio_loopback_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 radio_loopback;
+
+ radio_loopback = utils_sysfs_show_wrapper(dev,
+ RADIO_CDC_LOOPBACK_ADDR,
+ RADIO_CDC_LOOPBACK_OFFSET,
+ RADIO_CDC_LOOPBACK_MASK);
+ return sprintf(buf, "%d\n", radio_loopback);
+}
+
+/**
+ * radio_loopback_store - Writes to the radio loopback register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the radio loopback value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's radio loopback
+ * value to the sysfs entry
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t radio_loopback_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 radio_loopback;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &radio_loopback);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, RADIO_CDC_LOOPBACK_ADDR,
+ RADIO_CDC_LOOPBACK_OFFSET,
+ RADIO_CDC_LOOPBACK_MASK, radio_loopback);
+ return count;
+}
+static DEVICE_ATTR_RW(radio_loopback);
+
+/**
+ * radio_sink_enable_show - Returns the current radio sink enable status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the sink enable status
+ *
+ * Reads and writes the traffic gen's radio sink enable value to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t radio_sink_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 sink_enable;
+
+ sink_enable = utils_sysfs_show_wrapper(dev, RADIO_SINK_ENABLE_ADDR,
+ RADIO_SINK_ENABLE_OFFSET,
+ RADIO_SINK_ENABLE_MASK);
+ return sprintf(buf, "%d\n", sink_enable);
+}
+
+/**
+ * radio_sink_enable_store - Writes to the radio sink enable register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the sink enable value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's radio sink
+ * enable value to the sysfs entry
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t radio_sink_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 sink_enable;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &sink_enable);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, RADIO_SINK_ENABLE_ADDR,
+ RADIO_SINK_ENABLE_OFFSET,
+ RADIO_SINK_ENABLE_MASK, sink_enable);
+ return count;
+}
+static DEVICE_ATTR_RW(radio_sink_enable);
+
+/**
+ * antenna_status_show - Returns the status for all antennas
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the status of all antennas
+ *
+ * Returns the traffic gen's status for all antennas
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t antenna_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 status_0_31;
+ u32 status_63_32;
+ u32 status_95_64;
+ u32 status_127_96;
+
+ status_0_31 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_STATUS_31_0_ADDR,
+ RADIO_CDC_STATUS_31_0_OFFSET,
+ lower_32_bits(RADIO_CDC_STATUS_31_0_MASK));
+ status_63_32 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_STATUS_63_32_ADDR,
+ RADIO_CDC_STATUS_63_32_OFFSET,
+ lower_32_bits(RADIO_CDC_STATUS_63_32_MASK));
+ status_95_64 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_STATUS_95_64_ADDR,
+ RADIO_CDC_STATUS_95_64_OFFSET,
+ lower_32_bits(RADIO_CDC_STATUS_95_64_MASK));
+ status_127_96 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_STATUS_127_96_ADDR,
+ RADIO_CDC_STATUS_127_96_OFFSET,
+ lower_32_bits(RADIO_CDC_STATUS_127_96_MASK));
+
+ return sprintf(buf, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+ status_0_31, status_63_32, status_95_64, status_127_96);
+}
+static DEVICE_ATTR_RO(antenna_status);
+
+/**
+ * antenna_error_show - Returns the error for all antennas
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the error status of all antennas
+ *
+ * Returns the traffic gen's error for all antennas
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t antenna_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 error_0_31;
+ u32 error_63_32;
+ u32 error_95_64;
+ u32 error_127_96;
+
+ error_0_31 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_ERROR_31_0_ADDR,
+ RADIO_CDC_ERROR_31_0_OFFSET,
+ lower_32_bits(RADIO_CDC_ERROR_31_0_MASK));
+ error_63_32 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_ERROR_63_32_ADDR,
+ RADIO_CDC_ERROR_63_32_OFFSET,
+ lower_32_bits(RADIO_CDC_ERROR_63_32_MASK));
+ error_95_64 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_ERROR_95_64_ADDR,
+ RADIO_CDC_ERROR_95_64_OFFSET,
+ lower_32_bits(RADIO_CDC_ERROR_95_64_MASK));
+ error_127_96 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_ERROR_127_96_ADDR,
+ RADIO_CDC_ERROR_127_96_OFFSET,
+ lower_32_bits(RADIO_CDC_ERROR_127_96_MASK));
+
+ return sprintf(buf, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+ error_0_31, error_63_32, error_95_64, error_127_96);
+}
+static DEVICE_ATTR_RO(antenna_error);
+
+/**
+ * framer_packet_size_show - Returns the size of the framer's packet
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the framer packet size
+ *
+ * Reads and writes the traffic gen's framer packet size value
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t framer_packet_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 packet_size;
+
+ packet_size = utils_sysfs_show_wrapper(dev, FRAM_PACKET_DATA_SIZE_ADDR,
+ FRAM_PACKET_DATA_SIZE_OFFSET,
+ FRAM_PACKET_DATA_SIZE_MASK);
+ return sprintf(buf, "%d\n", packet_size);
+}
+
+/**
+ * framer_packet_size_store - Writes to the framer's packet size register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the packet size value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's framer packet
+ * size value to the sysfs entry
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t framer_packet_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 packet_size;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &packet_size);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, FRAM_PACKET_DATA_SIZE_ADDR,
+ FRAM_PACKET_DATA_SIZE_OFFSET,
+ FRAM_PACKET_DATA_SIZE_MASK, packet_size);
+ return count;
+}
+static DEVICE_ATTR_RW(framer_packet_size);
+
+/**
+ * framer_pause_size_show - Returns the size of the framer's pause
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the framer pause size
+ *
+ * Reads and writes the traffic gen's framer pause size value
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t framer_pause_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 pause_size;
+
+ pause_size = utils_sysfs_show_wrapper(dev, FRAM_PAUSE_DATA_SIZE_ADDR,
+ FRAM_PAUSE_DATA_SIZE_OFFSET,
+ FRAM_PAUSE_DATA_SIZE_MASK);
+ return sprintf(buf, "%d\n", pause_size);
+}
+
+/**
+ * framer_pause_size_store - Writes to the framer's pause size register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the pause size value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's framer pause
+ * size value to the sysfs entry
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t framer_pause_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 pause_size;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &pause_size);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, FRAM_PAUSE_DATA_SIZE_ADDR,
+ FRAM_PAUSE_DATA_SIZE_OFFSET,
+ FRAM_PAUSE_DATA_SIZE_MASK, pause_size);
+ return count;
+}
+static DEVICE_ATTR_RW(framer_pause_size);
+
+static struct attribute *xroe_traffic_gen_attrs[] = {
+ &dev_attr_radio_id.attr,
+ &dev_attr_timeout_enable.attr,
+ &dev_attr_timeout_status.attr,
+ &dev_attr_timeout_value.attr,
+ &dev_attr_ledmode.attr,
+ &dev_attr_ledgpio.attr,
+ &dev_attr_dip_status.attr,
+ &dev_attr_sw_trigger.attr,
+ &dev_attr_radio_enable.attr,
+ &dev_attr_radio_error.attr,
+ &dev_attr_radio_status.attr,
+ &dev_attr_radio_loopback.attr,
+ &dev_attr_radio_sink_enable.attr,
+ &dev_attr_antenna_status.attr,
+ &dev_attr_antenna_error.attr,
+ &dev_attr_framer_packet_size.attr,
+ &dev_attr_framer_pause_size.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(xroe_traffic_gen);
+
+/**
+ * xroe_traffic_gen_sysfs_init - Creates the xroe sysfs directory and entries
+ * @dev: The device's structure
+ *
+ * Return: 0 on success, negative value in case of failure to
+ * create the sysfs group
+ *
+ * Creates the xroetrafficgen sysfs directory and entries
+ */
+int xroe_traffic_gen_sysfs_init(struct device *dev)
+{
+ int ret;
+
+ dev->groups = xroe_traffic_gen_groups;
+ ret = sysfs_create_group(&dev->kobj, *xroe_traffic_gen_groups);
+ if (ret)
+ dev_err(dev, "sysfs creation failed\n");
+
+ return ret;
+}
+
+/**
+ * xroe_traffic_gen_sysfs_exit - Deletes the xroe sysfs directory and entries
+ * @dev: The device's structure
+ *
+ * Deletes the xroetrafficgen sysfs directory and entries
+ */
+void xroe_traffic_gen_sysfs_exit(struct device *dev)
+{
+ sysfs_remove_group(&dev->kobj, *xroe_traffic_gen_groups);
+}
diff --git a/drivers/staging/xroetrafficgen/xroe-traffic-gen.c b/drivers/staging/xroetrafficgen/xroe-traffic-gen.c
new file mode 100644
index 000000000000..1ed6e488d38d
--- /dev/null
+++ b/drivers/staging/xroetrafficgen/xroe-traffic-gen.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/sysfs.h>
+#include "xroe-traffic-gen.h"
+
+#define DRIVER_NAME "xroe_traffic_gen"
+
+static struct platform_driver xroe_traffic_gen_driver;
+
+/**
+ * xroe_traffic_gen_probe - Probes the device tree to locate the traffic gen
+ * block
+ * @pdev: The structure containing the device's details
+ *
+ * Probes the device tree to locate the traffic gen block and maps it to
+ * the kernel virtual memory space
+ *
+ * Return: 0 on success or a negative errno on error.
+ */
+static int xroe_traffic_gen_probe(struct platform_device *pdev)
+{
+ struct xroe_traffic_gen_local *lp;
+ struct resource *r_mem; /* IO mem resources */
+ struct device *dev = &pdev->dev;
+
+ lp = devm_kzalloc(&pdev->dev, sizeof(*lp), GFP_KERNEL);
+ if (!lp)
+ return -ENOMEM;
+
+ /* Get iospace for the device */
+ /*
+ * TODO: Use platform_get_resource_byname() instead when the DT entry
+ * of the traffic gen block has been finalised (when it gets out of
+ * the development stage).
+ */
+ r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lp->base_addr = devm_ioremap_resource(&pdev->dev, r_mem);
+ if (IS_ERR(lp->base_addr))
+ return PTR_ERR(lp->base_addr);
+
+ dev_set_drvdata(dev, lp);
+ xroe_traffic_gen_sysfs_init(dev);
+ return 0;
+}
+
+/**
+ * xroe_traffic_gen_remove - Removes the sysfs entries created by the driver
+ * @pdev: The structure containing the device's details
+ *
+ * Removes the sysfs entries created by the driver
+ *
+ * Return: 0
+ */
+static int xroe_traffic_gen_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ xroe_traffic_gen_sysfs_exit(dev);
+ return 0;
+}
+
+/**
+ * xroe_traffic_gen_init - Registers the driver
+ *
+ * Return: 0 on success, negative errno on registration failure
+ *
+ * Registers the traffic gen driver and creates the sysfs entries related
+ * to it
+ */
+static int __init xroe_traffic_gen_init(void)
+{
+ int ret;
+
+ pr_info("XROE traffic generator driver init\n");
+ ret = platform_driver_register(&xroe_traffic_gen_driver);
+ return ret;
+}
+
+/**
+ * xroe_traffic_gen_exit - Destroys the driver
+ *
+ * Unregisters the traffic gen driver
+ */
+static void __exit xroe_traffic_gen_exit(void)
+{
+ platform_driver_unregister(&xroe_traffic_gen_driver);
+ pr_debug("XROE traffic generator driver exit\n");
+}
+
+static const struct of_device_id xroe_traffic_gen_of_match[] = {
+ { .compatible = "xlnx,roe-traffic-gen-1.0", },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, xroe_traffic_gen_of_match);
+
+static struct platform_driver xroe_traffic_gen_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = xroe_traffic_gen_of_match,
+ },
+ .probe = xroe_traffic_gen_probe,
+ .remove = xroe_traffic_gen_remove,
+};
+
+module_init(xroe_traffic_gen_init);
+module_exit(xroe_traffic_gen_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_DESCRIPTION("Xilinx Radio over Ethernet Traffic Generator driver");
diff --git a/drivers/staging/xroetrafficgen/xroe-traffic-gen.h b/drivers/staging/xroetrafficgen/xroe-traffic-gen.h
new file mode 100644
index 000000000000..55d968d89e10
--- /dev/null
+++ b/drivers/staging/xroetrafficgen/xroe-traffic-gen.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+
+struct xroe_traffic_gen_local {
+ void __iomem *base_addr;
+};
+
+enum { XROE_SIZE_MAX = 15 };
+
+int xroe_traffic_gen_sysfs_init(struct device *dev);
+void xroe_traffic_gen_sysfs_exit(struct device *dev);
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 65e9045dafe6..463814782664 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -83,8 +83,10 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
port->mapsize = resource_size(&resource);
/* Check for shifted address mapping */
- if (of_property_read_u32(np, "reg-offset", &prop) == 0)
+ if (of_property_read_u32(np, "reg-offset", &prop) == 0) {
port->mapbase += prop;
+ port->mapsize -= prop;
+ }
port->iotype = UPIO_MEM;
if (of_property_read_u32(np, "reg-io-width", &prop) == 0) {
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 458fc3d9d48c..a7a36da99925 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2059,17 +2059,45 @@ sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios,
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
unsigned long flags;
+ unsigned int lcr_h, old_cr;
tty_termios_encode_baud_rate(termios, uap->fixed_baud, uap->fixed_baud);
-
/* The SBSA UART only supports 8n1 without hardware flow control. */
- termios->c_cflag &= ~(CSIZE | CSTOPB | PARENB | PARODD);
termios->c_cflag &= ~(CMSPAR | CRTSCTS);
- termios->c_cflag |= CS8 | CLOCAL;
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ lcr_h = UART01x_LCRH_WLEN_5;
+ break;
+ case CS6:
+ lcr_h = UART01x_LCRH_WLEN_6;
+ break;
+ case CS7:
+ lcr_h = UART01x_LCRH_WLEN_7;
+ break;
+ default:
+ lcr_h = UART01x_LCRH_WLEN_8;
+ break;
+ }
+ if (termios->c_cflag & CSTOPB)
+ lcr_h |= UART01x_LCRH_STP2;
+ if (termios->c_cflag & PARENB) {
+ lcr_h |= UART01x_LCRH_PEN;
+ if (!(termios->c_cflag & PARODD))
+ lcr_h |= UART01x_LCRH_EPS;
+ if (termios->c_cflag & CMSPAR)
+ lcr_h |= UART011_LCRH_SPS;
+ }
+ if (uap->fifosize > 1)
+ lcr_h |= UART01x_LCRH_FEN;
spin_lock_irqsave(&port->lock, flags);
uart_update_timeout(port, CS8, uap->fixed_baud);
pl011_setup_status_masks(port, termios);
+ /* first, disable everything */
+ old_cr = pl011_read(uap, REG_CR);
+ pl011_write(0, uap, REG_CR);
+ pl011_write_lcr_h(uap, lcr_h);
+ pl011_write(old_cr, uap, REG_CR);
spin_unlock_irqrestore(&port->lock, flags);
}
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 56e108902502..a20ce70eb153 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -228,6 +228,13 @@ static void cdns_uart_handle_rx(void *dev_id, unsigned int isrstatus)
is_rxbs_support = cdns_uart->quirks & CDNS_UART_RXBS_SUPPORT;
+ /*
+ * RXEMPTY will never be set if RX is disabled as read bytes
+ * will not be removed from the FIFO
+ */
+ if (readl(port->membase + CDNS_UART_CR) & CDNS_UART_CR_RX_DIS)
+ return;
+
while ((readl(port->membase + CDNS_UART_SR) &
CDNS_UART_SR_RXEMPTY) != CDNS_UART_SR_RXEMPTY) {
if (is_rxbs_support)
@@ -375,6 +382,8 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
isrstatus &= ~CDNS_UART_IXR_TXEMPTY;
}
+ isrstatus &= port->read_status_mask;
+ isrstatus &= ~port->ignore_status_mask;
/*
* Skip RX processing if RX is disabled as RXEMPTY will never be set
* as read bytes will not be removed from the FIFO.
@@ -1143,6 +1152,13 @@ static struct uart_driver cdns_uart_uart_driver;
*/
static void cdns_uart_console_putchar(struct uart_port *port, int ch)
{
+ unsigned int ctrl_reg;
+
+ ctrl_reg = readl(port->membase + CDNS_UART_CR);
+ while (ctrl_reg & CDNS_UART_CR_TX_DIS) {
+ ctrl_reg = readl(port->membase + CDNS_UART_CR);
+ cpu_relax();
+ }
while (readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXFULL)
cpu_relax();
writel(ch, port->membase + CDNS_UART_FIFO);
@@ -1235,9 +1251,7 @@ static void cdns_uart_console_write(struct console *co, const char *s,
writel(ctrl, port->membase + CDNS_UART_CR);
uart_console_write(port, s, count, cdns_uart_console_putchar);
- while ((readl(port->membase + CDNS_UART_SR) &
- (CDNS_UART_SR_TXEMPTY | CDNS_UART_SR_TACTIVE)) !=
- CDNS_UART_SR_TXEMPTY)
+ while (cdns_uart_tx_empty(port) != TIOCSER_TEMT)
cpu_relax();
/* restore interrupt state */
@@ -1262,6 +1276,7 @@ static int cdns_uart_console_setup(struct console *co, char *options)
int bits = 8;
int parity = 'n';
int flow = 'n';
+ unsigned long time_out;
if (!port->membase) {
pr_debug("console on " CDNS_UART_TTY_NAME "%i not present\n",
@@ -1272,6 +1287,13 @@ static int cdns_uart_console_setup(struct console *co, char *options)
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
+ /* Wait for tx_empty before setting up the console */
+ time_out = jiffies + usecs_to_jiffies(TX_TIMEOUT);
+
+ while (time_before(jiffies, time_out) &&
+ cdns_uart_tx_empty(port) != TIOCSER_TEMT)
+ cpu_relax();
+
return uart_set_options(port, co, baud, parity, bits, flow);
}
@@ -1558,6 +1580,8 @@ static int cdns_uart_probe(struct platform_device *pdev)
port->dev = &pdev->dev;
port->uartclk = clk_get_rate(cdns_uart_data->uartclk);
port->private_data = cdns_uart_data;
+ port->read_status_mask = CDNS_UART_IXR_TXEMPTY | CDNS_UART_IXR_RXTRIG |
+ CDNS_UART_IXR_OVERRUN | CDNS_UART_IXR_TOUT;
cdns_uart_data->port = port;
platform_set_drvdata(pdev, port);
diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig
index 202ee81cfc2b..bae8e2904c56 100644
--- a/drivers/uio/Kconfig
+++ b/drivers/uio/Kconfig
@@ -2,6 +2,7 @@
menuconfig UIO
tristate "Userspace I/O drivers"
depends on MMU
+ select DMA_SHARED_BUFFER
help
Enable this to allow the userspace driver core code to be
built. This code allows userspace programs easy access to
@@ -165,4 +166,27 @@ config UIO_HV_GENERIC
to network and storage devices from userspace.
If you compile this as a module, it will be called uio_hv_generic.
+
+config UIO_XILINX_APM
+ tristate "Xilinx AXI Performance Monitor driver"
+ depends on MICROBLAZE || ARCH_ZYNQ || ARCH_ZYNQMP
+ help
+ This driver is developed for AXI Performance Monitor IP, designed to
+ monitor AXI4 traffic for performance analysis of AXI bus in the
+ system. Driver maps HW registers and parameters to userspace.
+
+ To compile this driver as a module, choose M here; the module
+ will be called uio_xilinx_apm.
+
+config UIO_XILINX_AI_ENGINE
+ tristate "Xilinx AI Engine driver"
+ select IRQ_SIM
+ select UIO_DMEM_GENIRQ
+ select UIO_PDRV_GENIRQ
+ help
+ The driver for Xilinx AI Engine that utilizes the uio_dmem_genirq.
+ The userspace library will use this to interact with the AI Engine
+ hardware, as well as for the memory allocation.
+ Say 'y' only for platforms with the AI Engine IP.
+
endif
diff --git a/drivers/uio/Makefile b/drivers/uio/Makefile
index c285dd2a4539..622cb4404d1d 100644
--- a/drivers/uio/Makefile
+++ b/drivers/uio/Makefile
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
+uio-y := uio_core.o uio_dmabuf.o
+
obj-$(CONFIG_UIO) += uio.o
obj-$(CONFIG_UIO_CIF) += uio_cif.o
obj-$(CONFIG_UIO_PDRV_GENIRQ) += uio_pdrv_genirq.o
@@ -9,5 +11,7 @@ obj-$(CONFIG_UIO_PCI_GENERIC) += uio_pci_generic.o
obj-$(CONFIG_UIO_NETX) += uio_netx.o
obj-$(CONFIG_UIO_PRUSS) += uio_pruss.o
obj-$(CONFIG_UIO_MF624) += uio_mf624.o
+obj-$(CONFIG_UIO_XILINX_APM) += uio_xilinx_apm.o
obj-$(CONFIG_UIO_FSL_ELBC_GPCM) += uio_fsl_elbc_gpcm.o
obj-$(CONFIG_UIO_HV_GENERIC) += uio_hv_generic.o
+obj-$(CONFIG_UIO_XILINX_AI_ENGINE) += uio_xilinx_ai_engine.o
diff --git a/drivers/uio/uio.c b/drivers/uio/uio_core.c
index 6e725c6c6256..3bb6f93aecb6 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio_core.c
@@ -24,6 +24,12 @@
#include <linux/kobject.h>
#include <linux/cdev.h>
#include <linux/uio_driver.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+#include <uapi/linux/uio/uio.h>
+
+#include "uio_dmabuf.h"
#define UIO_MAX_DEVICES (1U << MINORBITS)
@@ -454,6 +460,8 @@ static irqreturn_t uio_interrupt(int irq, void *dev_id)
struct uio_listener {
struct uio_device *dev;
s32 event_count;
+ struct list_head dbufs;
+ struct mutex dbufs_lock; /* protect @dbufs */
};
static int uio_open(struct inode *inode, struct file *filep)
@@ -500,6 +508,9 @@ static int uio_open(struct inode *inode, struct file *filep)
if (ret)
goto err_infoopen;
+ INIT_LIST_HEAD(&listener->dbufs);
+ mutex_init(&listener->dbufs_lock);
+
return 0;
err_infoopen:
@@ -529,6 +540,10 @@ static int uio_release(struct inode *inode, struct file *filep)
struct uio_listener *listener = filep->private_data;
struct uio_device *idev = listener->dev;
+ ret = uio_dmabuf_cleanup(idev, &listener->dbufs, &listener->dbufs_lock);
+ if (ret)
+ dev_err(&idev->dev, "failed to clean up the dma bufs\n");
+
mutex_lock(&idev->info_lock);
if (idev->info && idev->info->release)
ret = idev->info->release(idev->info, inode);
@@ -652,6 +667,33 @@ out:
return retval ? retval : sizeof(s32);
}
+static long uio_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+ struct uio_listener *listener = filep->private_data;
+ struct uio_device *idev = listener->dev;
+ long ret;
+
+ if (!idev->info)
+ return -EIO;
+
+ switch (cmd) {
+ case UIO_IOC_MAP_DMABUF:
+ ret = uio_dmabuf_map(idev, &listener->dbufs,
+ &listener->dbufs_lock, (void __user *)arg);
+ break;
+ case UIO_IOC_UNMAP_DMABUF:
+ ret = uio_dmabuf_unmap(idev, &listener->dbufs,
+ &listener->dbufs_lock,
+ (void __user *)arg);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
static int uio_find_mem_index(struct vm_area_struct *vma)
{
struct uio_device *idev = vma->vm_private_data;
@@ -821,6 +863,7 @@ static const struct file_operations uio_fops = {
.write = uio_write,
.mmap = uio_mmap,
.poll = uio_poll,
+ .unlocked_ioctl = uio_ioctl,
.fasync = uio_fasync,
.llseek = noop_llseek,
};
diff --git a/drivers/uio/uio_dmabuf.c b/drivers/uio/uio_dmabuf.c
new file mode 100644
index 000000000000..b18f1469f6c8
--- /dev/null
+++ b/drivers/uio/uio_dmabuf.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * DMA buf support for UIO device
+ *
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/uio_driver.h>
+#include <linux/slab.h>
+
+#include <uapi/linux/uio/uio.h>
+
+#include "uio_dmabuf.h"
+
+struct uio_dmabuf_mem {
+ int dbuf_fd;
+ struct dma_buf *dbuf;
+ struct dma_buf_attachment *dbuf_attach;
+ struct sg_table *sgt;
+ enum dma_data_direction dir;
+ struct list_head list;
+};
+
+long uio_dmabuf_map(struct uio_device *dev, struct list_head *dbufs,
+ struct mutex *dbufs_lock, void __user *user_args)
+{
+ struct uio_dmabuf_args args;
+ struct uio_dmabuf_mem *dbuf_mem;
+ struct dma_buf *dbuf;
+ struct dma_buf_attachment *dbuf_attach;
+ enum dma_data_direction dir;
+ struct sg_table *sgt;
+ long ret;
+
+ if (copy_from_user(&args, user_args, sizeof(args))) {
+ ret = -EFAULT;
+ dev_err(dev->dev.parent, "failed to copy from user\n");
+ goto err;
+ }
+
+ dbuf = dma_buf_get(args.dbuf_fd);
+ if (IS_ERR(dbuf)) {
+ dev_err(dev->dev.parent, "failed to get dmabuf\n");
+ return PTR_ERR(dbuf);
+ }
+
+ dbuf_attach = dma_buf_attach(dbuf, dev->dev.parent);
+ if (IS_ERR(dbuf_attach)) {
+ dev_err(dev->dev.parent, "failed to attach dmabuf\n");
+ ret = PTR_ERR(dbuf_attach);
+ goto err_put;
+ }
+
+ switch (args.dir) {
+ case UIO_DMABUF_DIR_BIDIR:
+ dir = DMA_BIDIRECTIONAL;
+ break;
+ case UIO_DMABUF_DIR_TO_DEV:
+ dir = DMA_TO_DEVICE;
+ break;
+ case UIO_DMABUF_DIR_FROM_DEV:
+ dir = DMA_FROM_DEVICE;
+ break;
+ default:
+ /* Not needed with check. Just here for any future change */
+ dev_err(dev->dev.parent, "invalid direction\n");
+ ret = -EINVAL;
+ goto err_detach;
+ }
+
+ sgt = dma_buf_map_attachment(dbuf_attach, dir);
+ if (IS_ERR(sgt)) {
+ dev_err(dev->dev.parent, "failed to get dmabuf scatterlist\n");
+ ret = PTR_ERR(sgt);
+ goto err_detach;
+ }
+
+ /* Accept only contiguous one */
+ if (sgt->nents != 1) {
+ dma_addr_t next_addr = sg_dma_address(sgt->sgl);
+ struct scatterlist *s;
+ unsigned int i;
+
+ for_each_sg(sgt->sgl, s, sgt->nents, i) {
+ if (!sg_dma_len(s))
+ continue;
+
+ if (sg_dma_address(s) != next_addr) {
+ dev_err(dev->dev.parent,
+ "dmabuf not contiguous\n");
+ ret = -EINVAL;
+ goto err_unmap;
+ }
+
+ next_addr = sg_dma_address(s) + sg_dma_len(s);
+ }
+ }
+
+ dbuf_mem = kzalloc(sizeof(*dbuf_mem), GFP_KERNEL);
+ if (!dbuf_mem) {
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+
+ dbuf_mem->dbuf_fd = args.dbuf_fd;
+ dbuf_mem->dbuf = dbuf;
+ dbuf_mem->dbuf_attach = dbuf_attach;
+ dbuf_mem->sgt = sgt;
+ dbuf_mem->dir = dir;
+ args.dma_addr = sg_dma_address(sgt->sgl);
+ args.size = dbuf->size;
+
+ if (copy_to_user(user_args, &args, sizeof(args))) {
+ ret = -EFAULT;
+ dev_err(dev->dev.parent, "failed to copy to user\n");
+ goto err_free;
+ }
+
+ mutex_lock(dbufs_lock);
+ list_add(&dbuf_mem->list, dbufs);
+ mutex_unlock(dbufs_lock);
+
+ return 0;
+
+err_free:
+ kfree(dbuf_mem);
+err_unmap:
+ dma_buf_unmap_attachment(dbuf_attach, sgt, dir);
+err_detach:
+ dma_buf_detach(dbuf, dbuf_attach);
+err_put:
+ dma_buf_put(dbuf);
+err:
+ return ret;
+}
+
+long uio_dmabuf_unmap(struct uio_device *dev, struct list_head *dbufs,
+ struct mutex *dbufs_lock, void __user *user_args)
+
+{
+ struct uio_dmabuf_args args;
+ struct uio_dmabuf_mem *dbuf_mem;
+ long ret;
+
+ if (copy_from_user(&args, user_args, sizeof(args))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ mutex_lock(dbufs_lock);
+ list_for_each_entry(dbuf_mem, dbufs, list) {
+ if (dbuf_mem->dbuf_fd == args.dbuf_fd)
+ break;
+ }
+
+ if (dbuf_mem->dbuf_fd != args.dbuf_fd) {
+ dev_err(dev->dev.parent, "failed to find the dmabuf (%d)\n",
+ args.dbuf_fd);
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+ list_del(&dbuf_mem->list);
+ mutex_unlock(dbufs_lock);
+
+ dma_buf_unmap_attachment(dbuf_mem->dbuf_attach, dbuf_mem->sgt,
+ dbuf_mem->dir);
+ dma_buf_detach(dbuf_mem->dbuf, dbuf_mem->dbuf_attach);
+ dma_buf_put(dbuf_mem->dbuf);
+ kfree(dbuf_mem);
+
+ memset(&args, 0x0, sizeof(args));
+
+ if (copy_to_user(user_args, &args, sizeof(args))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ return 0;
+
+err_unlock:
+ mutex_unlock(dbufs_lock);
+err:
+ return ret;
+}
+
+int uio_dmabuf_cleanup(struct uio_device *dev, struct list_head *dbufs,
+ struct mutex *dbufs_lock)
+{
+ struct uio_dmabuf_mem *dbuf_mem, *next;
+
+ mutex_lock(dbufs_lock);
+ list_for_each_entry_safe(dbuf_mem, next, dbufs, list) {
+ list_del(&dbuf_mem->list);
+ dma_buf_unmap_attachment(dbuf_mem->dbuf_attach, dbuf_mem->sgt,
+ dbuf_mem->dir);
+ dma_buf_detach(dbuf_mem->dbuf, dbuf_mem->dbuf_attach);
+ dma_buf_put(dbuf_mem->dbuf);
+ kfree(dbuf_mem);
+ }
+ mutex_unlock(dbufs_lock);
+
+ return 0;
+}
diff --git a/drivers/uio/uio_dmabuf.h b/drivers/uio/uio_dmabuf.h
new file mode 100644
index 000000000000..30200306d53a
--- /dev/null
+++ b/drivers/uio/uio_dmabuf.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ *
+ * DMA buf support for UIO device
+ *
+ */
+
+#ifndef _UIO_DMABUF_H_
+#define _UIO_DMABUF_H_
+
+struct uio_device;
+struct list_head;
+struct mutex;
+
+long uio_dmabuf_map(struct uio_device *dev, struct list_head *dbufs,
+ struct mutex *dbufs_lock, void __user *user_args);
+long uio_dmabuf_unmap(struct uio_device *dev, struct list_head *dbufs,
+ struct mutex *dbufs_lock, void __user *user_args);
+
+int uio_dmabuf_cleanup(struct uio_device *dev, struct list_head *dbufs,
+ struct mutex *dbufs_lock);
+
+#endif
diff --git a/drivers/uio/uio_xilinx_ai_engine.c b/drivers/uio/uio_xilinx_ai_engine.c
new file mode 100644
index 000000000000..174efa805b52
--- /dev/null
+++ b/drivers/uio/uio_xilinx_ai_engine.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx UIO driver for AI Engine
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/irq_sim.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_data/uio_dmem_genirq.h>
+#include <linux/platform_device.h>
+#include <linux/uio_driver.h>
+
+#define DRIVER_NAME "xilinx-aiengine"
+#define XILINX_AI_ENGINE_MAX_IRQ 4
+
+static uint xilinx_ai_engine_mem_cnt = 1;
+module_param_named(mem_cnt, xilinx_ai_engine_mem_cnt, uint, 0444);
+MODULE_PARM_DESC(mem_cnt, "Dynamic memory allocation count (default: 1)");
+
+static uint xilinx_ai_engine_mem_size = 32 * 1024 * 1024;
+module_param_named(mem_size, xilinx_ai_engine_mem_size, uint, 0444);
+MODULE_PARM_DESC(mem_size,
+ "Dynamic memory allocation size in bytes (default: 32 MB)");
+
+#ifdef CONFIG_DEBUG_FS
+
/*
 * Any write to the debugfs control file fires simulated irq line 1; the
 * written data itself is ignored and the whole write is reported consumed.
 */
static ssize_t xilinx_ai_engine_debugfs_write(struct file *f,
					      const char __user *buf,
					      size_t size, loff_t *pos)
{
	struct irq_sim *irq_sim = file_inode(f)->i_private;

	irq_sim_fire(irq_sim, 1);

	return size;
}
+
/* Write-only control file: each write triggers the simulated interrupt */
static const struct file_operations debugfs_ops = {
	.owner = THIS_MODULE,
	.write = xilinx_ai_engine_debugfs_write,
};
+
/**
 * xilinx_ai_engine_debugfs_init - Initialize the debugfs for irq sim
 * @pdev: platform device to simulate irq for
 * @irq_sim: simulated irq
 *
 * Initialize the debugfs for irq simulation. This allows to generate
 * the simulated interrupt from user.
 *
 * NOTE(review): the directory dentry is not stored anywhere, so it is only
 * removed on the file-creation error path below, never at driver removal —
 * confirm whether a remove-time debugfs cleanup is needed.
 *
 * Return: 0 for success, error code otherwise.
 */
static int xilinx_ai_engine_debugfs_init(struct platform_device *pdev,
					 struct irq_sim *irq_sim)
{
	int ret;
	struct dentry *debugfs_dir, *debugfs_file;

	debugfs_dir = debugfs_create_dir("xilinx-ai-engine", NULL);
	if (!debugfs_dir)
		return -ENODEV;

	/* One control file per device, named after the device */
	debugfs_file = debugfs_create_file(dev_name(&pdev->dev), 0644,
					   debugfs_dir, irq_sim, &debugfs_ops);
	if (!debugfs_file) {
		ret = -ENODEV;
		goto err_out;
	}

	return 0;

err_out:
	debugfs_remove_recursive(debugfs_dir);
	return ret;
}
+
/**
 * xilinx_ai_engine_simulate_irq - Simulate the irq
 * @pdev: platform device to simulate irq for
 *
 * Simulate the irq so the irq can be generated from user. This is only for
 * debugging purpose.
 *
 * Return: the simulated irq number on success, error code otherwise.
 */
static int xilinx_ai_engine_simulate_irq(struct platform_device *pdev)
{
	struct irq_sim *irq_sim;
	int irq, ret;

	irq_sim = devm_kzalloc(&pdev->dev, sizeof(*irq_sim), GFP_KERNEL);
	if (!irq_sim)
		return -ENOMEM;

	/*
	 * Sometimes, the returned base value is 0, so allocate 2 irqs, and
	 * always use the 2nd one.
	 */
	irq = devm_irq_sim_init(&pdev->dev, irq_sim, 2);
	if (irq < 0)
		return irq;

	ret = xilinx_ai_engine_debugfs_init(pdev, irq_sim);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed create debugfs for sim irq");
		return ret;
	}

	/* Index 1, per the comment above: index 0 may map to irq number 0 */
	return irq_sim_irqnum(irq_sim, 1);
}
+
#else

/* Without debugfs there is no way to trigger a simulated interrupt */
static int xilinx_ai_engine_simulate_irq(struct platform_device *pdev)
{
	return -ENODEV;
}

#endif
+
+static int xilinx_ai_engine_mem_index(struct uio_info *info,
+ struct vm_area_struct *vma)
+{
+ if (vma->vm_pgoff < MAX_UIO_MAPS) {
+ if (info->mem[vma->vm_pgoff].size == 0)
+ return -1;
+ return (int)vma->vm_pgoff;
+ }
+ return -1;
+}
+
/* Allow ptrace/gdb access to the mapped region where the arch supports it */
static const struct vm_operations_struct xilinx_ai_engine_vm_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys,
#endif
};
+
/**
 * xilinx_ai_engine_mmap - mmap callback for the AI Engine UIO maps
 * @info: UIO info structure for this device
 * @vma: VMA of the request; vm_pgoff selects the map index, not an offset
 *
 * Map index 0 is the MMIO region and is mapped uncached; every later index
 * is a dynamically allocated memory region mapped write-combined.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int xilinx_ai_engine_mmap(struct uio_info *info,
				 struct vm_area_struct *vma)
{
	int mi = xilinx_ai_engine_mem_index(info, vma);
	struct uio_mem *mem;

	if (mi < 0)
		return -EINVAL;
	mem = info->mem + mi;

	/* The backing address must be page aligned to be remappable */
	if (mem->addr & ~PAGE_MASK)
		return -ENODEV;
	if (vma->vm_end - vma->vm_start > mem->size)
		return -EINVAL;

	vma->vm_ops = &xilinx_ai_engine_vm_ops;
	/*
	 * Make the dynamic memory mapping as write-combined. Only first one
	 * will be the mmio region, which will be mapped as noncached.
	 */
	if (mi < 1)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	/*
	 * We cannot use the vm_iomap_memory() helper here,
	 * because vma->vm_pgoff is the map index we looked
	 * up above in uio_find_mem_index(), rather than an
	 * actual page offset into the mmap.
	 *
	 * So we just do the physical mmap without a page
	 * offset.
	 */
	return remap_pfn_range(vma,
			       vma->vm_start,
			       mem->addr >> PAGE_SHIFT,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
+
+static int xilinx_ai_engine_probe(struct platform_device *pdev)
+{
+ struct platform_device *uio;
+ struct uio_dmem_genirq_pdata *pdata;
+ unsigned int i;
+ static const char * const interrupt_names[] = { "interrupt0",
+ "interrupt1",
+ "interrupt2",
+ "interrupt3" };
+ int ret;
+
+ uio = platform_device_alloc(DRIVER_NAME, PLATFORM_DEVID_NONE);
+ if (!uio)
+ return -ENOMEM;
+ uio->driver_override = "uio_dmem_genirq";
+ uio->dev.parent = &pdev->dev;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ pdata->num_dynamic_regions = xilinx_ai_engine_mem_cnt;
+ pdata->dynamic_region_sizes = &xilinx_ai_engine_mem_size;
+ pdata->uioinfo.name = DRIVER_NAME;
+ pdata->uioinfo.version = "devicetree";
+ pdata->uioinfo.mmap = xilinx_ai_engine_mmap;
+ /* Set the offset value as it's map index for each memory */
+ for (i = 0; i < MAX_UIO_MAPS; i++)
+ pdata->uioinfo.mem[i].offs = i << PAGE_SHIFT;
+
+ /* TODO: Only one interrupt is supported out of 4 */
+ for (i = 0; i < XILINX_AI_ENGINE_MAX_IRQ; i++) {
+ ret = platform_get_irq_byname(pdev, interrupt_names[i]);
+ if (ret >= 0) {
+ dev_info(&pdev->dev, "%s is used", interrupt_names[i]);
+ break;
+ }
+ }
+
+ /* Interrupt is optional */
+ if (ret < 0) {
+ ret = xilinx_ai_engine_simulate_irq(pdev);
+ if (ret < 0)
+ ret = UIO_IRQ_CUSTOM;
+ }
+ pdata->uioinfo.irq = ret;
+
+ ret = platform_device_add_data(uio, pdata, sizeof(*pdata));
+ if (ret)
+ goto err_out;
+
+ /* Mirror the parent device resource to uio device */
+ ret = platform_device_add_resources(uio, pdev->resource,
+ pdev->num_resources);
+ if (ret)
+ goto err_out;
+
+ /* Configure the dma for uio device using the parent of_node */
+ uio->dev.bus = &platform_bus_type;
+ ret = of_dma_configure(&uio->dev, of_node_get(pdev->dev.of_node), true);
+ of_node_put(pdev->dev.of_node);
+ if (ret)
+ goto err_out;
+
+ ret = platform_device_add(uio);
+ if (ret)
+ goto err_out;
+ platform_set_drvdata(uio, pdata);
+ platform_set_drvdata(pdev, uio);
+
+ dev_info(&pdev->dev, "Xilinx AI Engine UIO driver probed");
+ return 0;
+
+err_out:
+ platform_device_put(uio);
+ dev_err(&pdev->dev,
+ "failed to probe Xilinx AI Engine UIO driver");
+ return ret;
+}
+
/**
 * xilinx_ai_engine_remove - tear down the child UIO platform device
 * @pdev: AI Engine platform device
 *
 * Return: always 0.
 */
static int xilinx_ai_engine_remove(struct platform_device *pdev)
{
	struct platform_device *uio = platform_get_drvdata(pdev);

	platform_device_unregister(uio);
	/*
	 * No of_node_put() here: probe() already dropped the reference it
	 * took for of_dma_configure(), so an extra put would underflow the
	 * of_node refcount.
	 */

	return 0;
}
+
+static const struct of_device_id xilinx_ai_engine_of_match[] = {
+ { .compatible = "xlnx,ai_engine", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, xilinx_ai_engine_of_match);
+
+static struct platform_driver xilinx_ai_engine_driver = {
+ .probe = xilinx_ai_engine_probe,
+ .remove = xilinx_ai_engine_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = xilinx_ai_engine_of_match,
+ },
+};
+
+module_platform_driver(xilinx_ai_engine_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/uio/uio_xilinx_apm.c b/drivers/uio/uio_xilinx_apm.c
new file mode 100644
index 000000000000..e8d289fd2dfa
--- /dev/null
+++ b/drivers/uio/uio_xilinx_apm.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx AXI Performance Monitor
+ *
+ * Copyright (C) 2013 - 2019 Xilinx, Inc. All rights reserved.
+ *
+ * Description:
+ * This driver is developed for AXI Performance Monitor IP,
+ * designed to monitor AXI4 traffic for performance analysis
+ * of AXI bus in the system. Driver maps HW registers and parameters
+ * to userspace. Userspace need not clear the interrupt of IP since
+ * driver clears the interrupt.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/uio_driver.h>
+
+#define XAPM_IS_OFFSET 0x0038 /* Interrupt Status Register */
+#define DRV_NAME "xilinxapm_uio"
+#define DRV_VERSION "1.0"
+#define UIO_DUMMY_MEMSIZE 4096
+#define XAPM_MODE_ADVANCED 1
+#define XAPM_MODE_PROFILE 2
+#define XAPM_MODE_TRACE 3
+
+/**
+ * struct xapm_param - HW parameters structure
+ * @mode: Mode in which APM is working
+ * @maxslots: Maximum number of Slots in APM
+ * @eventcnt: Event counting enabled in APM
+ * @eventlog: Event logging enabled in APM
+ * @sampledcnt: Sampled metric counters enabled in APM
+ * @numcounters: Number of counters in APM
+ * @metricwidth: Metric Counter width (32/64)
+ * @sampledwidth: Sampled metric counter width
+ * @globalcntwidth: Global Clock counter width
+ * @scalefactor: Scaling factor
+ * @isr: Interrupts info shared to userspace
+ * @is_32bit_filter: Flags for 32bit filter
+ * @clk: Clock handle
+ */
+struct xapm_param {
+ u32 mode;
+ u32 maxslots;
+ u32 eventcnt;
+ u32 eventlog;
+ u32 sampledcnt;
+ u32 numcounters;
+ u32 metricwidth;
+ u32 sampledwidth;
+ u32 globalcntwidth;
+ u32 scalefactor;
+ u32 isr;
+ bool is_32bit_filter;
+ struct clk *clk;
+};
+
+/**
+ * struct xapm_dev - Global driver structure
+ * @info: uio_info structure
+ * @param: xapm_param structure
+ * @regs: IOmapped base address
+ */
+struct xapm_dev {
+ struct uio_info info;
+ struct xapm_param param;
+ void __iomem *regs;
+};
+
+/**
+ * xapm_handler - Interrupt handler for APM
+ * @irq: IRQ number
+ * @info: Pointer to uio_info structure
+ *
+ * Return: Always returns IRQ_HANDLED
+ */
+static irqreturn_t xapm_handler(int irq, struct uio_info *info)
+{
+ struct xapm_dev *xapm = (struct xapm_dev *)info->priv;
+ void *ptr;
+
+ ptr = (unsigned long *)xapm->info.mem[1].addr;
+ /* Clear the interrupt and copy the ISR value to userspace */
+ xapm->param.isr = readl(xapm->regs + XAPM_IS_OFFSET);
+ writel(xapm->param.isr, xapm->regs + XAPM_IS_OFFSET);
+ memcpy(ptr, &xapm->param, sizeof(struct xapm_param));
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * xapm_getprop - Retrieves dts properties to param structure
+ * @pdev: Pointer to platform device
+ * @param: Pointer to param structure
+ *
+ * Returns: '0' on success and failure value on error
+ */
+static int xapm_getprop(struct platform_device *pdev, struct xapm_param *param)
+{
+ u32 mode = 0;
+ int ret;
+ struct device_node *node;
+
+ node = pdev->dev.of_node;
+
+ /* Retrieve required dts properties and fill param structure */
+ ret = of_property_read_u32(node, "xlnx,enable-profile", &mode);
+ if (ret < 0)
+ dev_info(&pdev->dev, "no property xlnx,enable-profile\n");
+ else if (mode)
+ param->mode = XAPM_MODE_PROFILE;
+
+ ret = of_property_read_u32(node, "xlnx,enable-trace", &mode);
+ if (ret < 0)
+ dev_info(&pdev->dev, "no property xlnx,enable-trace\n");
+ else if (mode)
+ param->mode = XAPM_MODE_TRACE;
+
+ ret = of_property_read_u32(node, "xlnx,num-monitor-slots",
+ &param->maxslots);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no property xlnx,num-monitor-slots");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,enable-event-count",
+ &param->eventcnt);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no property xlnx,enable-event-count");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,enable-event-log",
+ &param->eventlog);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no property xlnx,enable-event-log");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,have-sampled-metric-cnt",
+ &param->sampledcnt);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no property xlnx,have-sampled-metric-cnt");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,num-of-counters",
+ &param->numcounters);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no property xlnx,num-of-counters");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,metric-count-width",
+ &param->metricwidth);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no property xlnx,metric-count-width");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,metrics-sample-count-width",
+ &param->sampledwidth);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no property metrics-sample-count-width");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,global-count-width",
+ &param->globalcntwidth);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no property xlnx,global-count-width");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,metric-count-scale",
+ &param->scalefactor);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no property xlnx,metric-count-scale");
+ return ret;
+ }
+
+ param->is_32bit_filter = of_property_read_bool(node,
+ "xlnx,id-filter-32bit");
+
+ return 0;
+}
+
+/**
+ * xapm_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Returns: '0' on success and failure value on error
+ */
+
+static int xapm_probe(struct platform_device *pdev)
+{
+ struct xapm_dev *xapm;
+ struct resource *res;
+ int irq;
+ int ret;
+ void *ptr;
+
+ xapm = devm_kzalloc(&pdev->dev, (sizeof(struct xapm_dev)), GFP_KERNEL);
+ if (!xapm)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xapm->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xapm->regs)) {
+ dev_err(&pdev->dev, "unable to iomap registers\n");
+ return PTR_ERR(xapm->regs);
+ }
+
+ xapm->param.clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(xapm->param.clk)) {
+ if (PTR_ERR(xapm->param.clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "axi clock error\n");
+ return PTR_ERR(xapm->param.clk);
+ }
+
+ ret = clk_prepare_enable(xapm->param.clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return ret;
+ }
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ /* Initialize mode as Advanced so that if no mode in dts, default
+ * is Advanced
+ */
+ xapm->param.mode = XAPM_MODE_ADVANCED;
+ ret = xapm_getprop(pdev, &xapm->param);
+ if (ret < 0)
+ goto err_clk_dis;
+
+ xapm->info.mem[0].name = "xilinx_apm";
+ xapm->info.mem[0].addr = res->start;
+ xapm->info.mem[0].size = resource_size(res);
+ xapm->info.mem[0].memtype = UIO_MEM_PHYS;
+
+ xapm->info.mem[1].addr = (unsigned long)kzalloc(UIO_DUMMY_MEMSIZE,
+ GFP_KERNEL);
+ ptr = (unsigned long *)xapm->info.mem[1].addr;
+ xapm->info.mem[1].size = UIO_DUMMY_MEMSIZE;
+ xapm->info.mem[1].memtype = UIO_MEM_LOGICAL;
+
+ xapm->info.name = "axi-pmon";
+ xapm->info.version = DRV_VERSION;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "unable to get irq\n");
+ ret = irq;
+ goto err_clk_dis;
+ }
+
+ xapm->info.irq = irq;
+ xapm->info.handler = xapm_handler;
+ xapm->info.priv = xapm;
+ xapm->info.irq_flags = IRQF_SHARED;
+
+ memcpy(ptr, &xapm->param, sizeof(struct xapm_param));
+
+ ret = uio_register_device(&pdev->dev, &xapm->info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to register to UIO\n");
+ goto err_clk_dis;
+ }
+
+ platform_set_drvdata(pdev, xapm);
+
+ dev_info(&pdev->dev, "Probed Xilinx APM\n");
+
+ return 0;
+
+err_clk_dis:
+ clk_disable_unprepare(xapm->param.clk);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ return ret;
+}
+
/**
 * xapm_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Unregisters the UIO device, gates the AXI clock and puts the device
 * into the runtime-suspended state.
 *
 * Return: Always returns '0'
 */
static int xapm_remove(struct platform_device *pdev)
{
	struct xapm_dev *xapm = platform_get_drvdata(pdev);

	uio_unregister_device(&xapm->info);
	clk_disable_unprepare(xapm->param.clk);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);

	return 0;
}
+
+static int __maybe_unused xapm_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct xapm_dev *xapm = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(xapm->param.clk);
+ return 0;
+};
+
+static int __maybe_unused xapm_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct xapm_dev *xapm = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = clk_prepare_enable(xapm->param.clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return ret;
+ }
+ return 0;
+};
+
+static const struct dev_pm_ops xapm_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xapm_runtime_suspend, xapm_runtime_resume)
+ SET_RUNTIME_PM_OPS(xapm_runtime_suspend,
+ xapm_runtime_resume, NULL)
+};
+
+static const struct of_device_id xapm_of_match[] = {
+ { .compatible = "xlnx,axi-perf-monitor", },
+ { /* end of table*/ }
+};
+
+MODULE_DEVICE_TABLE(of, xapm_of_match);
+
+static struct platform_driver xapm_driver = {
+ .driver = {
+ .name = "xilinx-axipmon",
+ .of_match_table = xapm_of_match,
+ .pm = &xapm_dev_pm_ops,
+ },
+ .probe = xapm_probe,
+ .remove = xapm_remove,
+};
+
+module_platform_driver(xapm_driver);
+
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_DESCRIPTION("Xilinx AXI Performance Monitor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/chipidea/ci_hdrc_usb2.c b/drivers/usb/chipidea/ci_hdrc_usb2.c
index c044fba463e4..83238293e5be 100644
--- a/drivers/usb/chipidea/ci_hdrc_usb2.c
+++ b/drivers/usb/chipidea/ci_hdrc_usb2.c
@@ -30,6 +30,7 @@ static const struct ci_hdrc_platform_data ci_default_pdata = {
static struct ci_hdrc_platform_data ci_zynq_pdata = {
.capoffset = DEF_CAPOFFSET,
+ .flags = CI_HDRC_PHY_VBUS_CONTROL,
};
static const struct of_device_id ci_hdrc_usb2_of_match[] = {
@@ -58,6 +59,10 @@ static int ci_hdrc_usb2_probe(struct platform_device *pdev)
if (match && match->data) {
/* struct copy */
*ci_pdata = *(struct ci_hdrc_platform_data *)match->data;
+ ci_pdata->usb_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy",
+ 0);
+ if (IS_ERR(ci_pdata->usb_phy))
+ return PTR_ERR(ci_pdata->usb_phy);
}
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 48e4a5ca1835..b49edda341ea 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -57,6 +57,14 @@ static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
priv->enabled = enable;
}
+ if (ci->platdata->flags & CI_HDRC_PHY_VBUS_CONTROL &&
+ ci->usb_phy && ci->usb_phy->set_vbus) {
+ if (enable)
+ ci->usb_phy->set_vbus(ci->usb_phy, 1);
+ else
+ ci->usb_phy->set_vbus(ci->usb_phy, 0);
+ }
+
if (enable && (ci->platdata->phy_mode == USBPHY_INTERFACE_MODE_HSIC)) {
/*
* Marvell 28nm HSIC PHY requires forcing the port to HS mode.
@@ -65,6 +73,7 @@ static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
hw_port_test_set(ci, 5);
hw_port_test_set(ci, 0);
}
+
return 0;
};
diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
index 6ed4b00dba96..ec02ea0ab20d 100644
--- a/drivers/usb/chipidea/otg_fsm.c
+++ b/drivers/usb/chipidea/otg_fsm.c
@@ -471,6 +471,11 @@ static void ci_otg_drv_vbus(struct otg_fsm *fsm, int on)
return;
}
}
+
+ if (ci->platdata->flags & CI_HDRC_PHY_VBUS_CONTROL &&
+ ci->usb_phy && ci->usb_phy->set_vbus)
+ ci->usb_phy->set_vbus(ci->usb_phy, 1);
+
/* Disable data pulse irq */
hw_write_otgsc(ci, OTGSC_DPIE, 0);
@@ -480,6 +485,10 @@ static void ci_otg_drv_vbus(struct otg_fsm *fsm, int on)
if (ci->platdata->reg_vbus)
regulator_disable(ci->platdata->reg_vbus);
+ if (ci->platdata->flags & CI_HDRC_PHY_VBUS_CONTROL &&
+ ci->usb_phy && ci->usb_phy->set_vbus)
+ ci->usb_phy->set_vbus(ci->usb_phy, 0);
+
fsm->a_bus_drop = 1;
fsm->a_bus_req = 0;
}
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 7a2304565a73..6f1b2ae48203 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -24,6 +24,7 @@ config USB_DWC3_ULPI
choice
bool "DWC3 Mode Selection"
default USB_DWC3_DUAL_ROLE if (USB && USB_GADGET)
+ default USB_DWC3_OTG if (USB && USB_GADGET && USB_OTG && USB_OTG_FSM)
default USB_DWC3_HOST if (USB && !USB_GADGET)
default USB_DWC3_GADGET if (!USB && USB_GADGET)
@@ -49,6 +50,15 @@ config USB_DWC3_DUAL_ROLE
This is the default mode of working of DWC3 controller where
both host and gadget features are enabled.
+config USB_DWC3_OTG
+ bool "Dual Role mode + OTG"
+ depends on ((USB=y || USB=USB_DWC3) && \
+ (USB_GADGET=y || USB_GADGET=USB_DWC3) && \
+ USB_OTG && USB_OTG_FSM && PM)
+ help
+ This is the default mode of working of DWC3 controller where
+ both host and gadget features are enabled with OTG support.
+
endchoice
comment "Platform Glue Driver Support"
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index ae86da0dc5bd..5ec60cd8d0b5 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -10,12 +10,17 @@ ifneq ($(CONFIG_TRACING),)
dwc3-y += trace.o
endif
-ifneq ($(filter y,$(CONFIG_USB_DWC3_HOST) $(CONFIG_USB_DWC3_DUAL_ROLE)),)
+ifneq ($(filter y,$(CONFIG_USB_DWC3_HOST) $(CONFIG_USB_DWC3_DUAL_ROLE)\
+ $(CONFIG_USB_DWC3_OTG)),)
dwc3-y += host.o
endif
-ifneq ($(filter y,$(CONFIG_USB_DWC3_GADGET) $(CONFIG_USB_DWC3_DUAL_ROLE)),)
- dwc3-y += gadget.o ep0.o
+ifneq ($(filter y,$(CONFIG_USB_DWC3_GADGET) $(CONFIG_USB_DWC3_DUAL_ROLE) $(CONFIG_USB_DWC3_OTG)),)
+ dwc3-y += gadget.o ep0.o gadget_hibernation.o
+endif
+
+ifneq ($(CONFIG_USB_DWC3_OTG),)
+ dwc3-y += otg.o
endif
ifneq ($(CONFIG_USB_DWC3_DUAL_ROLE),)
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index edc17155cb2b..d08db5780e96 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -25,6 +25,7 @@
#include <linux/of.h>
#include <linux/acpi.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/of_address.h>
#include <linux/reset.h>
#include <linux/usb/ch9.h>
@@ -244,6 +245,9 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
* XHCI driver will reset the host block. If dwc3 was configured for
* host-only mode, then we can return early.
*/
+ if (dwc->dr_mode == USB_DR_MODE_HOST || dwc->is_hibernated == true)
+ return 0;
+
if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
return 0;
@@ -295,7 +299,7 @@ done:
*/
static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
{
- u32 reg;
+ u32 reg, gfladj;
u32 dft;
if (dwc->revision < DWC3_REVISION_250A)
@@ -304,13 +308,27 @@ static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
if (dwc->fladj == 0)
return;
+ /* Save the initial DWC3_GFLADJ register value */
reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
+ gfladj = reg;
+
+ if (dwc->refclk_fladj) {
+ if ((reg & DWC3_GFLADJ_REFCLK_FLADJ) !=
+ (dwc->fladj & DWC3_GFLADJ_REFCLK_FLADJ)) {
+ reg &= ~DWC3_GFLADJ_REFCLK_FLADJ;
+ reg |= (dwc->fladj & DWC3_GFLADJ_REFCLK_FLADJ);
+ }
+ }
+
dft = reg & DWC3_GFLADJ_30MHZ_MASK;
if (dft != dwc->fladj) {
reg &= ~DWC3_GFLADJ_30MHZ_MASK;
reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj;
- dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
}
+
+ /* Update DWC3_GFLADJ if there is any change from initial value */
+ if (reg != gfladj)
+ dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
}
/**
@@ -359,7 +377,7 @@ static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc,
* dwc3_free_event_buffers - frees all allocated event buffers
* @dwc: Pointer to our controller context structure
*/
-static void dwc3_free_event_buffers(struct dwc3 *dwc)
+void dwc3_free_event_buffers(struct dwc3 *dwc)
{
struct dwc3_event_buffer *evt;
@@ -376,7 +394,7 @@ static void dwc3_free_event_buffers(struct dwc3 *dwc)
* Returns 0 on success otherwise negative errno. In the error case, dwc
* may contain some buffers allocated but not all which were requested.
*/
-static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned length)
+int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned length)
{
struct dwc3_event_buffer *evt;
@@ -400,6 +418,9 @@ int dwc3_event_buffers_setup(struct dwc3 *dwc)
{
struct dwc3_event_buffer *evt;
+ if (dwc->dr_mode == USB_DR_MODE_HOST)
+ return 0;
+
evt = dwc->ev_buf;
evt->lpos = 0;
dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0),
@@ -430,26 +451,46 @@ void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
static int dwc3_alloc_scratch_buffers(struct dwc3 *dwc)
{
+ u32 size;
+
+ if (dwc->dr_mode == USB_DR_MODE_HOST)
+ return 0;
+
if (!dwc->has_hibernation)
return 0;
if (!dwc->nr_scratch)
return 0;
- dwc->scratchbuf = kmalloc_array(dwc->nr_scratch,
- DWC3_SCRATCHBUF_SIZE, GFP_KERNEL);
+ /* Allocate only if scratchbuf is NULL */
+ if (dwc->scratchbuf)
+ return 0;
+
+ size = dwc->nr_scratch * DWC3_SCRATCHBUF_SIZE;
+
+ dwc->scratchbuf = kzalloc(size, GFP_KERNEL);
+
if (!dwc->scratchbuf)
return -ENOMEM;
+ dwc->scratch_addr = dma_map_single(dwc->dev, dwc->scratchbuf, size,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dwc->dev, dwc->scratch_addr)) {
+ dev_err(dwc->dev, "failed to map scratch buffer\n");
+ return -EFAULT;
+ }
+
return 0;
}
static int dwc3_setup_scratch_buffers(struct dwc3 *dwc)
{
- dma_addr_t scratch_addr;
u32 param;
int ret;
+ if (dwc->dr_mode == USB_DR_MODE_HOST)
+ return 0;
+
if (!dwc->has_hibernation)
return 0;
@@ -457,28 +498,17 @@ static int dwc3_setup_scratch_buffers(struct dwc3 *dwc)
return 0;
/* should never fall here */
- if (!WARN_ON(dwc->scratchbuf))
+ if (WARN_ON(!dwc->scratchbuf))
return 0;
- scratch_addr = dma_map_single(dwc->sysdev, dwc->scratchbuf,
- dwc->nr_scratch * DWC3_SCRATCHBUF_SIZE,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dwc->sysdev, scratch_addr)) {
- dev_err(dwc->sysdev, "failed to map scratch buffer\n");
- ret = -EFAULT;
- goto err0;
- }
-
- dwc->scratch_addr = scratch_addr;
-
- param = lower_32_bits(scratch_addr);
+ param = lower_32_bits(dwc->scratch_addr);
ret = dwc3_send_gadget_generic_command(dwc,
DWC3_DGCMD_SET_SCRATCHPAD_ADDR_LO, param);
if (ret < 0)
goto err1;
- param = upper_32_bits(scratch_addr);
+ param = upper_32_bits(dwc->scratch_addr);
ret = dwc3_send_gadget_generic_command(dwc,
DWC3_DGCMD_SET_SCRATCHPAD_ADDR_HI, param);
@@ -491,7 +521,6 @@ err1:
dma_unmap_single(dwc->sysdev, dwc->scratch_addr, dwc->nr_scratch *
DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL);
-err0:
return ret;
}
@@ -504,7 +533,7 @@ static void dwc3_free_scratch_buffers(struct dwc3 *dwc)
return;
/* should never fall here */
- if (!WARN_ON(dwc->scratchbuf))
+ if (WARN_ON(!dwc->scratchbuf))
return;
dma_unmap_single(dwc->sysdev, dwc->scratch_addr, dwc->nr_scratch *
@@ -534,6 +563,45 @@ static void dwc3_cache_hwparams(struct dwc3 *dwc)
parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8);
}
+static int dwc3_config_soc_bus(struct dwc3 *dwc)
+{
+ int ret;
+
+ /*
+ * Check if CCI is enabled for USB. Returns true
+ * if the node has property 'dma-coherent'. Otherwise
+ * returns false.
+ */
+ if (of_dma_is_coherent(dwc->dev->of_node)) {
+ u32 reg;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0);
+ reg |= DWC3_GSBUSCFG0_DATRDREQINFO |
+ DWC3_GSBUSCFG0_DESRDREQINFO |
+ DWC3_GSBUSCFG0_DATWRREQINFO |
+ DWC3_GSBUSCFG0_DESWRREQINFO;
+ dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, reg);
+ }
+
+ /*
+ * This routes the usb dma traffic to go through CCI path instead
+ * of reaching DDR directly. This traffic routing is needed to
+ * to make SMMU and CCI work with USB dma.
+ */
+ if (of_dma_is_coherent(dwc->dev->of_node) || dwc->dev->iommu_group) {
+ ret = dwc3_enable_hw_coherency(dwc->dev);
+ if (ret)
+ return ret;
+ }
+
+ /* Send struct dwc3 to dwc3-of-simple for configuring VBUS
+ * during suspend/resume
+ */
+ dwc3_set_simple_data(dwc);
+
+ return 0;
+}
+
static int dwc3_core_ulpi_init(struct dwc3 *dwc)
{
int intf;
@@ -699,7 +767,6 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
static void dwc3_core_exit(struct dwc3 *dwc)
{
- dwc3_event_buffers_cleanup(dwc);
usb_phy_shutdown(dwc->usb2_phy);
usb_phy_shutdown(dwc->usb3_phy);
@@ -767,8 +834,15 @@ static void dwc3_core_setup_global_control(struct dwc3 *dwc)
reg &= ~DWC3_GCTL_DSBLCLKGTNG;
break;
case DWC3_GHWPARAMS1_EN_PWROPT_HIB:
- /* enable hibernation here */
- dwc->nr_scratch = DWC3_GHWPARAMS4_HIBER_SCRATCHBUFS(hwparams4);
+ if (!device_property_read_bool(dwc->dev,
+ "snps,enable-hibernation")) {
+ dev_dbg(dwc->dev, "Hibernation not enabled\n");
+ } else {
+ /* enable hibernation here */
+ dwc->nr_scratch =
+ DWC3_GHWPARAMS4_HIBER_SCRATCHBUFS(hwparams4);
+ dwc->has_hibernation = 1;
+ }
/*
* REVISIT Enabling this bit so that host-mode hibernation
@@ -913,7 +987,7 @@ static void dwc3_set_incr_burst_type(struct dwc3 *dwc)
*
* Returns 0 on success otherwise negative errno.
*/
-static int dwc3_core_init(struct dwc3 *dwc)
+int dwc3_core_init(struct dwc3 *dwc)
{
unsigned int hw_mode;
u32 reg;
@@ -971,18 +1045,36 @@ static int dwc3_core_init(struct dwc3 *dwc)
}
}
+ if (dwc->mask_phy_rst)
+ dwc3_mask_phy_reset(dwc->dev, TRUE);
+
dwc3_core_setup_global_control(dwc);
dwc3_core_num_eps(dwc);
+ if (dwc->scratchbuf == NULL) {
+ ret = dwc3_alloc_scratch_buffers(dwc);
+ if (ret) {
+ dev_err(dwc->dev,
+ "Not enough memory for scratch buffers\n");
+ goto err1;
+ }
+ }
+
ret = dwc3_setup_scratch_buffers(dwc);
- if (ret)
+ if (ret) {
+ dev_err(dwc->dev, "Failed to setup scratch buffers: %d\n", ret);
goto err1;
+ }
/* Adjust Frame Length */
dwc3_frame_length_adjustment(dwc);
dwc3_set_incr_burst_type(dwc);
+ ret = dwc3_config_soc_bus(dwc);
+ if (ret)
+ goto err1;
+
usb_phy_set_suspend(dwc->usb2_phy, 0);
usb_phy_set_suspend(dwc->usb3_phy, 0);
ret = phy_power_on(dwc->usb2_generic_phy);
@@ -999,6 +1091,21 @@ static int dwc3_core_init(struct dwc3 *dwc)
goto err4;
}
+ switch (dwc->dr_mode) {
+ case USB_DR_MODE_PERIPHERAL:
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+ break;
+ case USB_DR_MODE_HOST:
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
+ break;
+ case USB_DR_MODE_OTG:
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG);
+ break;
+ default:
+ dev_warn(dwc->dev, "Unsupported mode %d\n", dwc->dr_mode);
+ break;
+ }
+
/*
* ENDXFER polling is available on version 3.10a and later of
* the DWC_usb3 controller. It is NOT available in the
@@ -1010,6 +1117,32 @@ static int dwc3_core_init(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
}
+ /* When configured in HOST mode, after issuing U3/L2 exit controller
+ * fails to send proper CRC checksum in CRC5 feild. Because of this
+ * behaviour Transaction Error is generated, resulting in reset and
+ * re-enumeration of usb device attached. Enabling bit 10 of GUCTL1
+ * will correct this problem
+ */
+ if (dwc->enable_guctl1_resume_quirk) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
+ reg |= DWC3_GUCTL1_RESUME_QUIRK;
+ dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
+ }
+
+ /* SNPS controller when configureed in HOST mode maintains Inter Packet
+ * Delay (IPD) of ~380ns which works with most of the super-speed hubs
+ * except VIA-LAB hubs. When IPD is ~380ns HOST controller fails to
+ * enumerate FS/LS devices when connected behind VIA-LAB hubs.
+ * Enabling bit 9 of GUCTL1 enables the workaround in HW to reduce the
+ * ULPI clock latency by 1 cycle, thus reducing the IPD (~360ns) and
+ * making controller enumerate FS/LS devices connected behind VIA-LAB.
+ */
+ if (dwc->enable_guctl1_ipd_quirk) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
+ reg |= DWC3_GUCTL1_IPD_QUIRK;
+ dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
+ }
+
if (dwc->revision >= DWC3_REVISION_250A) {
reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
@@ -1218,6 +1351,11 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
dev_err(dev, "failed to initialize dual-role\n");
return ret;
}
+
+#if IS_ENABLED(CONFIG_USB_DWC3_OTG)
+ dwc->current_dr_role = 0;
+ dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_OTG);
+#endif
break;
default:
dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode);
@@ -1351,8 +1489,19 @@ static void dwc3_get_properties(struct dwc3 *dwc)
device_property_read_u32(dev, "snps,quirk-frame-length-adjustment",
&dwc->fladj);
+ dwc->refclk_fladj = device_property_read_bool(dev,
+ "snps,refclk_fladj");
+ dwc->enable_guctl1_resume_quirk = device_property_read_bool(dev,
+ "snps,enable_guctl1_resume_quirk");
+ dwc->enable_guctl1_ipd_quirk = device_property_read_bool(dev,
+ "snps,enable_guctl1_ipd_quirk");
dwc->dis_metastability_quirk = device_property_read_bool(dev,
"snps,dis_metastability_quirk");
+ dwc->mask_phy_rst = device_property_read_bool(dev,
+ "snps,mask_phy_reset");
+
+ /* Check if extra quirks to be added */
+ dwc3_simple_check_quirks(dwc);
dwc->lpm_nyet_threshold = lpm_nyet_threshold;
dwc->tx_de_emphasis = tx_de_emphasis;
@@ -1431,9 +1580,8 @@ static int dwc3_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res, dwc_res;
struct dwc3 *dwc;
-
int ret;
-
+ u32 mdwidth;
void __iomem *regs;
dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
@@ -1508,6 +1656,11 @@ static int dwc3_probe(struct platform_device *pdev)
spin_lock_init(&dwc->lock);
+ /* Set dma coherent mask to DMA BUS data width */
+ mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);
+ dev_dbg(dev, "Enabling %d-bit DMA addresses.\n", mdwidth);
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(mdwidth));
+
pm_runtime_set_active(dev);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
@@ -1529,10 +1682,6 @@ static int dwc3_probe(struct platform_device *pdev)
if (ret)
goto err3;
- ret = dwc3_alloc_scratch_buffers(dwc);
- if (ret)
- goto err3;
-
ret = dwc3_core_init(dwc);
if (ret) {
if (ret != -EPROBE_DEFER)
@@ -1585,6 +1734,7 @@ static int dwc3_remove(struct platform_device *pdev)
dwc3_debugfs_exit(dwc);
dwc3_core_exit_mode(dwc);
+ dwc3_event_buffers_cleanup(dwc);
dwc3_core_exit(dwc);
dwc3_ulpi_exit(dwc);
@@ -1681,6 +1831,18 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
break;
}
+ dwc3_event_buffers_cleanup(dwc);
+
+ /* Put the core into D3 state */
+ dwc3_set_usb_core_power(dwc, false);
+
+ /*
+ * To avoid reinit of phy during resume, prevent calling the
+ * dwc3_core_exit() when in D3 state
+ */
+ if (!dwc->is_d3)
+ dwc3_core_exit(dwc);
+
return 0;
}
@@ -1690,6 +1852,13 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
int ret;
u32 reg;
+ /* Bring core to D0 state */
+ dwc3_set_usb_core_power(dwc, true);
+
+ ret = dwc3_core_init(dwc);
+ if (ret)
+ return ret;
+
switch (dwc->current_dr_role) {
case DWC3_GCTL_PRTCAP_DEVICE:
ret = dwc3_core_init_for_resume(dwc);
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 4c171a8e215f..214f0ff460cb 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -165,6 +165,9 @@
/* Bit fields */
+/* Global Status Register */
+#define DWC3_GSTS_CUR_MODE (1 << 0)
+
/* Global SoC Bus Configuration INCRx Register 0 */
#define DWC3_GSBUSCFG0_INCR256BRSTENA (1 << 7) /* INCR256 burst */
#define DWC3_GSBUSCFG0_INCR128BRSTENA (1 << 6) /* INCR128 burst */
@@ -197,6 +200,12 @@
#define DWC3_EVENTQ 7
#define DWC3_AUXEVENTQ 8
+/* Global SoC Bus Configuration Register */
+#define DWC3_GSBUSCFG0_DATRDREQINFO (0xf << 28)
+#define DWC3_GSBUSCFG0_DESRDREQINFO (0xf << 24)
+#define DWC3_GSBUSCFG0_DATWRREQINFO (0xf << 20)
+#define DWC3_GSBUSCFG0_DESWRREQINFO (0xf << 16)
+
/* Global RX Threshold Configuration Register */
#define DWC3_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 19)
#define DWC3_GRXTHRCFG_RXPKTCNT(n) (((n) & 0xf) << 24)
@@ -372,6 +381,11 @@
/* Global Frame Length Adjustment Register */
#define DWC3_GFLADJ_30MHZ_SDBND_SEL BIT(7)
#define DWC3_GFLADJ_30MHZ_MASK 0x3f
+#define DWC3_GFLADJ_REFCLK_FLADJ (0x3fff << 8)
+
+/* Global User Control Register 1 */
+#define DWC3_GUCTL1_RESUME_QUIRK (1 << 10)
+#define DWC3_GUCTL1_IPD_QUIRK (1 << 9)
/* Global User Control Register 2 */
#define DWC3_GUCTL2_RST_ACTBITLATER BIT(14)
@@ -452,6 +466,7 @@
/* Device Status Register */
#define DWC3_DSTS_DCNRD BIT(29)
+#define DWC3_DSTS_SRE BIT(28)
/* This applies for core versions 1.87a and earlier */
#define DWC3_DSTS_PWRUPREQ BIT(24)
@@ -611,6 +626,9 @@
#define DWC3_OSTS_VBUSVLD BIT(1)
#define DWC3_OSTS_CONIDSTS BIT(0)
+/* Stream timer timeout value in millisecs */
+#define STREAM_TIMEOUT_MS 50
+
/* Structures */
struct dwc3_trb;
@@ -874,6 +892,12 @@ struct dwc3_hwparams {
* @num_trbs: number of TRBs used by this request
* @needs_extra_trb: true when request needs one extra TRB (either due to ZLP
* or unaligned OUT)
+ * @stream_timeout_timer: Some endpoints may go out of sync with host and
+ * enter into deadlock. For example, stream capable endpoints may enter
+ * into deadlock where the host waits on gadget to issue ERDY and gadget
+ * waits for host to issue prime transaction. To avoid such deadlock this
+ * timer is used.
+ * @unaligned: true for OUT endpoints with length not divisible by maxp
* @direction: IN or OUT direction flag
* @mapped: true when request has been dma-mapped
*/
@@ -886,6 +910,7 @@ struct dwc3_request {
unsigned num_pending_sgs;
unsigned int num_queued_sgs;
+ u8 first_trb_index;
unsigned remaining;
unsigned int status;
@@ -898,6 +923,7 @@ struct dwc3_request {
u8 epnum;
struct dwc3_trb *trb;
dma_addr_t trb_dma;
+ struct timer_list stream_timeout_timer;
unsigned num_trbs;
@@ -941,7 +967,9 @@ struct dwc3_scratchpad_array {
* @regs: base address for our registers
* @regs_size: address space size
* @fladj: frame length adjustment
+ * @refclk_fladj: boolean to update GFLADJ_REFCLK_FLADJ field also
* @irq_gadget: peripheral controller's IRQ number
+ * @otg: pointer to the dwc3_otg structure
* @otg_irq: IRQ number for OTG IRQs
* @current_otg_role: current role of operation while using the OTG block
* @desired_otg_role: desired role of operation while using the OTG block
@@ -1011,6 +1039,7 @@ struct dwc3_scratchpad_array {
* not needed for DWC_usb31 version 1.70a-ea06 and below
* @usb3_lpm_capable: set if hadrware supports Link Power Management
* @usb2_lpm_disable: set to disable usb2 lpm
+ * @remote_wakeup: set if host supports Remote Wakeup from Peripheral
* @disable_scramble_quirk: set if we enable the disable scramble quirk
* @u2exit_lfps_quirk: set if we enable u2exit lfps quirk
* @u2ss_inp3_quirk: set if we enable P3 OK for U2/SS Inactive quirk
@@ -1031,11 +1060,16 @@ struct dwc3_scratchpad_array {
* provide a free-running PHY clock.
* @dis_del_phy_power_chg_quirk: set if we disable delay phy power
* change quirk.
+ * @enable_guctl1_resume_quirk: Set if we enable quirk for fixing improper crc
+ * generation after resume from suspend.
+ * @enable_guctl1_ipd_quirk: set if we enable quirk for reducing timing of inter
+ * packet delay(ipd).
* @dis_tx_ipgap_linecheck_quirk: set if we disable u2mac linestate
* check during HS transmit.
* @parkmode_disable_ss_quirk: set if we need to disable all SuperSpeed
* instances in park mode.
* @tx_de_emphasis_quirk: set if we enable Tx de-emphasis quirk
+ * @is_hibernated: true when dwc3 is hibernated; abort processing events
* @tx_de_emphasis: Tx de-emphasis value
* 0 - -6dB de-emphasis
* 1 - -3.5dB de-emphasis
@@ -1044,6 +1078,12 @@ struct dwc3_scratchpad_array {
* @dis_metastability_quirk: set to disable metastability quirk.
* @imod_interval: set the interrupt moderation interval in 250ns
* increments or 0 to disable.
+ * @is_d3: set if the controller is in d3 state
+ * @saved_regs: registers to be saved/restored during hibernation/wakeup events
+ * @irq_wakeup: wakeup IRQ number, triggered when host asks to wakeup from
+ * hibernation
+ * @force_hiber_wake: flag set when the gadget driver is forcefully triggering
+ * a hibernation wakeup event
*/
struct dwc3 {
struct work_struct drd_work;
@@ -1077,6 +1117,8 @@ struct dwc3 {
struct reset_control *reset;
+ struct dwc3_otg *otg;
+
struct usb_phy *usb2_phy;
struct usb_phy *usb3_phy;
@@ -1101,6 +1143,7 @@ struct dwc3 {
enum usb_dr_mode role_switch_default_mode;
u32 fladj;
+ bool refclk_fladj;
u32 irq_gadget;
u32 otg_irq;
u32 current_otg_role;
@@ -1210,7 +1253,7 @@ struct dwc3 {
unsigned dis_start_transfer_quirk:1;
unsigned usb3_lpm_capable:1;
unsigned usb2_lpm_disable:1;
-
+ unsigned remote_wakeup:1;
unsigned disable_scramble_quirk:1;
unsigned u2exit_lfps_quirk:1;
unsigned u2ss_inp3_quirk:1;
@@ -1227,15 +1270,23 @@ struct dwc3 {
unsigned dis_rxdet_inp3_quirk:1;
unsigned dis_u2_freeclk_exists_quirk:1;
unsigned dis_del_phy_power_chg_quirk:1;
+ unsigned enable_guctl1_resume_quirk:1;
+ unsigned enable_guctl1_ipd_quirk:1;
unsigned dis_tx_ipgap_linecheck_quirk:1;
unsigned parkmode_disable_ss_quirk:1;
unsigned tx_de_emphasis_quirk:1;
unsigned tx_de_emphasis:2;
+ unsigned is_hibernated:1;
unsigned dis_metastability_quirk:1;
+ unsigned mask_phy_rst:1;
u16 imod_interval;
+ bool is_d3;
+ u32 *saved_regs;
+ u32 irq_wakeup;
+ bool force_hiber_wake;
};
#define INCRX_BURST_MODE 0
@@ -1412,12 +1463,35 @@ static inline bool dwc3_is_usb31(struct dwc3 *dwc)
return !!(dwc->revision & DWC3_REVISION_IS_DWC31);
}
+#if IS_ENABLED(CONFIG_USB_DWC3_OF_SIMPLE)
+int dwc3_enable_hw_coherency(struct device *dev);
+void dwc3_simple_wakeup_capable(struct device *dev, bool wakeup);
+void dwc3_set_simple_data(struct dwc3 *dwc);
+void dwc3_simple_check_quirks(struct dwc3 *dwc);
+int dwc3_set_usb_core_power(struct dwc3 *dwc, bool on);
+void dwc3_mask_phy_reset(struct device *dev, bool mask);
+#else
+static inline int dwc3_enable_hw_coherency(struct device *dev)
+{ return 1; }
+static inline void dwc3_simple_wakeup_capable(struct device *dev, bool wakeup)
+{ ; }
+static inline void dwc3_set_simple_data(struct dwc3 *dwc)
+{ ; }
+static inline void dwc3_simple_check_quirks(struct dwc3 *dwc)
+{ ; }
+static inline int dwc3_set_usb_core_power(struct dwc3 *dwc, bool on)
+{ return 0; }
+static inline void dwc3_mask_phy_reset(struct device *dev, bool mask)
+{ ; }
+#endif
+
bool dwc3_has_imod(struct dwc3 *dwc);
int dwc3_event_buffers_setup(struct dwc3 *dwc);
void dwc3_event_buffers_cleanup(struct dwc3 *dwc);
-#if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
+#if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)\
+ || IS_ENABLED(CONFIG_USB_DWC3_OTG)
int dwc3_host_init(struct dwc3 *dwc);
void dwc3_host_exit(struct dwc3 *dwc);
#else
@@ -1427,7 +1501,8 @@ static inline void dwc3_host_exit(struct dwc3 *dwc)
{ }
#endif
-#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
+#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)\
+ || IS_ENABLED(CONFIG_USB_DWC3_OTG)
int dwc3_gadget_init(struct dwc3 *dwc);
void dwc3_gadget_exit(struct dwc3 *dwc);
int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode);
@@ -1436,6 +1511,7 @@ int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state);
int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
struct dwc3_gadget_ep_cmd_params *params);
int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param);
+int dwc3_core_init(struct dwc3 *dwc);
#else
static inline int dwc3_gadget_init(struct dwc3 *dwc)
{ return 0; }
@@ -1457,11 +1533,19 @@ static inline int dwc3_send_gadget_generic_command(struct dwc3 *dwc,
{ return 0; }
#endif
+#if IS_ENABLED(CONFIG_USB_DWC3_OTG) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
+void dwc3_otg_init(struct dwc3 *dwc);
+void dwc3_otg_exit(struct dwc3 *dwc);
+#else
+static inline void dwc3_otg_init(struct dwc3 *dwc)
+{ }
+static inline void dwc3_otg_exit(struct dwc3 *dwc)
+{ }
+#endif
+
#if IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
int dwc3_drd_init(struct dwc3 *dwc);
void dwc3_drd_exit(struct dwc3 *dwc);
-void dwc3_otg_init(struct dwc3 *dwc);
-void dwc3_otg_exit(struct dwc3 *dwc);
void dwc3_otg_update(struct dwc3 *dwc, bool ignore_idstatus);
void dwc3_otg_host_init(struct dwc3 *dwc);
#else
@@ -1469,10 +1553,6 @@ static inline int dwc3_drd_init(struct dwc3 *dwc)
{ return 0; }
static inline void dwc3_drd_exit(struct dwc3 *dwc)
{ }
-static inline void dwc3_otg_init(struct dwc3 *dwc)
-{ }
-static inline void dwc3_otg_exit(struct dwc3 *dwc)
-{ }
static inline void dwc3_otg_update(struct dwc3 *dwc, bool ignore_idstatus)
{ }
static inline void dwc3_otg_host_init(struct dwc3 *dwc)
@@ -1509,5 +1589,8 @@ static inline int dwc3_ulpi_init(struct dwc3 *dwc)
static inline void dwc3_ulpi_exit(struct dwc3 *dwc)
{ }
#endif
+int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned length);
+void dwc3_free_event_buffers(struct dwc3 *dwc);
+int dwc3_event_buffers_setup(struct dwc3 *dwc);
#endif /* __DRIVERS_USB_DWC3_CORE_H */
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 4fe8b1e1485c..c019cd489555 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -625,6 +625,53 @@ static const struct file_operations dwc3_link_state_fops = {
.release = single_release,
};
+static int dwc3_hiber_enable_show(struct seq_file *s, void *unused)
+{
+ struct dwc3 *dwc = s->private;
+
+ seq_printf(s, "%s\n", (dwc->has_hibernation ? "Enabled" : "Disabled"));
+
+ return 0;
+}
+
+static int dwc3_hiber_enable_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dwc3_hiber_enable_show, inode->i_private);
+}
+
+static ssize_t dwc3_hiber_enable_write(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct dwc3 *dwc = s->private;
+ char buf[32] = {0};
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ /* Enable hibernation feature */
+ if (!strncmp(buf, "Enable", 6)) {
+ dwc3_gadget_exit(dwc);
+ dwc->has_hibernation = 1;
+ dwc3_gadget_init(dwc);
+ } else if (!strncmp(buf, "Disable", 7)) {
+ dwc3_gadget_exit(dwc);
+ dwc->has_hibernation = 0;
+ dwc3_gadget_init(dwc);
+ } else {
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static const struct file_operations dwc3_hiber_enable_fops = {
+ .open = dwc3_hiber_enable_open,
+ .write = dwc3_hiber_enable_write,
+ .read = seq_read, .release = single_release,
+};
+
struct dwc3_ep_file_map {
const char name[25];
const struct file_operations *const fops;
@@ -935,6 +982,9 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
&dwc3_testmode_fops);
debugfs_create_file("link_state", S_IRUGO | S_IWUSR, root, dwc,
&dwc3_link_state_fops);
+ debugfs_create_file("hiber_enable", S_IRUGO | S_IWUSR, root,
+ dwc, &dwc3_hiber_enable_fops);
+
dwc3_debugfs_create_endpoint_dirs(dwc, root);
}
}
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index e64754be47b4..40c2d39b5350 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -21,16 +21,262 @@
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include <linux/soc/xilinx/zynqmp/fw.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/slab.h>
+
+#include <linux/phy/phy-zynqmp.h>
+#include <linux/of_address.h>
+
+#include "core.h"
+#include "io.h"
+
+/* USB phy reset mask register */
+#define XLNX_USB_PHY_RST 0x001C
+#define XLNX_PHY_RST_MASK 0x1
+
+/* Xilinx USB 3.0 IP Register */
+#define XLNX_USB_COHERENCY 0x005C
+#define XLNX_USB_COHERENCY_ENABLE 0x1
+
+/* ULPI control registers */
+#define ULPI_OTG_CTRL_SET 0xB
+#define ULPI_OTG_CTRL_CLEAR 0xC
+#define OTG_CTRL_DRVVBUS_OFFSET 5
+
+#define XLNX_USB_CUR_PWR_STATE 0x0000
+#define XLNX_CUR_PWR_STATE_D0 0x00
+#define XLNX_CUR_PWR_STATE_D3 0x0F
+#define XLNX_CUR_PWR_STATE_BITMASK 0x0F
+
+#define XLNX_USB_PME_ENABLE 0x0034
+#define XLNX_PME_ENABLE_SIG_GEN 0x01
+
+#define XLNX_USB_REQ_PWR_STATE 0x003c
+#define XLNX_REQ_PWR_STATE_D0 0x00
+#define XLNX_REQ_PWR_STATE_D3 0x03
+
+/* Number of retries for USB operations */
+#define DWC3_PWR_STATE_RETRIES 1000
+#define DWC3_PWR_TIMEOUT 100
+
+/* Versal USB Node ID */
+#define VERSAL_USB_NODE_ID 0x18224018
+
+/* Versal USB Reset ID */
+#define VERSAL_USB_RESET_ID 0xC104036
+
+#define DWC3_OF_ADDRESS(ADDR) ((ADDR) - DWC3_GLOBALS_REGS_START)
+
+static const struct zynqmp_eemi_ops *eemi_ops;
struct dwc3_of_simple {
struct device *dev;
struct clk_bulk_data *clks;
int num_clocks;
+ void __iomem *regs;
+ struct dwc3 *dwc;
+ struct phy *phy;
+ bool wakeup_capable;
+ bool dis_u3_susphy_quirk;
+ bool enable_d3_suspend;
+ char soc_rev;
struct reset_control *resets;
bool pulse_resets;
bool need_reset;
};
+int dwc3_enable_hw_coherency(struct device *dev)
+{
+ struct device_node *node = of_get_parent(dev->of_node);
+
+ if (of_device_is_compatible(node, "xlnx,zynqmp-dwc3")) {
+ struct platform_device *pdev_parent;
+ struct dwc3_of_simple *simple;
+ void __iomem *regs;
+ u32 reg;
+
+ pdev_parent = of_find_device_by_node(node);
+ simple = platform_get_drvdata(pdev_parent);
+ regs = simple->regs;
+
+ reg = readl(regs + XLNX_USB_COHERENCY);
+ reg |= XLNX_USB_COHERENCY_ENABLE;
+ writel(reg, regs + XLNX_USB_COHERENCY);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(dwc3_enable_hw_coherency);
+
+void dwc3_mask_phy_reset(struct device *dev, bool mask)
+{
+ struct device_node *node = of_get_parent(dev->of_node);
+
+ /* This is only valid for versal platforms */
+ if (of_device_is_compatible(node, "xlnx,versal-dwc3")) {
+ struct platform_device *pdev_parent;
+ struct dwc3_of_simple *simple;
+ u32 reg;
+
+ pdev_parent = of_find_device_by_node(node);
+ simple = platform_get_drvdata(pdev_parent);
+
+ reg = readl(simple->regs + XLNX_USB_PHY_RST);
+
+ if (mask)
+ /*
+ * Mask the phy reset signal from controller
+ * reaching ULPI phy. This can be done by
+ * writing 0 into usb2_phy_reset register
+ */
+ reg &= ~XLNX_PHY_RST_MASK;
+ else
+ /*
+ * Allow phy reset signal from controller to
+ * reset ULPI phy. This can be done by writing
+ * 0x1 into usb2_phy_reset register
+ */
+ reg |= XLNX_PHY_RST_MASK;
+
+ writel(reg, simple->regs + XLNX_USB_PHY_RST);
+ }
+}
+EXPORT_SYMBOL(dwc3_mask_phy_reset);
+
+void dwc3_set_simple_data(struct dwc3 *dwc)
+{
+ struct device_node *node = of_get_parent(dwc->dev->of_node);
+
+ if (node && (of_device_is_compatible(node, "xlnx,zynqmp-dwc3") ||
+ of_device_is_compatible(node, "xlnx,versal-dwc3"))) {
+ struct platform_device *pdev_parent;
+ struct dwc3_of_simple *simple;
+
+ pdev_parent = of_find_device_by_node(node);
+ simple = platform_get_drvdata(pdev_parent);
+
+ /* Set (struct dwc3 *) to simple->dwc for future use */
+ simple->dwc = dwc;
+ }
+}
+EXPORT_SYMBOL(dwc3_set_simple_data);
+
+void dwc3_simple_check_quirks(struct dwc3 *dwc)
+{
+ struct device_node *node = of_get_parent(dwc->dev->of_node);
+
+ if (node && of_device_is_compatible(node, "xlnx,zynqmp-dwc3")) {
+ struct platform_device *pdev_parent;
+ struct dwc3_of_simple *simple;
+
+ pdev_parent = of_find_device_by_node(node);
+ simple = platform_get_drvdata(pdev_parent);
+
+ /* Add snps,dis_u3_susphy_quirk */
+ dwc->dis_u3_susphy_quirk = simple->dis_u3_susphy_quirk;
+
+ }
+}
+EXPORT_SYMBOL(dwc3_simple_check_quirks);
+
+void dwc3_simple_wakeup_capable(struct device *dev, bool wakeup)
+{
+ struct device_node *node = of_node_get(dev->parent->of_node);
+
+ /* check for valid parent node */
+ while (node) {
+ if (!of_device_is_compatible(node, "xlnx,zynqmp-dwc3") &&
+ !of_device_is_compatible(node, "xlnx,versal-dwc3"))
+ node = of_get_next_parent(node);
+ else
+ break;
+ }
+
+ if (node) {
+ struct platform_device *pdev_parent;
+ struct dwc3_of_simple *simple;
+
+ pdev_parent = of_find_device_by_node(node);
+ simple = platform_get_drvdata(pdev_parent);
+
+ /* Set wakeup capable as true or false */
+ simple->wakeup_capable = wakeup;
+
+ /* Allow D3 state if wakeup capable only */
+ simple->enable_d3_suspend = wakeup;
+ }
+}
+EXPORT_SYMBOL(dwc3_simple_wakeup_capable);
+
+static int dwc3_simple_set_phydata(struct dwc3_of_simple *simple)
+{
+ struct device *dev = simple->dev;
+ struct device_node *np = dev->of_node;
+ struct phy *phy;
+
+ np = of_get_next_child(np, NULL);
+
+ if (np) {
+ phy = of_phy_get(np, "usb3-phy");
+ if (IS_ERR(phy)) {
+ dev_err(dev, "%s: Can't find usb3-phy\n", __func__);
+ return PTR_ERR(phy);
+ }
+
+ /* Store phy for future usage */
+ simple->phy = phy;
+
+ /* assign USB vendor regs addr to phy platform_data */
+ phy->dev.platform_data = simple->regs;
+
+ phy_put(dev, phy);
+ } else {
+ dev_err(dev, "%s: Can't find child node\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dwc3_dis_u3phy_suspend(struct platform_device *pdev,
+ struct dwc3_of_simple *simple)
+{
+ char *soc_rev;
+
+ /* The below is only valid for ZynqMP SOC */
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "xlnx,zynqmp-dwc3")) {
+ /* read Silicon version using nvmem driver */
+ soc_rev = zynqmp_nvmem_get_silicon_version(&pdev->dev,
+ "soc_revision");
+
+ if (PTR_ERR(soc_rev) == -EPROBE_DEFER)
+ /* Do a deferred probe */
+ return -EPROBE_DEFER;
+ else if (!IS_ERR(soc_rev) && *soc_rev < ZYNQMP_SILICON_V4)
+ /* Add snps,dis_u3_susphy_quirk
+ * for SOC revision less than v4
+ */
+ simple->dis_u3_susphy_quirk = true;
+
+ if (!IS_ERR(soc_rev)) {
+ /* Update soc_rev to simple for future use */
+ simple->soc_rev = *soc_rev;
+
+ /* Clean soc_rev if got a valid pointer from nvmem
+ * driver else we may end up in kernel panic
+ */
+ kfree(soc_rev);
+ } else {
+ /* Return error */
+ return PTR_ERR(soc_rev);
+ }
+ }
+
+ return 0;
+}
+
static int dwc3_of_simple_probe(struct platform_device *pdev)
{
struct dwc3_of_simple *simple;
@@ -44,9 +290,43 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
if (!simple)
return -ENOMEM;
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops)) {
+ dev_err(dev, "Failed to get eemi_ops\n");
+ return PTR_ERR(eemi_ops);
+ }
+
platform_set_drvdata(pdev, simple);
simple->dev = dev;
+ if (of_device_is_compatible(pdev->dev.of_node, "xlnx,zynqmp-dwc3") ||
+ of_device_is_compatible(pdev->dev.of_node, "xlnx,versal-dwc3")) {
+
+ struct resource *res;
+ void __iomem *regs;
+
+ res = platform_get_resource(pdev,
+ IORESOURCE_MEM, 0);
+
+ regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ /* Store the usb control regs into simple for further usage */
+ simple->regs = regs;
+
+ /*
+ * ZynqMP silicon revision lesser than 4.0 needs to disable
+ * suspend of usb 3.0 phy.
+ */
+ ret = dwc3_dis_u3phy_suspend(pdev, simple);
+ if (ret)
+ return ret;
+ }
+
+ /* Set phy data for future use */
+ dwc3_simple_set_phydata(simple);
+
/*
* Some controllers need to toggle the usb3-otg reset before trying to
* initialize the PHY, otherwise the PHY times out.
@@ -144,6 +424,220 @@ static void dwc3_of_simple_shutdown(struct platform_device *pdev)
__dwc3_of_simple_teardown(simple);
}
+#ifdef CONFIG_PM
+
+static void dwc3_simple_vbus(struct dwc3 *dwc, bool vbus_off)
+{
+ u32 reg, addr;
+ u8 val;
+
+ if (vbus_off)
+ addr = ULPI_OTG_CTRL_CLEAR;
+ else
+ addr = ULPI_OTG_CTRL_SET;
+
+ val = (1 << OTG_CTRL_DRVVBUS_OFFSET);
+
+ reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_GUSB2PHYACC_ADDR(addr);
+ reg |= DWC3_GUSB2PHYACC_WRITE | val;
+ addr = DWC3_OF_ADDRESS(DWC3_GUSB2PHYACC(0));
+ writel(reg, dwc->regs + addr);
+}
+
+static void dwc3_usb2phycfg(struct dwc3 *dwc, bool suspend)
+{
+ u32 addr, reg;
+
+ addr = DWC3_OF_ADDRESS(DWC3_GUSB2PHYCFG(0));
+
+ if (suspend) {
+ reg = readl(dwc->regs + addr);
+ if (!(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
+ reg |= DWC3_GUSB2PHYCFG_SUSPHY;
+ writel(reg, (dwc->regs + addr));
+ }
+ } else {
+ reg = readl(dwc->regs + addr);
+ if ((reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
+ reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+ writel(reg, (dwc->regs + addr));
+ }
+ }
+}
+
+static int dwc3_zynqmp_power_req(struct dwc3 *dwc, bool on)
+{
+ u32 reg, retries;
+ void __iomem *reg_base;
+ struct platform_device *pdev_parent;
+ struct dwc3_of_simple *simple;
+ struct device_node *node = of_get_parent(dwc->dev->of_node);
+
+ pdev_parent = of_find_device_by_node(node);
+ simple = platform_get_drvdata(pdev_parent);
+ reg_base = simple->regs;
+
+ /* Check if entering into D3 state is allowed during suspend */
+ if ((simple->soc_rev < ZYNQMP_SILICON_V4) || !simple->enable_d3_suspend)
+ return 0;
+
+ if (!simple->phy)
+ return 0;
+
+ if (on) {
+ dev_dbg(dwc->dev, "trying to set power state to D0....\n");
+
+ /* Release USB core reset, which was asserted during D3 entry */
+ xpsgtr_usb_crst_release(simple->phy);
+
+ /* change power state to D0 */
+ writel(XLNX_REQ_PWR_STATE_D0,
+ reg_base + XLNX_USB_REQ_PWR_STATE);
+
+ /* wait till current state is changed to D0 */
+ retries = DWC3_PWR_STATE_RETRIES;
+ do {
+ reg = readl(reg_base + XLNX_USB_CUR_PWR_STATE);
+ if ((reg & XLNX_CUR_PWR_STATE_BITMASK) ==
+ XLNX_CUR_PWR_STATE_D0)
+ break;
+
+ udelay(DWC3_PWR_TIMEOUT);
+ } while (--retries);
+
+ if (!retries) {
+ dev_err(dwc->dev, "Failed to set power state to D0\n");
+ return -EIO;
+ }
+
+ dwc->is_d3 = false;
+
+ /* Clear Suspend PHY bit if dis_u2_susphy_quirk is set */
+ if (dwc->dis_u2_susphy_quirk)
+ dwc3_usb2phycfg(dwc, false);
+ } else {
+ dev_dbg(dwc->dev, "Trying to set power state to D3...\n");
+
+ /*
+ * Set Suspend PHY bit before entering D3 if
+ * dis_u2_susphy_quirk is set
+ */
+ if (dwc->dis_u2_susphy_quirk)
+ dwc3_usb2phycfg(dwc, true);
+
+ /* enable PME to wakeup from hibernation */
+ writel(XLNX_PME_ENABLE_SIG_GEN, reg_base + XLNX_USB_PME_ENABLE);
+
+ /* change power state to D3 */
+ writel(XLNX_REQ_PWR_STATE_D3,
+ reg_base + XLNX_USB_REQ_PWR_STATE);
+
+ /* wait till current state is changed to D3 */
+ retries = DWC3_PWR_STATE_RETRIES;
+ do {
+ reg = readl(reg_base + XLNX_USB_CUR_PWR_STATE);
+ if ((reg & XLNX_CUR_PWR_STATE_BITMASK) ==
+ XLNX_CUR_PWR_STATE_D3)
+ break;
+
+ udelay(DWC3_PWR_TIMEOUT);
+ } while (--retries);
+
+ if (!retries) {
+ dev_err(dwc->dev, "Failed to set power state to D3\n");
+ return -EIO;
+ }
+
+ /* Assert USB core reset after entering D3 state */
+ xpsgtr_usb_crst_assert(simple->phy);
+
+ dwc->is_d3 = true;
+ }
+
+ return 0;
+}
+
+static int dwc3_versal_power_req(struct dwc3 *dwc, bool on)
+{
+ int ret;
+ struct platform_device *pdev_parent;
+ struct dwc3_of_simple *simple;
+ struct device_node *node = of_get_parent(dwc->dev->of_node);
+
+ pdev_parent = of_find_device_by_node(node);
+ simple = platform_get_drvdata(pdev_parent);
+
+ if (!eemi_ops->ioctl || !eemi_ops->reset_assert)
+ return -EOPNOTSUPP;
+
+ if (on) {
+ dev_dbg(dwc->dev, "Trying to set power state to D0....\n");
+ ret = eemi_ops->reset_assert(VERSAL_USB_RESET_ID,
+ PM_RESET_ACTION_RELEASE);
+ if (ret < 0)
+ dev_err(simple->dev, "failed to De-assert Reset\n");
+
+ ret = eemi_ops->ioctl(VERSAL_USB_NODE_ID, IOCTL_USB_SET_STATE,
+ XLNX_REQ_PWR_STATE_D0,
+ DWC3_PWR_STATE_RETRIES * DWC3_PWR_TIMEOUT,
+ NULL);
+ if (ret < 0)
+ dev_err(simple->dev, "failed to enter D0 state\n");
+
+ dwc->is_d3 = false;
+
+ /* Clear Suspend PHY bit if dis_u2_susphy_quirk is set */
+ if (dwc->dis_u2_susphy_quirk)
+ dwc3_usb2phycfg(dwc, false);
+ } else {
+ dev_dbg(dwc->dev, "Trying to set power state to D3...\n");
+
+ /*
+ * Set Suspend PHY bit before entering D3 if
+ * dis_u2_susphy_quirk is set
+ */
+ if (dwc->dis_u2_susphy_quirk)
+ dwc3_usb2phycfg(dwc, true);
+
+ ret = eemi_ops->ioctl(VERSAL_USB_NODE_ID, IOCTL_USB_SET_STATE,
+ XLNX_REQ_PWR_STATE_D3,
+ DWC3_PWR_STATE_RETRIES * DWC3_PWR_TIMEOUT,
+ NULL);
+ if (ret < 0)
+ dev_err(simple->dev, "failed to enter D3 state\n");
+
+ ret = eemi_ops->reset_assert(VERSAL_USB_RESET_ID,
+ PM_RESET_ACTION_ASSERT);
+ if (ret < 0)
+ dev_err(simple->dev, "failed to assert Reset\n");
+
+ dwc->is_d3 = true;
+ }
+
+ return ret;
+}
+
+int dwc3_set_usb_core_power(struct dwc3 *dwc, bool on)
+{
+ int ret;
+ struct device_node *node = of_get_parent(dwc->dev->of_node);
+
+ if (of_device_is_compatible(node, "xlnx,zynqmp-dwc3"))
+ /* Set D3/D0 state for ZynqMP */
+ ret = dwc3_zynqmp_power_req(dwc, on);
+ else if (of_device_is_compatible(node, "xlnx,versal-dwc3"))
+ /* Set D3/D0 state for Versal */
+ ret = dwc3_versal_power_req(dwc, on);
+ else
+ /* This is only for Xilinx devices */
+ return 0;
+
+ return ret;
+}
+EXPORT_SYMBOL(dwc3_set_usb_core_power);
+
+#endif
+
static int __maybe_unused dwc3_of_simple_runtime_suspend(struct device *dev)
{
struct dwc3_of_simple *simple = dev_get_drvdata(dev);
@@ -164,6 +658,14 @@ static int __maybe_unused dwc3_of_simple_suspend(struct device *dev)
{
struct dwc3_of_simple *simple = dev_get_drvdata(dev);
+ if (!simple->wakeup_capable && !simple->dwc->is_d3) {
+ /* Ask ULPI to turn OFF Vbus */
+ dwc3_simple_vbus(simple->dwc, true);
+
+ /* Disable the clocks */
+ clk_bulk_disable(simple->num_clocks, simple->clks);
+ }
+
if (simple->need_reset)
reset_control_assert(simple->resets);
@@ -173,10 +675,21 @@ static int __maybe_unused dwc3_of_simple_suspend(struct device *dev)
static int __maybe_unused dwc3_of_simple_resume(struct device *dev)
{
struct dwc3_of_simple *simple = dev_get_drvdata(dev);
+ int ret;
+
+ if (simple->wakeup_capable || simple->dwc->is_d3)
+ return 0;
+
+ ret = clk_bulk_enable(simple->num_clocks, simple->clks);
+ if (ret)
+ return ret;
if (simple->need_reset)
reset_control_deassert(simple->resets);
+ /* Ask ULPI to turn ON Vbus */
+ dwc3_simple_vbus(simple->dwc, false);
+
return 0;
}
@@ -189,6 +702,7 @@ static const struct dev_pm_ops dwc3_of_simple_dev_pm_ops = {
static const struct of_device_id of_dwc3_simple_match[] = {
{ .compatible = "rockchip,rk3399-dwc3" },
{ .compatible = "xlnx,zynqmp-dwc3" },
+ { .compatible = "xlnx,versal-dwc3" },
{ .compatible = "cavium,octeon-7130-usb-uctl" },
{ .compatible = "sprd,sc9860-dwc3" },
{ .compatible = "amlogic,meson-axg-dwc3" },
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 6dee4dabc0a4..2074e4bfa73a 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -336,6 +336,11 @@ static int dwc3_ep0_handle_status(struct dwc3 *dwc,
usb_status |= 1 << USB_DEV_STAT_U2_ENABLED;
}
+ /* Sends the status indicating if the remote wakeup is
+ * supported by device.
+ */
+ usb_status |= dwc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
+
break;
case USB_RECIP_INTERFACE:
@@ -454,7 +459,12 @@ static int dwc3_ep0_handle_device(struct dwc3 *dwc,
switch (wValue) {
case USB_DEVICE_REMOTE_WAKEUP:
+ if (set)
+ dwc->remote_wakeup = 1;
+ else
+ dwc->remote_wakeup = 0;
break;
+
/*
* 9.4.1 says only only for SS, in AddressState only for
* default control pipe
@@ -471,6 +481,34 @@ static int dwc3_ep0_handle_device(struct dwc3 *dwc,
case USB_DEVICE_TEST_MODE:
ret = dwc3_ep0_handle_test(dwc, state, wIndex, set);
break;
+ case USB_DEVICE_B_HNP_ENABLE:
+ if (set) {
+ if (dwc->gadget.host_request_flag) {
+ struct usb_phy *phy =
+ usb_get_phy(USB_PHY_TYPE_USB3);
+
+ dwc->gadget.b_hnp_enable = 0;
+ dwc->gadget.host_request_flag = 0;
+ otg_start_hnp(phy->otg);
+ usb_put_phy(phy);
+ } else {
+ dwc->gadget.b_hnp_enable = 1;
+ }
+ } else
+ return -EINVAL;
+ break;
+
+ case USB_DEVICE_A_HNP_SUPPORT:
+ /* RH port supports HNP */
+ dev_dbg(dwc->dev,
+ "SET_FEATURE: USB_DEVICE_A_HNP_SUPPORT\n");
+ break;
+
+ case USB_DEVICE_A_ALT_HNP_SUPPORT:
+ /* other RH port does */
+ dev_dbg(dwc->dev,
+ "SET_FEATURE: USB_DEVICE_A_ALT_HNP_SUPPORT\n");
+ break;
default:
ret = -EINVAL;
}
@@ -752,7 +790,10 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
switch (ctrl->bRequest) {
case USB_REQ_GET_STATUS:
- ret = dwc3_ep0_handle_status(dwc, ctrl);
+ if (le16_to_cpu(ctrl->wIndex) == OTG_STS_SELECTOR)
+ ret = dwc3_ep0_delegate_req(dwc, ctrl);
+ else
+ ret = dwc3_ep0_handle_status(dwc, ctrl);
break;
case USB_REQ_CLEAR_FEATURE:
ret = dwc3_ep0_handle_feature(dwc, ctrl, 0);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index de3b92680935..8206552b9467 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -210,6 +210,9 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
{
struct dwc3 *dwc = dep->dwc;
+ if (dep->stream_capable && timer_pending(&req->stream_timeout_timer))
+ del_timer(&req->stream_timeout_timer);
+
dwc3_gadget_del_and_unmap_request(dep, req, status);
req->status = DWC3_REQUEST_STATUS_COMPLETED;
@@ -424,8 +427,7 @@ static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
return dwc3_send_gadget_ep_cmd(dep, cmd, &params);
}
-static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
- struct dwc3_trb *trb)
+dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, struct dwc3_trb *trb)
{
u32 offset = (char *) trb - (char *) dep->trb_pool;
@@ -540,6 +542,20 @@ static int dwc3_gadget_start_config(struct dwc3_ep *dep)
return 0;
}
+static void stream_timeout_function(struct timer_list *arg)
+{
+ struct dwc3_request *req = from_timer(req, arg,
+ stream_timeout_timer);
+ struct dwc3_ep *dep = req->dep;
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc3_stop_active_transfer(dep, true, true);
+ __dwc3_gadget_kick_transfer(dep);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+}
+
static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
{
const struct usb_ss_ep_comp_descriptor *comp_desc;
@@ -573,7 +589,8 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
- | DWC3_DEPCFG_STREAM_EVENT_EN;
+ | DWC3_DEPCFG_STREAM_EVENT_EN
+ | DWC3_DEPCFG_XFER_COMPLETE_EN;
dep->stream_capable = true;
}
@@ -611,7 +628,7 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
* Caller should take care of locking. Execute all necessary commands to
* initialize a HW endpoint so it can be used by a gadget driver.
*/
-static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
+int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
{
const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
struct dwc3 *dwc = dep->dwc;
@@ -619,7 +636,7 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
u32 reg;
int ret;
- if (!(dep->flags & DWC3_EP_ENABLED)) {
+ if (!(dep->flags & DWC3_EP_ENABLED) || dwc->is_hibernated) {
ret = dwc3_gadget_start_config(dep);
if (ret)
return ret;
@@ -629,7 +646,7 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
if (ret)
return ret;
- if (!(dep->flags & DWC3_EP_ENABLED)) {
+ if (!(dep->flags & DWC3_EP_ENABLED) || dwc->is_hibernated) {
struct dwc3_trb *trb_st_hw;
struct dwc3_trb *trb_link;
@@ -643,11 +660,13 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
if (usb_endpoint_xfer_control(desc))
goto out;
- /* Initialize the TRB ring */
- dep->trb_dequeue = 0;
- dep->trb_enqueue = 0;
- memset(dep->trb_pool, 0,
- sizeof(struct dwc3_trb) * DWC3_TRB_NUM);
+ if (!dwc->is_hibernated) {
+ /* Initialize the TRB ring */
+ dep->trb_dequeue = 0;
+ dep->trb_enqueue = 0;
+ memset(dep->trb_pool, 0,
+ sizeof(struct dwc3_trb) * DWC3_TRB_NUM);
+ }
/* Link TRB. The HWO bit is never reset */
trb_st_hw = &dep->trb_pool[0];
@@ -663,8 +682,8 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
* Issue StartTransfer here with no-op TRB so we can always rely on No
* Response Update Transfer command.
*/
- if ((usb_endpoint_xfer_bulk(desc) && !dep->stream_capable) ||
- usb_endpoint_xfer_int(desc)) {
+ if (((usb_endpoint_xfer_bulk(desc) && !dep->stream_capable) ||
+ usb_endpoint_xfer_int(desc)) && !dwc->is_hibernated) {
struct dwc3_gadget_ep_cmd_params params;
struct dwc3_trb *trb;
dma_addr_t trb_dma;
@@ -690,8 +709,6 @@ out:
return 0;
}
-static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
- bool interrupt);
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
struct dwc3_request *req;
@@ -728,7 +745,7 @@ static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
*
* Caller should take care of locking.
*/
-static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
+int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
struct dwc3 *dwc = dep->dwc;
u32 reg;
@@ -1005,6 +1022,16 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
if (chain)
trb->ctrl |= DWC3_TRB_CTRL_CHN;
+	/*
+	 * To start a transfer on another stream number, the endpoint must
+	 * release the previously acquired transfer resource. There are
+	 * two ways to do that: 1. end transfer 2. set LST bit of ctrl TRB.
+	 *
+	 * Using the LST bit in the control TRB saves the time of ending
+	 * the transfer, hence improved performance.
+	 */
+ else if (dep->stream_capable)
+ trb->ctrl |= DWC3_TRB_CTRL_LST;
if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id);
@@ -1222,6 +1249,6 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep)
static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep);
-static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
+int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
{
struct dwc3_gadget_ep_cmd_params params;
@@ -1278,6 +1306,13 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
return ret;
}
+ if (starting && dep->stream_capable) {
+ req->stream_timeout_timer.expires = jiffies +
+ msecs_to_jiffies(STREAM_TIMEOUT_MS);
+ mod_timer(&req->stream_timeout_timer,
+ req->stream_timeout_timer.expires);
+ }
+
return 0;
}
@@ -1436,6 +1471,7 @@ static int __dwc3_gadget_start_isoc(struct dwc3_ep *dep)
return ret;
}
+static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc);
static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
struct dwc3 *dwc = dep->dwc;
@@ -1460,6 +1496,10 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
req->request.actual = 0;
req->request.status = -EINPROGRESS;
+ if (dep->stream_capable)
+ timer_setup(&req->stream_timeout_timer,
+ stream_timeout_function, 0);
+
trace_dwc3_ep_queue(req);
list_add_tail(&req->list, &dep->pending_list);
@@ -1471,6 +1511,13 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
return 0;
}
+ /* If core is hibernated, need to wakeup (remote wakeup) */
+ if (dwc->is_hibernated) {
+ dwc->force_hiber_wake = true;
+ gadget_wakeup_interrupt(dwc);
+ dwc->force_hiber_wake = false;
+ }
+
/*
* NOTICE: Isochronous endpoints should NEVER be prestarted. We must
* wait for a XferNotReady event so we will know what's the current
@@ -1484,10 +1531,20 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
!(dep->flags & DWC3_EP_TRANSFER_STARTED))
return 0;
- if ((dep->flags & DWC3_EP_PENDING_REQUEST)) {
- if (!(dep->flags & DWC3_EP_TRANSFER_STARTED)) {
- return __dwc3_gadget_start_isoc(dep);
+ if (dep->flags & DWC3_EP_PENDING_REQUEST) {
+ if (dep->flags & DWC3_EP_TRANSFER_STARTED) {
+ /*
+ * If there are not entries in request list
+ * then PENDING flag would be set, so that END
+ * TRANSFER is issued when an entry is added
+ * into request list.
+ */
+ dwc3_stop_active_transfer(dep, true, true);
+ dep->flags = DWC3_EP_ENABLED;
}
+
+ /* Rest is taken care by DWC3_DEPEVT_XFERNOTREADY */
+ return 0;
}
}
@@ -1568,6 +1625,9 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
spin_lock_irqsave(&dwc->lock, flags);
+ if (dep->stream_capable && timer_pending(&req->stream_timeout_timer))
+ del_timer(&req->stream_timeout_timer);
+
list_for_each_entry(r, &dep->pending_list, list) {
if (r == req)
break;
@@ -1846,7 +1906,7 @@ static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
return 0;
}
-static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
+int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
{
u32 reg;
u32 timeout = 500;
@@ -1921,7 +1981,7 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
return ret;
}
-static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
+void dwc3_gadget_enable_irq(struct dwc3 *dwc)
{
u32 reg;
@@ -1935,13 +1995,17 @@ static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
DWC3_DEVTEN_USBRSTEN |
DWC3_DEVTEN_DISCONNEVTEN);
+ /* Enable hibernation IRQ */
+ if (dwc->has_hibernation)
+ reg |= DWC3_DEVTEN_HIBERNATIONREQEVTEN;
+
if (dwc->revision < DWC3_REVISION_250A)
reg |= DWC3_DEVTEN_ULSTCNGEN;
dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
}
-static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
+void dwc3_gadget_disable_irq(struct dwc3 *dwc)
{
/* mask all interrupts */
dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
@@ -2025,6 +2089,16 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
dwc3_gadget_setup_nump(dwc);
+ /* For OTG mode, check if the core is currently in Host mode.
+ * This is not an error condition as there are times when the core is
+ * working as host and kernel is told to initiate bind operation with
+ * gadget class driver module.
+ * The below remaining operations are handled in OTG driver whenever
+ * required.
+ */
+ if (dwc3_readl(dwc->regs, DWC3_GSTS) & DWC3_GSTS_CUR_MODE)
+ return 0;
+
/* Start with SuperSpeed Default */
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
@@ -2058,6 +2132,7 @@ err0:
return ret;
}
+static irqreturn_t wakeup_interrupt(int irq, void *_dwc);
static int dwc3_gadget_start(struct usb_gadget *g,
struct usb_gadget_driver *driver)
{
@@ -2075,6 +2150,18 @@ static int dwc3_gadget_start(struct usb_gadget *g,
goto err0;
}
+ /* look for wakeup interrupt if hibernation is supported */
+ if (dwc->has_hibernation) {
+ irq = dwc->irq_wakeup;
+ ret = devm_request_irq(dwc->dev, irq, wakeup_interrupt,
+ IRQF_SHARED, "usb-wakeup", dwc);
+ if (ret) {
+ dev_err(dwc->dev, "failed to request wakeup irq #%d --> %d\n",
+ irq, ret);
+ goto err0;
+ }
+ }
+
spin_lock_irqsave(&dwc->lock, flags);
if (dwc->gadget_driver) {
dev_err(dwc->dev, "%s is already bound to %s\n",
@@ -2095,7 +2182,10 @@ static int dwc3_gadget_start(struct usb_gadget *g,
err1:
spin_unlock_irqrestore(&dwc->lock, flags);
- free_irq(irq, dwc);
+ if (dwc->irq_gadget)
+ free_irq(dwc->irq_gadget, dwc->ev_buf);
+ if (dwc->irq_wakeup)
+ free_irq(dwc->irq_wakeup, dwc);
err0:
return ret;
@@ -2500,6 +2590,14 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
if (event->status & DEPEVT_STATUS_SHORT && !chain)
return 1;
+ if ((event->status & DEPEVT_STATUS_IOC) &&
+ (trb->ctrl & DWC3_TRB_CTRL_IOC))
+ return 1;
+
+ if ((event->status & DEPEVT_STATUS_LST) &&
+ (trb->ctrl & DWC3_TRB_CTRL_LST))
+ return 1;
+
if ((trb->ctrl & DWC3_TRB_CTRL_IOC) ||
(trb->ctrl & DWC3_TRB_CTRL_LST))
return 1;
@@ -2569,9 +2667,13 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
req->request.actual = req->request.length - req->remaining;
- if (!dwc3_gadget_ep_request_completed(req)) {
- __dwc3_gadget_kick_transfer(dep);
- goto out;
+ if ((!dwc3_gadget_ep_request_completed(req) &&
+ req->num_pending_sgs) || req->num_pending_sgs) {
+ if (!(event->status &
+ (DEPEVT_STATUS_SHORT | DEPEVT_STATUS_LST))) {
+ __dwc3_gadget_kick_transfer(dep);
+ goto out;
+ }
}
dwc3_gadget_giveback(dep, req, status);
@@ -2614,7 +2716,8 @@ static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
if (event->status & DEPEVT_STATUS_BUSERR)
status = -ECONNRESET;
- if (event->status & DEPEVT_STATUS_MISSED_ISOC) {
+ if ((event->status & DEPEVT_STATUS_MISSED_ISOC) &&
+ usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
status = -EXDEV;
if (list_empty(&dep->started_list))
@@ -2623,6 +2726,23 @@ static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);
+ if (dep->stream_capable && !list_empty(&dep->started_list))
+ __dwc3_gadget_kick_transfer(dep);
+
+ if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
+ list_empty(&dep->started_list)) {
+ if (list_empty(&dep->pending_list))
+ /*
+ * If there is no entry in request list then do
+ * not issue END TRANSFER now. Just set PENDING
+ * flag, so that END TRANSFER is issued when an
+ * entry is added into request list.
+ */
+ dep->flags |= DWC3_EP_PENDING_REQUEST;
+ else
+ stop = true;
+ }
+
if (stop)
dwc3_stop_active_transfer(dep, true, true);
@@ -2659,6 +2779,28 @@ static void dwc3_gadget_endpoint_transfer_not_ready(struct dwc3_ep *dep,
(void) __dwc3_gadget_start_isoc(dep);
}
+static void dwc3_endpoint_stream_event(struct dwc3 *dwc,
+ const struct dwc3_event_depevt *event)
+{
+ struct dwc3_ep *dep;
+ struct dwc3_request *req;
+ u8 epnum = event->endpoint_number;
+ u8 stream_id;
+
+ dep = dwc->eps[epnum];
+
+ stream_id = event->parameters;
+
+ /* Check for request matching the streamid and delete the timer */
+ list_for_each_entry(req, &dep->started_list, list) {
+ if (req->request.stream_id == stream_id) {
+ if (timer_pending(&req->stream_timeout_timer))
+ del_timer(&req->stream_timeout_timer);
+ break;
+ }
+ }
+}
+
static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
const struct dwc3_event_depevt *event)
{
@@ -2683,12 +2825,21 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
}
switch (event->endpoint_event) {
+ case DWC3_DEPEVT_XFERCOMPLETE:
+ if (!dep->stream_capable)
+ break;
+ dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+ /* Fall Through */
case DWC3_DEPEVT_XFERINPROGRESS:
dwc3_gadget_endpoint_transfer_in_progress(dep, event);
break;
case DWC3_DEPEVT_XFERNOTREADY:
dwc3_gadget_endpoint_transfer_not_ready(dep, event);
break;
+ case DWC3_DEPEVT_STREAMEVT:
+ if (event->status == DEPEVT_STREAMEVT_FOUND)
+ dwc3_endpoint_stream_event(dwc, event);
+ break;
case DWC3_DEPEVT_EPCMDCMPLT:
cmd = DEPEVT_PARAMETER_CMD(event->parameters);
@@ -2703,8 +2854,6 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
dep->flags &= ~DWC3_EP_DELAY_START;
}
break;
- case DWC3_DEPEVT_STREAMEVT:
- case DWC3_DEPEVT_XFERCOMPLETE:
case DWC3_DEPEVT_RXTXFIFOEVT:
break;
}
@@ -2749,8 +2898,8 @@ static void dwc3_reset_gadget(struct dwc3 *dwc)
}
}
-static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
- bool interrupt)
+void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
+ bool interrupt)
{
struct dwc3_gadget_ep_cmd_params params;
u32 cmd;
@@ -2800,6 +2949,13 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
else
dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
+
+	/*
+	 * When a transfer is stopped with the ForceRM bit false, it can be
+	 * restarted by passing resource_index in params; don't lose it.
+	 */
+ if (force)
+ dep->resource_index = 0;
}
static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
@@ -2837,6 +2993,15 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
dwc3_disconnect_gadget(dwc);
+ /* In USB 2.0, to avoid hibernation interrupt at the time of connection
+ * clear DWC3_DCTL_KEEP_CONNECT bit.
+ */
+ if (dwc->has_hibernation) {
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg &= ~DWC3_DCTL_KEEP_CONNECT;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ }
+
dwc->gadget.speed = USB_SPEED_UNKNOWN;
dwc->setup_packet_pending = false;
usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
@@ -3011,6 +3176,16 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
}
/*
+ * In USB 2.0, to avoid hibernation interrupt at the time of connection
+ * set DWC3_DCTL_KEEP_CONNECT bit here
+ */
+ if (dwc->has_hibernation) {
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg |= DWC3_DCTL_KEEP_CONNECT;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ }
+
+ /*
* Configure PHY via GUSB3PIPECTLn if required.
*
* Update GTXFIFOSIZn
@@ -3033,6 +3208,17 @@ static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
}
}
+static irqreturn_t wakeup_interrupt(int irq, void *_dwc)
+{
+ struct dwc3 *dwc = (struct dwc3 *)_dwc;
+
+ spin_lock(&dwc->lock);
+ gadget_wakeup_interrupt(dwc);
+ spin_unlock(&dwc->lock);
+
+ return IRQ_HANDLED;
+}
+
static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
unsigned int evtinfo)
{
@@ -3160,10 +3346,12 @@ static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
* STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
* Device Fallback from SuperSpeed
*/
- if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
+ if ((!!is_ss ^ (dwc->speed >= DWC3_DSTS_SUPERSPEED)) &&
+ (!(dwc->has_hibernation)))
return;
/* enter hibernation here */
+ gadget_hibernation_interrupt(dwc);
}
static void dwc3_gadget_interrupt(struct dwc3 *dwc,
@@ -3257,12 +3445,18 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
*/
evt->lpos = (evt->lpos + 4) % evt->length;
left -= 4;
+
+ if (dwc->is_hibernated)
+ break;
}
evt->count = 0;
evt->flags &= ~DWC3_EVENT_PENDING;
ret = IRQ_HANDLED;
+ if (dwc->is_hibernated)
+ return ret;
+
/* Unmask interrupt */
reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
reg &= ~DWC3_GEVNTSIZ_INTMASK;
@@ -3304,6 +3498,9 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
return IRQ_HANDLED;
}
+ if (dwc->is_hibernated)
+ return IRQ_HANDLED;
+
/*
* With PCIe legacy interrupt, test shows that top-half irq handler can
* be called again after HW interrupt deassertion. Check if bottom-half
@@ -3347,7 +3544,7 @@ static irqreturn_t dwc3_interrupt(int irq, void *_evt)
static int dwc3_gadget_get_irq(struct dwc3 *dwc)
{
struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
- int irq;
+ int irq, irq_hiber;
irq = platform_get_irq_byname_optional(dwc3_pdev, "peripheral");
if (irq > 0)
@@ -3365,12 +3562,25 @@ static int dwc3_gadget_get_irq(struct dwc3 *dwc)
irq = platform_get_irq(dwc3_pdev, 0);
if (irq > 0)
- goto out;
+ dwc->irq_gadget = irq;
- if (!irq)
- irq = -EINVAL;
+ if (irq == -EPROBE_DEFER)
+ goto out;
out:
+ /* look for wakeup interrupt if hibernation is supported */
+ if (dwc->has_hibernation) {
+
+ irq_hiber = platform_get_irq_byname(dwc3_pdev, "hiber");
+ if (irq_hiber > 0) {
+ dwc->irq_wakeup = irq_hiber;
+ } else {
+ irq_hiber = platform_get_irq(dwc3_pdev, 2);
+ if (irq_hiber > 0)
+ dwc->irq_wakeup = irq_hiber;
+ }
+ }
+
return irq;
}
@@ -3422,6 +3632,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
dwc->gadget.sg_supported = true;
dwc->gadget.name = "dwc3-gadget";
dwc->gadget.lpm_capable = true;
+ dwc->gadget.is_otg = dwc->dr_mode == USB_DR_MODE_OTG;
/*
* FIXME We might be setting max_speed to <SUPER, however versions
@@ -3461,6 +3672,28 @@ int dwc3_gadget_init(struct dwc3 *dwc)
goto err4;
}
+ if (dwc->dr_mode == USB_DR_MODE_OTG) {
+ struct usb_phy *phy;
+
+ phy = usb_get_phy(USB_PHY_TYPE_USB3);
+ if (!IS_ERR(phy)) {
+ if (phy && phy->otg) {
+ ret = otg_set_peripheral(phy->otg,
+ &dwc->gadget);
+ if (ret) {
+ dev_err(dwc->dev,
+ "otg_set_peripheral failed\n");
+ usb_put_phy(phy);
+ phy = NULL;
+ goto err4;
+ }
+ } else {
+ usb_put_phy(phy);
+ phy = NULL;
+ }
+ }
+ }
+
dwc3_gadget_set_speed(&dwc->gadget, dwc->maximum_speed);
return 0;
@@ -3501,6 +3734,16 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
if (!dwc->gadget_driver)
return 0;
+ if (dwc->is_hibernated) {
+ /*
+ * As we are about to suspend, wake the controller from
+ * D3 & hibernation states
+ */
+ dwc->force_hiber_wake = true;
+ gadget_wakeup_interrupt(dwc);
+ dwc->force_hiber_wake = false;
+ }
+
dwc3_gadget_run_stop(dwc, false, false);
dwc3_disconnect_gadget(dwc);
__dwc3_gadget_stop(dwc);
@@ -3511,6 +3754,7 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
int dwc3_gadget_resume(struct dwc3 *dwc)
{
int ret;
+ u32 reg;
if (!dwc->gadget_driver)
return 0;
@@ -3523,6 +3767,15 @@ int dwc3_gadget_resume(struct dwc3 *dwc)
if (ret < 0)
goto err1;
+ /* In USB 2.0, to avoid hibernation interrupt at the time of connection
+ * set DWC3_DCTL_KEEP_CONNECT bit.
+ */
+ if (dwc->has_hibernation) {
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg |= DWC3_DCTL_KEEP_CONNECT;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ }
+
return 0;
err1:
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index fbc7d8013f0b..26d41e0da515 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -54,6 +54,14 @@ struct dwc3;
/* U2 Device exit Latency */
#define DWC3_DEFAULT_U2_DEV_EXIT_LAT 0x1FF /* Less then 511 microsec */
+/* Below used in hibernation */
+#define DWC3_NON_STICKY_RESTORE_RETRIES 500
+#define DWC3_NON_STICKY_SAVE_RETRIES 500
+#define DWC3_DEVICE_CTRL_READY_RETRIES 20000
+#define DWC3_NON_STICKY_RESTORE_DELAY 100
+#define DWC3_NON_STICKY_SAVE_DELAY 100
+#define DWC3_DEVICE_CTRL_READY_DELAY 5
+
/* -------------------------------------------------------------------------- */
#define to_dwc3_request(r) (container_of(r, struct dwc3_request, request))
@@ -106,11 +114,22 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
void dwc3_ep0_interrupt(struct dwc3 *dwc,
const struct dwc3_event_depevt *event);
void dwc3_ep0_out_start(struct dwc3 *dwc);
+void dwc3_gadget_enable_irq(struct dwc3 *dwc);
+void dwc3_gadget_disable_irq(struct dwc3 *dwc);
int __dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
gfp_t gfp_flags);
int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol);
+int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action);
+int __dwc3_gadget_ep_disable(struct dwc3_ep *dep);
+int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep);
+void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
+ bool interrupt);
+int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend);
+dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, struct dwc3_trb *trb);
+void gadget_hibernation_interrupt(struct dwc3 *dwc);
+void gadget_wakeup_interrupt(struct dwc3 *dwc);
/**
* dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW
diff --git a/drivers/usb/dwc3/gadget_hibernation.c b/drivers/usb/dwc3/gadget_hibernation.c
new file mode 100644
index 000000000000..850c59625892
--- /dev/null
+++ b/drivers/usb/dwc3/gadget_hibernation.c
@@ -0,0 +1,568 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * gadget_hibernation.c - DesignWare USB3 DRD Controller gadget hibernation file
+ *
+ * This file has routines to handle hibernation and wakeup events in gadget mode
+ *
+ * Author: Mayank Adesara <madesara@xilinx.com>
+ * Author: Anurag Kumar Vulisha <anuragku@xilinx.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "core.h"
+#include "gadget.h"
+#include "debug.h"
+#include "io.h"
+
+/* array of registers to save on hibernation and restore them on wakeup */
+static u32 save_reg_addr[] = {
+ DWC3_DCTL,
+ DWC3_DCFG,
+ DWC3_DEVTEN
+};
+
+/*
+ * wait_timeout - Waits until timeout
+ * @wait_time: time to wait in jiffies
+ */
+static void wait_timeout(unsigned long wait_time)
+{
+ unsigned long timeout = jiffies + wait_time;
+
+ while (!time_after_eq(jiffies, timeout))
+ cpu_relax();
+}
+
+/**
+ * save_regs - Saves registers on hibernation
+ * @dwc: pointer to our controller context structure
+ *
+ * Returns 0 on success otherwise negative errno.
+ */
+static int save_regs(struct dwc3 *dwc)
+{
+ int i;
+
+ if (!dwc->saved_regs) {
+ dwc->saved_regs = devm_kmalloc(dwc->dev,
+ sizeof(save_reg_addr),
+ GFP_KERNEL);
+ if (!dwc->saved_regs) {
+ dev_err(dwc->dev, "Not enough memory to save regs\n");
+ return -ENOMEM;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(save_reg_addr); i++)
+ dwc->saved_regs[i] = dwc3_readl(dwc->regs,
+ save_reg_addr[i]);
+ return 0;
+}
+
+/**
+ * restore_regs - Restores registers on wakeup
+ * @dwc: pointer to our controller context structure
+ */
+static void restore_regs(struct dwc3 *dwc)
+{
+ int i;
+
+ if (!dwc->saved_regs) {
+ dev_warn(dwc->dev, "Regs not saved\n");
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(save_reg_addr); i++)
+ dwc3_writel(dwc->regs, save_reg_addr[i],
+ dwc->saved_regs[i]);
+}
+
+/**
+ * restart_ep0_trans - Restarts EP0 transfer on wakeup
+ * @dwc: pointer to our controller context structure
+ * @epnum: endpoint number
+ *
+ * Returns 0 on success otherwise negative errno.
+ */
+static int restart_ep0_trans(struct dwc3 *dwc, int epnum)
+{
+ struct dwc3_ep *dep = dwc->eps[epnum];
+ struct dwc3_trb *trb = dwc->ep0_trb;
+ struct dwc3_gadget_ep_cmd_params params;
+ int ret;
+ u32 cmd;
+
+ memset(&params, 0, sizeof(params));
+ params.param0 = upper_32_bits(dwc->ep0_trb_addr);
+ params.param1 = lower_32_bits(dwc->ep0_trb_addr);
+
+ /* set HWO bit back to 1 and restart transfer */
+ trb->ctrl |= DWC3_TRB_CTRL_HWO;
+
+	/* Clear the TRBSTS field */
+ trb->size &= ~(0x0F << 28);
+
+ cmd = DWC3_DEPCMD_STARTTRANSFER | DWC3_DEPCMD_PARAM(0);
+ ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
+ if (ret < 0) {
+ dev_err(dwc->dev, "failed to restart transfer on %s\n",
+ dep->name);
+ return ret;
+ }
+
+ dwc3_gadget_ep_get_transfer_index(dep);
+
+ return 0;
+}
+
+extern dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
+ struct dwc3_trb *trb);
+/**
+ * restore_eps - Restores non EP0 eps in the same state as they were before
+ * hibernation
+ * @dwc: pointer to our controller context structure
+ *
+ * Returns 0 on success otherwise negative errno.
+ */
+static int restore_eps(struct dwc3 *dwc)
+{
+ int epnum, ret;
+
+ for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
+ /* Enable the endpoint */
+ struct dwc3_ep *dep = dwc->eps[epnum];
+
+ if (!dep)
+ continue;
+
+ if (!(dep->flags & DWC3_EP_ENABLED))
+ continue;
+
+ ret = __dwc3_gadget_ep_enable(dep, true);
+ if (ret) {
+ dev_err(dwc->dev, "failed to enable %s\n", dep->name);
+ return ret;
+ }
+ }
+
+ for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
+ struct dwc3_ep *dep = dwc->eps[epnum];
+
+ if (!dep)
+ continue;
+
+ if (!(dep->flags & DWC3_EP_ENABLED))
+ continue;
+
+ if (dep->flags & DWC3_EP_STALL) {
+ /* Set stall for the endpoint */
+ struct dwc3_gadget_ep_cmd_params params;
+
+ memset(&params, 0x00, sizeof(params));
+
+ ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
+ &params);
+ if (ret) {
+ dev_err(dwc->dev, "failed to set STALL on %s\n",
+ dep->name);
+ return ret;
+ }
+ } else {
+ u32 cmd;
+ struct dwc3_gadget_ep_cmd_params params;
+ struct dwc3_trb *trb;
+ u8 trb_dequeue = dep->trb_dequeue;
+
+ trb = &dep->trb_pool[trb_dequeue];
+
+ /*
+ * check the last processed TRBSTS field has value
+ * 4 (TRBInProgress), if yes resubmit the same TRB
+ */
+ if (DWC3_TRB_SIZE_TRBSTS(trb->size) ==
+ DWC3_TRB_STS_XFER_IN_PROG) {
+ /* Set the HWO bit */
+ trb->ctrl |= DWC3_TRB_CTRL_HWO;
+
+ /* Clear the TRBSTS field */
+ trb->size &= ~(0x0F << 28);
+
+ memset(&params, 0, sizeof(params));
+
+ /* Issue starttransfer */
+ params.param0 =
+ upper_32_bits(dwc3_trb_dma_offset(dep,
+ trb));
+ params.param1 =
+ lower_32_bits(dwc3_trb_dma_offset(dep,
+ trb));
+
+ cmd = DWC3_DEPCMD_STARTTRANSFER |
+ DWC3_DEPCMD_PARAM(0);
+
+ dwc3_send_gadget_ep_cmd(dep, cmd, &params);
+
+ dwc3_gadget_ep_get_transfer_index(dep);
+ } else {
+ ret = __dwc3_gadget_kick_transfer(dep);
+ if (ret) {
+ dev_err(dwc->dev,
+ "%s: restart transfer failed\n",
+ dep->name);
+ return ret;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * restore_ep0 - Restores EP0 in the same state as they were before hibernation
+ * @dwc: pointer to our controller context structure
+ *
+ * Returns 0 on success otherwise negative errno.
+ */
+static int restore_ep0(struct dwc3 *dwc)
+{
+ int epnum, ret;
+
+ for (epnum = 0; epnum < 2; epnum++) {
+ struct dwc3_ep *dep = dwc->eps[epnum];
+
+ if (!dep)
+ continue;
+
+ if (!(dep->flags & DWC3_EP_ENABLED))
+ continue;
+
+ ret = __dwc3_gadget_ep_enable(dep, true);
+ if (ret) {
+ dev_err(dwc->dev, "failed to enable %s\n", dep->name);
+ return ret;
+ }
+
+ if (dep->flags & DWC3_EP_STALL) {
+ struct dwc3_gadget_ep_cmd_params params;
+
+ memset(&params, 0x00, sizeof(params));
+
+ ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
+ &params);
+ if (ret) {
+ dev_err(dwc->dev, "failed to set STALL on %s\n",
+ dep->name);
+ return ret;
+ }
+ } else {
+ if (!dep->resource_index && epnum)
+ continue;
+
+ ret = restart_ep0_trans(dwc, epnum);
+ if (ret) {
+ dev_err(dwc->dev,
+ "failed to restart transfer on: %s\n",
+ dep->name);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * save_endpoint_state - Saves ep state on hibernation
+ * @dep: endpoint to get state
+ *
+ * Returns 0 on success otherwise negative errno.
+ */
+static int save_endpoint_state(struct dwc3_ep *dep)
+{
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_gadget_ep_cmd_params params;
+ int ret;
+
+ memset(&params, 0, sizeof(params));
+ ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_GETEPSTATE,
+ &params);
+ if (ret) {
+ dev_err(dwc->dev, "Failed to get endpoint state on %s\n",
+ dep->name);
+ return ret;
+ }
+
+ dep->saved_state = dwc3_readl(dep->regs, DWC3_DEPCMDPAR2);
+ return 0;
+}
+
+/**
+ * gadget_hibernation_interrupt - Interrupt handler of hibernation
+ * @dwc: pointer to our controller context structure
+ */
+void gadget_hibernation_interrupt(struct dwc3 *dwc)
+{
+ u32 epnum, reg;
+ int retries, ret;
+
+ /* Check if the link state is valid before hibernating */
+ switch (dwc3_gadget_get_link_state(dwc)) {
+ case DWC3_LINK_STATE_U3:
+ case DWC3_LINK_STATE_SS_DIS:
+ break;
+ default:
+ dev_dbg(dwc->dev,
+ "%s: Got fake hiber event\n", __func__);
+ return;
+ }
+
+ /* stop all active transfers and save endpoint status */
+ for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
+ struct dwc3_ep *dep = dwc->eps[epnum];
+
+ if (!dep)
+ continue;
+
+ if (!(dep->flags & DWC3_EP_ENABLED))
+ continue;
+
+ if (dep->flags & DWC3_EP_TRANSFER_STARTED)
+ dwc3_stop_active_transfer(dep, false, false);
+
+ save_endpoint_state(dep);
+ }
+
+ /* stop the controller */
+ dwc3_gadget_run_stop(dwc, false, true);
+ dwc->is_hibernated = true;
+
+ /*
+ * ack events, don't process them; h/w decrements the count by the value
+ * written
+ */
+ reg = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), reg);
+ dwc->ev_buf->count = 0;
+ dwc->ev_buf->flags &= ~DWC3_EVENT_PENDING;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+
+ /* disable keep connect if we are disconnected right now */
+ if (dwc3_gadget_get_link_state(dwc) == DWC3_LINK_STATE_SS_DIS) {
+ reg &= ~DWC3_DCTL_KEEP_CONNECT;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ } else {
+ reg |= DWC3_DCTL_KEEP_CONNECT;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ }
+
+ /* save generic registers */
+ save_regs(dwc);
+
+ /* initiate controller save state */
+ reg |= DWC3_DCTL_CSS;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+ /* wait till controller saves state */
+ retries = DWC3_NON_STICKY_SAVE_RETRIES;
+ do {
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+ if (!(reg & DWC3_DSTS_SSS))
+ break;
+
+ udelay(DWC3_NON_STICKY_SAVE_DELAY);
+ } while (--retries);
+
+	if (!retries) {
+ dev_err(dwc->dev, "USB core failed to save state\n");
+ goto err;
+ }
+
+ /* Set the controller as wakeup capable */
+ dwc3_simple_wakeup_capable(dwc->dev, true);
+
+ /* set USB core power state to D3 - power down */
+ ret = dwc3_set_usb_core_power(dwc, false);
+ if (ret < 0) {
+ dev_err(dwc->dev, "%s: Failed to hibernate\n", __func__);
+ /* call wakeup handler */
+ gadget_wakeup_interrupt(dwc);
+ return;
+ }
+
+ dev_info(dwc->dev, "Hibernated!\n");
+ return;
+
+err:
+ dev_err(dwc->dev, "Fail in handling Hibernation Interrupt\n");
+}
+
+/**
+ * gadget_wakeup_interrupt - Interrupt handler of wakeup
+ * @dwc: pointer to our controller context structure
+ */
+void gadget_wakeup_interrupt(struct dwc3 *dwc)
+{
+ u32 reg, link_state;
+ int ret, retries;
+ bool enter_hiber = false;
+
+ /* On USB 2.0 we observed back to back wakeup interrupts */
+ if (!dwc->is_hibernated) {
+ dev_err(dwc->dev, "Not in hibernated state\n");
+ goto err;
+ }
+
+ /* Restore power to USB core */
+ if (dwc3_set_usb_core_power(dwc, true)) {
+ dev_err(dwc->dev, "Failed to restore USB core power\n");
+ goto err;
+ }
+
+ /* Clear the controller wakeup capable flag */
+ dwc3_simple_wakeup_capable(dwc->dev, false);
+
+ /* Initialize the core and restore the saved registers */
+ dwc3_core_init(dwc);
+ restore_regs(dwc);
+
+ /* ask controller to save the non-sticky registers */
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg |= DWC3_DCTL_CRS;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+ /* Wait till non-sticky registers are restored */
+ retries = DWC3_NON_STICKY_RESTORE_RETRIES;
+ do {
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+ if (!(reg & DWC3_DSTS_RSS))
+ break;
+
+ udelay(DWC3_NON_STICKY_RESTORE_DELAY);
+ } while (--retries);
+
+	if (!retries || (reg & DWC3_DSTS_SRE)) {
+ dev_err(dwc->dev, "Failed to restore non-sticky regs\n");
+ goto err;
+ }
+
+ /* restore ep0 endpoints */
+ ret = restore_ep0(dwc);
+ if (ret) {
+		dev_err(dwc->dev, "Failed in restoring EP0 states\n");
+ goto err;
+ }
+
+ /* start the controller */
+ ret = dwc3_gadget_run_stop(dwc, true, false);
+ if (ret < 0) {
+ dev_err(dwc->dev, "USB core failed to start on wakeup\n");
+ goto err;
+ }
+
+ /* Wait until device controller is ready */
+ retries = DWC3_DEVICE_CTRL_READY_RETRIES;
+ while (--retries) {
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+ if (reg & DWC3_DSTS_DCNRD)
+ udelay(DWC3_DEVICE_CTRL_READY_DELAY);
+ else
+ break;
+ }
+
+	if (!retries) {
+ dev_err(dwc->dev, "USB core failed to restore controller\n");
+ goto err;
+ }
+
+	/*
+	 * As some spurious signals may also cause a wakeup event, wait for
+	 * some time and check the link state to confirm the wakeup is real
+	 */
+ wait_timeout(msecs_to_jiffies(10));
+
+ link_state = dwc3_gadget_get_link_state(dwc);
+
+ /* check if the link state is in a valid state */
+ switch (link_state) {
+ case DWC3_LINK_STATE_RESET:
+ /* Reset devaddr */
+ reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+ reg &= ~(DWC3_DCFG_DEVADDR_MASK);
+ dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+
+ /* issue recovery on the link */
+ ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
+ if (ret < 0) {
+ dev_err(dwc->dev,
+ "Failed to set link state to Recovery\n");
+ goto err;
+ }
+
+ break;
+
+ case DWC3_LINK_STATE_SS_DIS:
+ /* Clear keep connect from reconnecting to HOST */
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg &= ~DWC3_DCTL_KEEP_CONNECT;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ /* fall through */
+ case DWC3_LINK_STATE_U3:
+ /* Ignore wakeup event as the link is still in U3 state */
+ dev_dbg(dwc->dev, "False wakeup event %d\n", link_state);
+
+ if (!dwc->force_hiber_wake)
+ enter_hiber = true;
+ break;
+
+ default:
+ /* issue recovery on the link */
+ ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
+ if (ret < 0) {
+ dev_err(dwc->dev,
+ "Failed to set link state to Recovery\n");
+ goto err;
+ }
+
+ break;
+ }
+
+ if (link_state != DWC3_LINK_STATE_SS_DIS) {
+ /* Restore non EP0 EPs */
+ ret = restore_eps(dwc);
+ if (ret) {
+ dev_err(dwc->dev, "Failed restoring non-EP0 states\n");
+ goto err;
+ }
+ }
+
+ /* clear the flag */
+ dwc->is_hibernated = false;
+
+ if (enter_hiber) {
+ /*
+ * as the wakeup was because of the spurious signals,
+ * enter hibernation again
+ */
+ gadget_hibernation_interrupt(dwc);
+ return;
+ }
+
+ dev_info(dwc->dev, "We are back from hibernation!\n");
+ return;
+
+err:
+ dev_err(dwc->dev, "Fail in handling Wakeup Interrupt\n");
+}
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index 86dbd012b984..101b791d18f2 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -9,9 +9,17 @@
#include <linux/acpi.h>
#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/usb/xhci_pdriver.h>
#include "core.h"
+/*
+ * dwc3_host_wakeup_capable - forward wakeup capability to the glue layer
+ * @dev: host-side device whose wakeup capability is being configured
+ * @wakeup: true to mark the device wakeup-capable, false otherwise
+ *
+ * Exported thin wrapper so the xHCI platform driver can reach
+ * dwc3_simple_wakeup_capable() without a direct glue-layer dependency.
+ */
+void dwc3_host_wakeup_capable(struct device *dev, bool wakeup)
+{
+	dwc3_simple_wakeup_capable(dev, wakeup);
+}
+EXPORT_SYMBOL(dwc3_host_wakeup_capable);
+
static int dwc3_host_get_irq(struct dwc3 *dwc)
{
struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
@@ -44,7 +52,7 @@ out:
int dwc3_host_init(struct dwc3 *dwc)
{
- struct property_entry props[4];
+ struct property_entry props[5];
struct platform_device *xhci;
int ret, irq;
struct resource *res;
@@ -95,6 +103,10 @@ int dwc3_host_init(struct dwc3 *dwc)
if (dwc->usb2_lpm_disable)
props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb2-lpm-disable");
+ if (device_property_read_bool(&dwc3_pdev->dev,
+ "snps,xhci-stream-quirk"))
+ props[prop_idx++] = PROPERTY_ENTRY_BOOL("xhci-stream-quirk");
+
/**
* WORKAROUND: dwc3 revisions <=3.00a have a limitation
* where Port Disable command doesn't work.
@@ -115,6 +127,24 @@ int dwc3_host_init(struct dwc3 *dwc)
}
}
+ phy_create_lookup(dwc->usb2_generic_phy, "usb2-phy",
+ dev_name(dwc->dev));
+ phy_create_lookup(dwc->usb3_generic_phy, "usb3-phy",
+ dev_name(dwc->dev));
+
+ if (dwc->dr_mode == USB_DR_MODE_OTG) {
+
+ struct usb_phy *phy = usb_get_phy(USB_PHY_TYPE_USB3);
+
+ if (!IS_ERR(phy)) {
+ if (phy && phy->otg)
+ otg_set_host(phy->otg,
+ (struct usb_bus *)0xdeadbeef);
+
+ usb_put_phy(phy);
+ }
+ }
+
ret = platform_device_add(xhci);
if (ret) {
dev_err(dwc->dev, "failed to register xHCI device\n");
diff --git a/drivers/usb/dwc3/otg.c b/drivers/usb/dwc3/otg.c
new file mode 100644
index 000000000000..6807e5426a48
--- /dev/null
+++ b/drivers/usb/dwc3/otg.c
@@ -0,0 +1,2190 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * otg.c - DesignWare USB3 DRD Controller OTG file
+ *
+ * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
+ *
+ * Author: Manish Narani <mnarani@xilinx.com>
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/sched/signal.h>
+#include <linux/sched.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/version.h>
+#include <linux/sysfs.h>
+
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/phy.h>
+
+#include <../drivers/usb/host/xhci.h>
+#include "platform_data.h"
+#include "core.h"
+#include "gadget.h"
+#include "io.h"
+#include "otg.h"
+
+#include <linux/ulpi/regs.h>
+#include <linux/ulpi/driver.h>
+#include "debug.h"
+
+/*
+ * print_debug_regs - dump the global, device and OTG register state
+ *
+ * Debug aid only.  All registers are read first and printed afterwards,
+ * so the prints do not interleave with the hardware accesses.
+ */
+static void print_debug_regs(struct dwc3_otg *otg)
+{
+	u32 gctl = otg_read(otg, DWC3_GCTL);
+	u32 gsts = otg_read(otg, DWC3_GSTS);
+	u32 gdbgltssm = otg_read(otg, DWC3_GDBGLTSSM);
+	u32 gusb2phycfg0 = otg_read(otg, DWC3_GUSB2PHYCFG(0));
+	u32 gusb3pipectl0 = otg_read(otg, DWC3_GUSB3PIPECTL(0));
+	u32 dcfg = otg_read(otg, DWC3_DCFG);
+	u32 dctl = otg_read(otg, DWC3_DCTL);
+	u32 dsts = otg_read(otg, DWC3_DSTS);
+	u32 ocfg = otg_read(otg, OCFG);
+	u32 octl = otg_read(otg, OCTL);
+	u32 oevt = otg_read(otg, OEVT);
+	u32 oevten = otg_read(otg, OEVTEN);
+	u32 osts = otg_read(otg, OSTS);
+
+	otg_info(otg, "gctl = %08x\n", gctl);
+	otg_info(otg, "gsts = %08x\n", gsts);
+	otg_info(otg, "gdbgltssm = %08x\n", gdbgltssm);
+	otg_info(otg, "gusb2phycfg0 = %08x\n", gusb2phycfg0);
+	otg_info(otg, "gusb3pipectl0 = %08x\n", gusb3pipectl0);
+	otg_info(otg, "dcfg = %08x\n", dcfg);
+	otg_info(otg, "dctl = %08x\n", dctl);
+	otg_info(otg, "dsts = %08x\n", dsts);
+	otg_info(otg, "ocfg = %08x\n", ocfg);
+	otg_info(otg, "octl = %08x\n", octl);
+	otg_info(otg, "oevt = %08x\n", oevt);
+	otg_info(otg, "oevten = %08x\n", oevten);
+	otg_info(otg, "osts = %08x\n", osts);
+}
+
+/* Report whether the controller hardware advertises HNP support */
+static int hnp_capable(struct dwc3_otg *otg)
+{
+	return (otg->hwparams6 & GHWPARAMS6_HNP_SUPPORT_ENABLED) ? 1 : 0;
+}
+
+/* Report whether the controller hardware advertises SRP support */
+static int srp_capable(struct dwc3_otg *otg)
+{
+	return (otg->hwparams6 & GHWPARAMS6_SRP_SUPPORT_ENABLED) ? 1 : 0;
+}
+
+/* Wakeup main thread to execute the OTG flow after an event */
+static void wakeup_main_thread(struct dwc3_otg *otg)
+{
+	/* Nothing to wake before the state-machine thread exists */
+	if (!otg->main_thread)
+		return;
+
+	otg_vdbg(otg, "\n");
+	/* Tell the main thread that something has happened */
+	otg->main_wakeup_needed = 1;
+	wake_up_interruptible(&otg->main_wq);
+}
+
+/*
+ * sleep_main_thread_timeout - sleep the main thread for up to @msecs
+ *
+ * Returns the remaining time in milliseconds (> 0) when woken by an
+ * event, 0 on timeout, or -EINTR when a signal is pending.  The
+ * main_wakeup_needed flag is consumed (cleared) on every exit path.
+ */
+static int sleep_main_thread_timeout(struct dwc3_otg *otg, int msecs)
+{
+	signed long timeout;
+	int rc = msecs;
+
+	if (signal_pending(current)) {
+		otg_dbg(otg, "Main thread signal pending\n");
+		rc = -EINTR;
+		goto done;
+	}
+	if (otg->main_wakeup_needed) {
+		otg_dbg(otg, "Main thread wakeup needed\n");
+		rc = msecs;
+		goto done;
+	}
+
+	/*
+	 * Deliberately not named "jiffies": a local of that name would
+	 * shadow the kernel's global jiffies counter.
+	 */
+	timeout = msecs_to_jiffies(msecs);
+	rc = wait_event_freezable_timeout(otg->main_wq,
+					  otg->main_wakeup_needed,
+					  timeout);
+
+	if (rc > 0)
+		rc = jiffies_to_msecs(rc);
+
+done:
+	otg->main_wakeup_needed = 0;
+	return rc;
+}
+
+/* Block the main thread until an event occurs; re-arm every 5 seconds */
+static int sleep_main_thread(struct dwc3_otg *otg)
+{
+	int rc = 0;
+
+	while (rc == 0)
+		rc = sleep_main_thread_timeout(otg, 5000);
+
+	return rc;
+}
+
+/* Snapshot the pending OTG/user event words under the OTG spinlock */
+static void get_events(struct dwc3_otg *otg, u32 *otg_events, u32 *user_events)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&otg->lock, flags);
+	if (otg_events != NULL)
+		*otg_events = otg->otg_events;
+	if (user_events != NULL)
+		*user_events = otg->user_events;
+	spin_unlock_irqrestore(&otg->lock, flags);
+}
+
+/* Atomically fetch and reset the pending OTG/user event words */
+static void get_and_clear_events(struct dwc3_otg *otg, u32 *otg_events,
+				 u32 *user_events)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&otg->lock, flags);
+
+	if (otg_events != NULL)
+		*otg_events = otg->otg_events;
+	if (user_events != NULL)
+		*user_events = otg->user_events;
+
+	otg->otg_events = 0;
+	otg->user_events = 0;
+
+	spin_unlock_irqrestore(&otg->lock, flags);
+}
+
+/*
+ * check_event - test whether any masked OTG or user event is pending
+ *
+ * Returns 1 when an event selected by @otg_mask or @user_mask has
+ * occurred, 0 otherwise.  Used as the wait condition for the main
+ * thread's event sleep.
+ */
+static int check_event(struct dwc3_otg *otg, u32 otg_mask, u32 user_mask)
+{
+	u32 otg_events;
+	u32 user_events;
+
+	get_events(otg, &otg_events, &user_events);
+	if ((otg_events & otg_mask) || (user_events & user_mask)) {
+		/*
+		 * Adjacent string literals instead of a backslash line
+		 * continuation: the continuation embedded the source
+		 * indentation into the emitted log message.
+		 */
+		otg_dbg(otg, "Event occurred: otg_events=%x, otg_mask=%x, "
+			"user_events=%x, user_mask=%x\n", otg_events,
+			otg_mask, user_events, user_mask);
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * sleep_until_event - arm OEVTEN and sleep until a masked event arrives
+ * @otg_mask: OTG interrupt events to enable and wait for
+ * @user_mask: user (sysfs-driven) events to wait for
+ * @otg_events/@user_events: out-params receiving the consumed events
+ * @timeout: milliseconds to wait, or 0 to wait indefinitely
+ *
+ * Returns remaining time (>0), 0 on timeout, negative on signal.  On a
+ * non-error return the pending events are fetched and cleared.
+ */
+static int sleep_until_event(struct dwc3_otg *otg, u32 otg_mask, u32 user_mask,
+			     u32 *otg_events, u32 *user_events, int timeout)
+{
+	int rc;
+
+	/* Enable the events */
+	if (otg_mask)
+		otg_write(otg, OEVTEN, otg_mask);
+
+	/* Wait until it occurs, or timeout, or interrupt. */
+	/* NOTE: check_event() is re-evaluated by the sleep macros below */
+	if (timeout) {
+		otg_vdbg(otg, "Waiting for event (timeout=%d)...\n", timeout);
+		rc = sleep_main_thread_until_condition_timeout(otg,
+			check_event(otg, otg_mask, user_mask), timeout);
+	} else {
+		otg_vdbg(otg, "Waiting for event (no timeout)...\n");
+		rc = sleep_main_thread_until_condition(otg,
+			check_event(otg, otg_mask, user_mask));
+	}
+
+	/* Disable the events */
+	otg_write(otg, OEVTEN, 0);
+
+	otg_vdbg(otg, "Woke up rc=%d\n", rc);
+	if (rc >= 0)
+		get_and_clear_events(otg, otg_events, user_events);
+
+	return rc;
+}
+
+/* Program OCFG with whichever of SRP/HNP the hardware supports */
+static void set_capabilities(struct dwc3_otg *otg)
+{
+	u32 ocfg = 0;
+
+	otg_dbg(otg, "\n");
+	ocfg |= srp_capable(otg) ? OCFG_SRP_CAP : 0;
+	ocfg |= hnp_capable(otg) ? OCFG_HNP_CAP : 0;
+
+	otg_write(otg, OCFG, ocfg);
+
+	otg_dbg(otg, "Enabled SRP and HNP capabilities in OCFG\n");
+}
+
+/*
+ * otg3_handshake - poll @reg until (@reg & @mask) == @done or @msec expires
+ *
+ * Returns 1 when the condition is met within the timeout, 0 otherwise.
+ */
+static int otg3_handshake(struct dwc3_otg *otg, u32 reg, u32 mask, u32 done,
+			  u32 msec)
+{
+	u32 result;
+	u32 usec = msec * 1000;
+
+	otg_vdbg(otg, "reg=%08x, mask=%08x, value=%08x\n", reg, mask, done);
+
+	/*
+	 * Test before decrementing: the previous do/while decremented
+	 * the unsigned counter first, so a zero timeout wrapped around
+	 * and busy-waited for ~2^32 microseconds.
+	 */
+	while (usec > 0) {
+		result = otg_read(otg, reg);
+		if ((result & mask) == done)
+			return 1;
+		udelay(1);
+		usec--;
+	}
+
+	return 0;
+}
+
+/* Kick enumeration on the host bus; -ENODEV when no host is bound */
+static int reset_port(struct dwc3_otg *otg)
+{
+	otg_dbg(otg, "\n");
+	return otg->otg.host ? usb_bus_start_enum(otg->otg.host, 1) : -ENODEV;
+}
+
+/*
+ * set_peri_mode - switch the OTG core between host and peripheral roles
+ * @mode: non-zero sets OCTL.PeriMode (peripheral), zero clears it (host)
+ *
+ * Returns 1 when OSTS reflects the requested role within 100 ms,
+ * 0 on timeout.
+ */
+static int set_peri_mode(struct dwc3_otg *otg, int mode)
+{
+	u32 octl;
+
+	/* Set peri_mode */
+	octl = otg_read(otg, OCTL);
+	if (mode)
+		octl |= OCTL_PERI_MODE;
+	else
+		octl &= ~OCTL_PERI_MODE;
+
+	otg_write(otg, OCTL, octl);
+	otg_dbg(otg, "set OCTL PERI_MODE = %d in OCTL\n", mode);
+
+	/*
+	 * The msleep(20) that used to follow these returns was dead
+	 * code: both branches return, so it could never execute.
+	 */
+	if (mode)
+		return otg3_handshake(otg, OSTS, OSTS_PERIP_MODE,
+				      OSTS_PERIP_MODE, 100);
+
+	return otg3_handshake(otg, OSTS, OSTS_PERIP_MODE, 0, 100);
+}
+
+/*
+ * start_host - switch the core to host role and bring up the xHCI HCDs
+ *
+ * Masks OTG soft reset, clears the gadget RUN/STOP bit, flips
+ * OCTL.PeriMode to host, registers the primary (and, when present,
+ * shared/USB3) HCD, and powers the port when acting as A-host.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int start_host(struct dwc3_otg *otg)
+{
+	int ret = -ENODEV;
+	int flg;
+	u32 octl;
+	u32 ocfg;
+	u32 dctl;
+	struct usb_hcd *hcd;
+	struct xhci_hcd *xhci;
+
+	otg_dbg(otg, "\n");
+
+	if (!otg->otg.host)
+		return -ENODEV;
+
+	/*
+	 * Prevent the host USBCMD.HCRST from resetting OTG core by setting
+	 * OCFG.OTGSftRstMsk
+	 */
+	ocfg = otg_read(otg, OCFG);
+	ocfg |= DWC3_OCFG_SFTRSTMASK;
+	otg_write(otg, OCFG, ocfg);
+
+	dctl = otg_read(otg, DCTL);
+	if (dctl & DWC3_DCTL_RUN_STOP) {
+		otg_dbg(otg, "Disabling the RUN/STOP bit\n");
+		dctl &= ~DWC3_DCTL_RUN_STOP;
+		otg_write(otg, DCTL, dctl);
+	}
+
+	if (!set_peri_mode(otg, PERI_MODE_HOST)) {
+		otg_err(otg, "Failed to start host\n");
+		return -EINVAL;
+	}
+
+	hcd = container_of(otg->otg.host, struct usb_hcd, self);
+	xhci = hcd_to_xhci(hcd);
+	otg_dbg(otg, "hcd=%p xhci=%p\n", hcd, xhci);
+
+	if (otg->host_started) {
+		otg_info(otg, "Host already started\n");
+		goto skip;
+	}
+
+	/* Start host driver */
+	*(struct xhci_hcd **)hcd->hcd_priv = xhci;
+	ret = usb_add_hcd(hcd, otg->hcd_irq, IRQF_SHARED);
+	if (ret) {
+		otg_err(otg, "%s: failed to start primary hcd, ret=%d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	/*
+	 * Only touch the shared HCD after the NULL check: the previous
+	 * code dereferenced xhci->shared_hcd->hcd_priv before testing
+	 * whether shared_hcd exists.
+	 */
+	if (xhci->shared_hcd) {
+		*(struct xhci_hcd **)xhci->shared_hcd->hcd_priv = xhci;
+		ret = usb_add_hcd(xhci->shared_hcd, otg->hcd_irq, IRQF_SHARED);
+		if (ret) {
+			otg_err(otg,
+				"%s: failed to start secondary hcd, ret=%d\n",
+				__func__, ret);
+			usb_remove_hcd(hcd);
+			return ret;
+		}
+	}
+
+	otg->host_started = 1;
+skip:
+	hcd->self.otg_port = 1;
+	if (xhci->shared_hcd)
+		xhci->shared_hcd->self.otg_port = 1;
+
+	set_capabilities(otg);
+
+	/* Power the port only for A-host */
+	if (otg->otg.state == OTG_STATE_A_WAIT_VRISE) {
+		/* Spin on xhciPrtPwr bit until it becomes 1 */
+		flg = otg3_handshake(otg, OSTS,
+				     OSTS_XHCI_PRT_PWR,
+				     OSTS_XHCI_PRT_PWR,
+				     1000);
+		if (flg) {
+			otg_dbg(otg, "Port is powered by xhci-hcd\n");
+			/* Set port power control bit */
+			octl = otg_read(otg, OCTL);
+			octl |= OCTL_PRT_PWR_CTL;
+			otg_write(otg, OCTL, octl);
+		} else {
+			otg_dbg(otg, "Port is not powered by xhci-hcd\n");
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * stop_host - remove the running xHCI HCDs and leave host role
+ *
+ * Returns 1 when the host was already stopped, -ENODEV when no host
+ * bus is registered, 0 after a successful teardown.
+ */
+static int stop_host(struct dwc3_otg *otg)
+{
+	struct usb_hcd *hcd;
+	struct xhci_hcd *xhci;
+
+	otg_dbg(otg, "\n");
+
+	if (!otg->host_started) {
+		otg_info(otg, "Host already stopped\n");
+		return 1;
+	}
+
+	if (!otg->otg.host)
+		return -ENODEV;
+
+	otg_dbg(otg, "%s: turn off host %s\n",
+		__func__, otg->otg.host->bus_name);
+
+	/* Drain any queued port-power work before removing the HCDs */
+	if (work_pending(&otg->hp_work.work)) {
+		while (!cancel_delayed_work(&otg->hp_work))
+			msleep(20);
+	}
+
+	hcd = container_of(otg->otg.host, struct usb_hcd, self);
+	xhci = hcd_to_xhci(hcd);
+
+	/* Remove the shared (USB3) HCD first, then the primary one */
+	if (xhci->shared_hcd)
+		usb_remove_hcd(xhci->shared_hcd);
+	usb_remove_hcd(hcd);
+
+	otg->host_started = 0;
+	otg->dev_enum = 0;
+	return 0;
+}
+
+/*
+ * dwc3_otg_host_release - initiate HNP from the host side
+ * @hcd: host controller whose OTG port is examined
+ *
+ * Finds the device attached to the bus's OTG port and, when it carries
+ * an OTG 2.0 descriptor and runs at high speed, sends the
+ * SetFeature(TEST_MODE) request used to hand over the host role.
+ *
+ * Returns 0 on success or when there is nothing to do, -EINVAL for a
+ * NULL @hcd, -1 when the control request fails.
+ */
+int dwc3_otg_host_release(struct usb_hcd *hcd)
+{
+	struct usb_bus *bus;
+	struct usb_device *rh;
+	struct usb_device *udev;
+
+	if (!hcd)
+		return -EINVAL;
+
+	bus = &hcd->self;
+	if (!bus->otg_port)
+		return 0;
+
+	rh = bus->root_hub;
+	udev = usb_hub_find_child(rh, bus->otg_port);
+	if (!udev)
+		return 0;
+
+	/* Only act on a directly attached, configured device */
+	if (udev->config && udev->parent == udev->bus->root_hub) {
+		struct usb_otg20_descriptor *desc;
+
+		if (__usb_get_extra_descriptor(udev->rawdescriptors[0],
+			le16_to_cpu(udev->config[0].desc.wTotalLength),
+			USB_DT_OTG, (void **)&desc, sizeof(*desc)) ==
+				0) {
+			int err;
+
+			dev_info(&udev->dev, "found OTG descriptor\n");
+			if ((desc->bcdOTG >= 0x0200) &&
+			    (udev->speed == USB_SPEED_HIGH)) {
+				/* wIndex 7 << 8: test-mode selector 7 — presumably the OTG role-request mode; verify against spec */
+				err = usb_control_msg(udev,
+					usb_sndctrlpipe(udev, 0),
+					USB_REQ_SET_FEATURE, 0,
+					USB_DEVICE_TEST_MODE,
+					7 << 8,
+					NULL, 0, USB_CTRL_SET_TIMEOUT);
+				if (err < 0) {
+					dev_info(&udev->dev,
+						"can't initiate HNP from host: %d\n",
+						err);
+					return -1;
+				}
+			}
+		} else {
+			dev_info(&udev->dev, "didn't find OTG descriptor\n");
+		}
+	} else {
+		dev_info(&udev->dev,
+			 "udev->config NULL or udev->parent != udev->bus->root_hub\n");
+	}
+
+	return 0;
+}
+
+/* Sends the host release set feature request on both HCDs */
+static void host_release(struct dwc3_otg *otg)
+{
+	struct usb_hcd *hcd;
+	struct xhci_hcd *xhci;
+
+	otg_dbg(otg, "\n");
+	if (!otg->otg.host)
+		return;
+	hcd = container_of(otg->otg.host, struct usb_hcd, self);
+	xhci = hcd_to_xhci(hcd);
+	dwc3_otg_host_release(hcd);
+	/* The shared (USB3) HCD may be absent */
+	if (xhci->shared_hcd)
+		dwc3_otg_host_release(xhci->shared_hcd);
+}
+
+/* (Re)program the event buffers when GEVNTADRLO(0) was left unset */
+static void dwc3_otg_setup_event_buffers(struct dwc3_otg *otg)
+{
+	if (dwc3_readl(otg->dwc->regs, DWC3_GEVNTADRLO(0)) != 0x0)
+		return;
+
+	otg_dbg(otg, "setting up event buffers\n");
+	dwc3_event_buffers_setup(otg->dwc);
+}
+
+/*
+ * start_peripheral - switch the core to peripheral role and start EP0
+ *
+ * Masks OTG soft reset, flips OCTL.PeriMode, re-arms the event
+ * buffers, enables both EP0 endpoints under the OTG lock, unmasks
+ * device interrupts and sets RUN/STOP.  No-op when already started.
+ */
+static void start_peripheral(struct dwc3_otg *otg)
+{
+	struct usb_gadget *gadget = otg->otg.gadget;
+	struct dwc3 *dwc = otg->dwc;
+	u32 ocfg;
+
+	otg_dbg(otg, "\n");
+	if (!gadget)
+		return;
+
+	/*
+	 * Prevent the gadget DCTL.CSFTRST from resetting OTG core by setting
+	 * OCFG.OTGSftRstMsk
+	 */
+	ocfg = otg_read(otg, OCFG);
+	ocfg |= DWC3_OCFG_SFTRSTMASK;
+	otg_write(otg, OCFG, ocfg);
+
+	if (!set_peri_mode(otg, PERI_MODE_PERIPHERAL))
+		otg_err(otg, "Failed to set peripheral mode\n");
+
+	if (otg->peripheral_started) {
+		otg_info(otg, "Peripheral already started\n");
+		return;
+	}
+
+	set_capabilities(otg);
+
+	dwc3_otg_setup_event_buffers(otg);
+
+	if (dwc->gadget_driver) {
+		struct dwc3_ep *dep;
+		int ret;
+
+		spin_lock(&otg->lock);
+		/* Enable EP0 OUT then EP0 IN before accepting traffic */
+		dep = dwc->eps[0];
+
+		ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
+		if (ret)
+			goto err0;
+
+		dep = dwc->eps[1];
+
+		ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
+		if (ret)
+			goto err1;
+
+		otg_dbg(otg, "enabled ep in gadget driver\n");
+		/* begin to receive SETUP packets */
+		dwc->ep0state = EP0_SETUP_PHASE;
+		dwc3_ep0_out_start(dwc);
+
+		otg_dbg(otg, "enabled irq\n");
+		dwc3_gadget_enable_irq(dwc);
+
+		otg_write(otg, DCTL, otg_read(otg, DCTL) | DCTL_RUN_STOP);
+		otg_dbg(otg, "Setting DCTL_RUN_STOP to 1 in DCTL\n");
+		spin_unlock(&otg->lock);
+	}
+
+	gadget->b_hnp_enable = 0;
+	gadget->host_request_flag = 0;
+
+	otg->peripheral_started = 1;
+
+	/*
+	 * During HNP the bus shouldn't be idle for more than 155 ms, so
+	 * give enough time for the host to load the stack before start
+	 * triggerring events
+	 */
+	msleep(500);
+
+	return;
+err1:
+	__dwc3_gadget_ep_disable(dwc->eps[0]);
+
+err0:
+	/* NOTE(review): error paths return with otg->lock still held — confirm intended */
+	return;
+}
+
+/*
+ * stop_peripheral - disable the gadget side of the controller
+ *
+ * Masks device interrupts and disables both EP0 endpoints under the
+ * OTG lock.  No-op when the peripheral is not running or no gadget is
+ * bound.
+ */
+static void stop_peripheral(struct dwc3_otg *otg)
+{
+	struct usb_gadget *gadget = otg->otg.gadget;
+	struct dwc3 *dwc = otg->dwc;
+
+	otg_dbg(otg, "\n");
+
+	if (!otg->peripheral_started) {
+		otg_info(otg, "Peripheral already stopped\n");
+		return;
+	}
+
+	if (!gadget)
+		return;
+
+	otg_dbg(otg, "disabled ep in gadget driver\n");
+	spin_lock(&otg->lock);
+
+	dwc3_gadget_disable_irq(dwc);
+	__dwc3_gadget_ep_disable(dwc->eps[0]);
+	__dwc3_gadget_ep_disable(dwc->eps[1]);
+
+	spin_unlock(&otg->lock);
+
+	otg->peripheral_started = 0;
+	/* Let any in-flight events settle before the role switches */
+	msleep(20);
+}
+
+/* Mark or unmark the host bus as operating in the B-host role */
+static void set_b_host(struct dwc3_otg *otg, int val)
+{
+	struct usb_bus *bus = otg->otg.host;
+
+	bus->is_b_host = val;
+}
+
+static enum usb_otg_state do_b_idle(struct dwc3_otg *otg);
+
+/* Configure the core as a B-device and enter the b_idle state machine */
+static int init_b_device(struct dwc3_otg *otg)
+{
+	otg_dbg(otg, "\n");
+	set_capabilities(otg);
+
+	if (!set_peri_mode(otg, PERI_MODE_PERIPHERAL))
+		otg_err(otg, "Failed to start peripheral\n");
+
+	/* Returns the next usb_otg_state as an int */
+	return do_b_idle(otg);
+}
+
+/* Clear OTG config/control for A-device operation; next state A_IDLE */
+static int init_a_device(struct dwc3_otg *otg)
+{
+	otg_write(otg, OCFG, 0);
+	otg_write(otg, OCTL, 0);
+	otg_dbg(otg, "Write 0 to OCFG and OCTL\n");
+
+	return OTG_STATE_A_IDLE;
+}
+
+/*
+ * do_connector_id_status - decide the initial role from the ID pin
+ *
+ * Clears OTG config, acknowledges all pending OTG events, enables the
+ * connector-ID-change interrupt, then samples OSTS.ConIDSts:
+ * ID low (A-plug) initializes the A-device path, ID high (B-plug)
+ * stops any running host and initializes the B-device path.
+ */
+static enum usb_otg_state do_connector_id_status(struct dwc3_otg *otg)
+{
+	enum usb_otg_state state;
+	u32 osts;
+
+	otg_dbg(otg, "\n");
+
+	otg_write(otg, OCFG, 0);
+	otg_write(otg, OEVTEN, 0);
+	otg_write(otg, OEVT, 0xffffffff);
+	otg_write(otg, OEVTEN, OEVT_CONN_ID_STS_CHNG_EVNT);
+
+	/* Allow the ID status to debounce before sampling it */
+	msleep(60);
+
+	osts = otg_read(otg, OSTS);
+	if (!(osts & OSTS_CONN_ID_STS)) {
+		otg_dbg(otg, "Connector ID is A\n");
+		state = init_a_device(otg);
+	} else {
+		otg_dbg(otg, "Connector ID is B\n");
+		stop_host(otg);
+		state = init_b_device(otg);
+	}
+
+	/* TODO: This is a workaround for latest hibernation-enabled bitfiles
+	 * which have problems before initializing SRP.
+	 */
+	msleep(50);
+
+	return state;
+}
+
+/*
+ * reset_hw - put the OTG block back into a known state
+ *
+ * Disables OTG event interrupts, clears every OCTL bit except
+ * PeriMode, and sets GCTL.PrtCapDir to OTG so role switching stays
+ * possible.
+ */
+static void reset_hw(struct dwc3_otg *otg)
+{
+	u32 temp;
+
+	otg_dbg(otg, "\n");
+
+	otg_write(otg, OEVTEN, 0);
+	temp = otg_read(otg, OCTL);
+	/* Keep only the current PeriMode setting */
+	temp &= OCTL_PERI_MODE;
+	otg_write(otg, OCTL, temp);
+	temp = otg_read(otg, GCTL);
+	temp |= GCTL_PRT_CAP_DIR_OTG << GCTL_PRT_CAP_DIR_SHIFT;
+	otg_write(otg, GCTL, temp);
+}
+
+#define SRP_TIMEOUT 6000
+
+/* Trigger a Session Request Protocol by setting OCTL.SesReq */
+static void start_srp(struct dwc3_otg *otg)
+{
+	otg_write(otg, OCTL, otg_read(otg, OCTL) | OCTL_SES_REQ);
+	otg_dbg(otg, "set OCTL_SES_REQ in OCTL\n");
+}
+
+/* Request HNP as a B-device: set HnpReq and DevSetHnpEn together */
+static void start_b_hnp(struct dwc3_otg *otg)
+{
+	otg_write(otg, OCTL,
+		  otg_read(otg, OCTL) | OCTL_HNP_REQ | OCTL_DEV_SET_HNP_EN);
+	otg_dbg(otg, "set (OCTL_HNP_REQ | OCTL_DEV_SET_HNP_EN) in OCTL\n");
+}
+
+/* Withdraw the B-device HNP request: clear HnpReq and DevSetHnpEn */
+static void stop_b_hnp(struct dwc3_otg *otg)
+{
+	otg_write(otg, OCTL,
+		  otg_read(otg, OCTL) & ~(OCTL_HNP_REQ | OCTL_DEV_SET_HNP_EN));
+	otg_dbg(otg, "Clear ~(OCTL_HNP_REQ | OCTL_DEV_SET_HNP_EN) in OCTL\n");
+}
+
+/* Enable HNP handover as an A-device by setting OCTL.HstSetHnpEn */
+static void start_a_hnp(struct dwc3_otg *otg)
+{
+	otg_write(otg, OCTL, otg_read(otg, OCTL) | OCTL_HST_SET_HNP_EN);
+	otg_dbg(otg, "set OCTL_HST_SET_HNP_EN in OCTL\n");
+}
+
+/* Disable HNP handover as an A-device by clearing OCTL.HstSetHnpEn */
+static void stop_a_hnp(struct dwc3_otg *otg)
+{
+	otg_write(otg, OCTL, otg_read(otg, OCTL) & ~OCTL_HST_SET_HNP_EN);
+	otg_dbg(otg, "clear OCTL_HST_SET_HNP_EN in OCTL\n");
+}
+
+/*
+ * do_a_hnp_init - A-device: request HNP and wait for the result
+ *
+ * Sets OCTL.HstSetHnpEn, then waits up to 3 s for a connector-ID or
+ * HNP-change event.  Next state: A_PERIPHERAL on HNP success,
+ * A_WAIT_VFALL on failure or timeout, UNDEFINED on ID change/error.
+ */
+static enum usb_otg_state do_a_hnp_init(struct dwc3_otg *otg)
+{
+	int rc;
+	u32 otg_mask;
+	u32 otg_events = 0;
+
+	otg_dbg(otg, "");
+	otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+		   OEVT_A_DEV_HNP_CHNG_EVNT;
+
+	start_a_hnp(otg);
+	rc = 3000;
+
+again:
+	rc = sleep_until_event(otg,
+			       otg_mask, 0,
+			       &otg_events, NULL, rc);
+	stop_a_hnp(otg);
+	if (rc < 0)
+		return OTG_STATE_UNDEFINED;
+
+	/* Higher priority first */
+	if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+		otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+		return OTG_STATE_UNDEFINED;
+
+	} else if (otg_events & OEVT_A_DEV_HNP_CHNG_EVNT) {
+		otg_dbg(otg, "OEVT_A_DEV_HNP_CHNG_EVNT\n");
+		if (otg_events & OEVT_HST_NEG_SCS) {
+			otg_dbg(otg, "A-HNP Success\n");
+			return OTG_STATE_A_PERIPHERAL;
+
+		} else {
+			otg_dbg(otg, "A-HNP Failed\n");
+			return OTG_STATE_A_WAIT_VFALL;
+		}
+
+	} else if (rc == 0) {
+		otg_dbg(otg, "A-HNP Failed (Timed out)\n");
+		return OTG_STATE_A_WAIT_VFALL;
+
+	} else {
+		/* Spurious wakeup: keep waiting with the remaining time */
+		goto again;
+	}
+
+	/* Invalid state */
+	return OTG_STATE_UNDEFINED;
+}
+
+/*
+ * do_a_host - A-device acting as host: wait for a role-change trigger
+ *
+ * Sleeps without timeout for an ID change, session end, or a
+ * user-initiated SRP/HNP request.  Next state: UNDEFINED on ID
+ * change/error, A_WAIT_VFALL on session end, A_SUSPEND on user HNP.
+ */
+static enum usb_otg_state do_a_host(struct dwc3_otg *otg)
+{
+	int rc;
+	u32 otg_mask;
+	u32 user_mask;
+	u32 otg_events = 0;
+	u32 user_events = 0;
+
+	otg_dbg(otg, "");
+
+	otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+		   OEVT_A_DEV_SESS_END_DET_EVNT;
+	user_mask = USER_SRP_EVENT |
+		    USER_HNP_EVENT;
+
+	rc = sleep_until_event(otg,
+			       otg_mask, user_mask,
+			       &otg_events, &user_events, 0);
+	if (rc < 0)
+		return OTG_STATE_UNDEFINED;
+
+	/* Higher priority first */
+	if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+		otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+		return OTG_STATE_UNDEFINED;
+
+	} else if (otg_events & OEVT_A_DEV_SESS_END_DET_EVNT) {
+		otg_dbg(otg, "OEVT_A_DEV_SESS_END_DET_EVNT\n");
+		return OTG_STATE_A_WAIT_VFALL;
+
+	} else if (user_events & USER_HNP_EVENT) {
+		otg_dbg(otg, "USER_HNP_EVENT\n");
+		return OTG_STATE_A_SUSPEND;
+	}
+
+	/* Invalid state */
+	return OTG_STATE_UNDEFINED;
+}
+
+#define A_WAIT_VFALL_TIMEOUT 1000
+
+/*
+ * do_a_wait_vfall - wait for VBUS to fall after an A-session ends
+ *
+ * Waits up to A_WAIT_VFALL_TIMEOUT ms for the device-idle event; both
+ * the event and a timeout lead back to A_IDLE.
+ */
+static enum usb_otg_state do_a_wait_vfall(struct dwc3_otg *otg)
+{
+	int rc;
+	u32 otg_mask;
+	u32 otg_events = 0;
+
+	otg_dbg(otg, "");
+
+	otg_mask = OEVT_A_DEV_IDLE_EVNT;
+
+	rc = A_WAIT_VFALL_TIMEOUT;
+	rc = sleep_until_event(otg,
+			       otg_mask, 0,
+			       &otg_events, NULL, rc);
+	if (rc < 0)
+		return OTG_STATE_UNDEFINED;
+
+	if (otg_events & OEVT_A_DEV_IDLE_EVNT) {
+		otg_dbg(otg, "OEVT_A_DEV_IDLE_EVNT\n");
+		return OTG_STATE_A_IDLE;
+
+	} else if (rc == 0) {
+		otg_dbg(otg, "A_WAIT_VFALL_TIMEOUT\n");
+		return OTG_STATE_A_IDLE;
+	}
+
+	/* Invalid state */
+	return OTG_STATE_UNDEFINED;
+
+}
+
+#define A_WAIT_BCON_TIMEOUT 1000
+
+/*
+ * do_a_wait_bconn - A-device: wait for the B-device to connect
+ *
+ * Waits up to A_WAIT_BCON_TIMEOUT ms.  A host event moves to A_HOST;
+ * on timeout the port-power bit decides between A_HOST and
+ * A_WAIT_VFALL; ID change or session end abort the wait.
+ */
+static enum usb_otg_state do_a_wait_bconn(struct dwc3_otg *otg)
+{
+	int rc;
+	u32 otg_mask;
+	u32 otg_events = 0;
+
+	otg_dbg(otg, "");
+
+	otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+		   OEVT_A_DEV_SESS_END_DET_EVNT |
+		   OEVT_A_DEV_HOST_EVNT;
+
+	rc = A_WAIT_BCON_TIMEOUT;
+	rc = sleep_until_event(otg,
+			       otg_mask, 0,
+			       &otg_events, NULL, rc);
+	if (rc < 0)
+		return OTG_STATE_UNDEFINED;
+
+	/* Higher priority first */
+	if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+		otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+		return OTG_STATE_UNDEFINED;
+
+	} else if (otg_events & OEVT_A_DEV_SESS_END_DET_EVNT) {
+		otg_dbg(otg, "OEVT_A_DEV_SESS_END_DET_EVNT\n");
+		return OTG_STATE_A_WAIT_VFALL;
+
+	} else if (otg_events & OEVT_A_DEV_HOST_EVNT) {
+		otg_dbg(otg, "OEVT_A_DEV_HOST_EVNT\n");
+		return OTG_STATE_A_HOST;
+
+	} else if (rc == 0) {
+		/* Timed out: fall back on the port-power status */
+		if (otg_read(otg, OCTL) & OCTL_PRT_PWR_CTL)
+			return OTG_STATE_A_HOST;
+		else
+			return OTG_STATE_A_WAIT_VFALL;
+	}
+
+	/* Invalid state */
+	return OTG_STATE_UNDEFINED;
+}
+
+#define A_WAIT_VRISE_TIMEOUT 100
+
+/*
+ * do_a_wait_vrise - A-device: raise VBUS and wait for it to stabilize
+ *
+ * Starts the host (A-host role), kicks the root-hub workqueues, and
+ * waits up to A_WAIT_VRISE_TIMEOUT ms.  On timeout the port-power bit
+ * decides between A_WAIT_BCON and A_WAIT_VFALL.
+ */
+static enum usb_otg_state do_a_wait_vrise(struct dwc3_otg *otg)
+{
+	int rc;
+	u32 otg_mask;
+	u32 otg_events = 0;
+	struct usb_hcd *hcd;
+	struct xhci_hcd *xhci;
+
+	otg_dbg(otg, "");
+	set_b_host(otg, 0);
+	start_host(otg);
+	hcd = container_of(otg->otg.host, struct usb_hcd, self);
+	xhci = hcd_to_xhci(hcd);
+	usb_kick_hub_wq(hcd->self.root_hub);
+	if (xhci->shared_hcd)
+		usb_kick_hub_wq(xhci->shared_hcd->self.root_hub);
+
+	otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+		   OEVT_A_DEV_SESS_END_DET_EVNT;
+
+	rc = A_WAIT_VRISE_TIMEOUT;
+
+	rc = sleep_until_event(otg,
+			       otg_mask, 0,
+			       &otg_events, NULL, rc);
+	if (rc < 0)
+		return OTG_STATE_UNDEFINED;
+
+	/* Higher priority first */
+	if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+		otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+		return OTG_STATE_UNDEFINED;
+
+	} else if (otg_events & OEVT_A_DEV_SESS_END_DET_EVNT) {
+		otg_dbg(otg, "OEVT_A_DEV_SESS_END_DET_EVNT\n");
+		return OTG_STATE_A_WAIT_VFALL;
+
+	} else if (rc == 0) {
+		if (otg_read(otg, OCTL) & OCTL_PRT_PWR_CTL)
+			return OTG_STATE_A_WAIT_BCON;
+		else
+			return OTG_STATE_A_WAIT_VFALL;
+	}
+
+	/* Invalid state */
+	return OTG_STATE_UNDEFINED;
+}
+
+/*
+ * do_a_idle - A-device idle: wait for SRP (hardware or user) or ID change
+ *
+ * Sleeps without timeout.  Either SRP source moves to A_WAIT_VRISE;
+ * an ID change or error returns UNDEFINED.
+ */
+static enum usb_otg_state do_a_idle(struct dwc3_otg *otg)
+{
+	int rc;
+	u32 otg_mask;
+	u32 user_mask;
+	u32 otg_events = 0;
+	u32 user_events = 0;
+
+	otg_dbg(otg, "");
+
+	otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT | OEVT_A_DEV_SRP_DET_EVNT;
+	user_mask = USER_SRP_EVENT;
+
+	rc = sleep_until_event(otg,
+			       otg_mask, user_mask,
+			       &otg_events, &user_events,
+			       0);
+
+	if (rc < 0)
+		return OTG_STATE_UNDEFINED;
+
+	if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+		otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+		return OTG_STATE_UNDEFINED;
+	} else if (otg_events & OEVT_A_DEV_SRP_DET_EVNT) {
+		otg_dbg(otg, "OEVT_A_DEV_SRP_DET_EVNT\n");
+		return OTG_STATE_A_WAIT_VRISE;
+	} else if (user_events & USER_SRP_EVENT) {
+		otg_dbg(otg, "User initiated VBUS\n");
+		return OTG_STATE_A_WAIT_VRISE;
+	}
+
+	return OTG_STATE_UNDEFINED;
+}
+
+/*
+ * do_a_peripheral - A-device acting as peripheral after HNP handover
+ *
+ * Waits for the B-host to stop (hardware event or user request), a
+ * session end, or an ID change.  B-host end and user end-session both
+ * reclaim the host role via A_WAIT_VRISE.
+ */
+static enum usb_otg_state do_a_peripheral(struct dwc3_otg *otg)
+{
+	int rc;
+	u32 otg_mask;
+	u32 user_mask;
+	u32 otg_events = 0;
+	u32 user_events = 0;
+
+	otg_dbg(otg, "");
+	otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+		   OEVT_A_DEV_SESS_END_DET_EVNT |
+		   OEVT_A_DEV_B_DEV_HOST_END_EVNT;
+	user_mask = USER_HNP_END_SESSION;
+
+	rc = sleep_until_event(otg,
+			       otg_mask, user_mask,
+			       &otg_events, &user_events, 0);
+	if (rc < 0)
+		return OTG_STATE_UNDEFINED;
+
+	if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+		otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+		return OTG_STATE_UNDEFINED;
+
+	} else if (otg_events & OEVT_A_DEV_SESS_END_DET_EVNT) {
+		otg_dbg(otg, "OEVT_A_DEV_SESS_END_DET_EVNT\n");
+		return OTG_STATE_A_WAIT_VFALL;
+
+	} else if (otg_events & OEVT_A_DEV_B_DEV_HOST_END_EVNT) {
+		otg_dbg(otg, "OEVT_A_DEV_B_DEV_HOST_END_EVNT\n");
+		return OTG_STATE_A_WAIT_VRISE;
+	} else if (user_events & USER_HNP_END_SESSION) {
+		otg_dbg(otg, "USER_HNP_END_SESSION\n");
+		return OTG_STATE_A_WAIT_VRISE;
+	}
+
+	return OTG_STATE_UNDEFINED;
+}
+
+#define HNP_TIMEOUT 4000
+
+/*
+ * do_b_hnp_init - B-device: request HNP and wait for the outcome
+ *
+ * Sets HnpReq/DevSetHnpEn, then waits up to HNP_TIMEOUT ms.  Success
+ * moves to B_WAIT_ACON; failure or timeout falls back to
+ * B_PERIPHERAL; VBUS loss returns to B_IDLE.
+ */
+static enum usb_otg_state do_b_hnp_init(struct dwc3_otg *otg)
+{
+	int rc;
+	u32 otg_mask;
+	u32 events = 0;
+
+	otg_dbg(otg, "");
+	otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+		   OEVT_B_DEV_HNP_CHNG_EVNT |
+		   OEVT_B_DEV_VBUS_CHNG_EVNT;
+
+	start_b_hnp(otg);
+	rc = HNP_TIMEOUT;
+
+again:
+	rc = sleep_until_event(otg,
+			       otg_mask, 0,
+			       &events, NULL, rc);
+	stop_b_hnp(otg);
+
+	if (rc < 0)
+		return OTG_STATE_UNDEFINED;
+
+	if (events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+		otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+		return OTG_STATE_UNDEFINED;
+	} else if (events & OEVT_B_DEV_VBUS_CHNG_EVNT) {
+		otg_dbg(otg, "OEVT_B_DEV_VBUS_CHNG_EVNT\n");
+		return OTG_STATE_B_IDLE;
+	} else if (events & OEVT_B_DEV_HNP_CHNG_EVNT) {
+		otg_dbg(otg, "OEVT_B_DEV_HNP_CHNG_EVNT\n");
+		if (events & OEVT_HST_NEG_SCS) {
+			otg_dbg(otg, "B-HNP Success\n");
+			return OTG_STATE_B_WAIT_ACON;
+
+		} else {
+			otg_err(otg, "B-HNP Failed\n");
+			return OTG_STATE_B_PERIPHERAL;
+		}
+	} else if (rc == 0) {
+		/* Timeout */
+		otg_err(otg, "HNP timed out!\n");
+		return OTG_STATE_B_PERIPHERAL;
+
+	} else {
+		/* Spurious wakeup: keep waiting with the remaining time */
+		goto again;
+	}
+
+	return OTG_STATE_UNDEFINED;
+}
+
+/*
+ * do_b_peripheral - B-device running as peripheral
+ *
+ * Waits for VBUS changes, ID change, or user HNP/end-session input.
+ * A VBUS change with the session still valid is ignored; session loss
+ * or user end-session returns to B_IDLE; user HNP enters HNP
+ * negotiation.
+ */
+static enum usb_otg_state do_b_peripheral(struct dwc3_otg *otg)
+{
+	int rc;
+	u32 otg_mask;
+	u32 user_mask;
+	u32 otg_events = 0;
+	u32 user_events = 0;
+
+	otg_dbg(otg, "");
+	otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT | OEVT_B_DEV_VBUS_CHNG_EVNT;
+	user_mask = USER_HNP_EVENT | USER_END_SESSION |
+		    USER_SRP_EVENT | INITIAL_SRP;
+
+again:
+	rc = sleep_until_event(otg,
+			       otg_mask, user_mask,
+			       &otg_events, &user_events, 0);
+	if (rc < 0)
+		return OTG_STATE_UNDEFINED;
+
+	if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+		otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+		return OTG_STATE_UNDEFINED;
+	} else if (otg_events & OEVT_B_DEV_VBUS_CHNG_EVNT) {
+		otg_dbg(otg, "OEVT_B_DEV_VBUS_CHNG_EVNT\n");
+
+		if (otg_events & OEVT_B_SES_VLD_EVT) {
+			otg_dbg(otg, "Session valid\n");
+			goto again;
+		} else {
+			otg_dbg(otg, "Session not valid\n");
+			return OTG_STATE_B_IDLE;
+		}
+
+	} else if (user_events & USER_HNP_EVENT) {
+		otg_dbg(otg, "USER_HNP_EVENT\n");
+		return do_b_hnp_init(otg);
+	} else if (user_events & USER_END_SESSION) {
+		otg_dbg(otg, "USER_END_SESSION\n");
+		return OTG_STATE_B_IDLE;
+	}
+
+	return OTG_STATE_UNDEFINED;
+}
+
+/*
+ * do_b_wait_acon - B-device after HNP success: wait for A connection
+ *
+ * Starts the host in B-host role, re-enumerates the port, and waits
+ * for the A-device to connect (user event) or for the B-host session
+ * to end.  A connection moves to B_HOST.
+ */
+static enum usb_otg_state do_b_wait_acon(struct dwc3_otg *otg)
+{
+	int rc;
+	u32 otg_mask;
+	u32 user_mask = 0;
+	u32 otg_events = 0;
+	u32 user_events = 0;
+	struct usb_hcd *hcd;
+	struct xhci_hcd *xhci;
+
+	otg_dbg(otg, "");
+	set_b_host(otg, 1);
+	start_host(otg);
+	/* Arm the host-end event before kicking enumeration */
+	otg_mask = OEVT_B_DEV_B_HOST_END_EVNT;
+	otg_write(otg, OEVTEN, otg_mask);
+	reset_port(otg);
+
+	hcd = container_of(otg->otg.host, struct usb_hcd, self);
+	xhci = hcd_to_xhci(hcd);
+	usb_kick_hub_wq(hcd->self.root_hub);
+	if (xhci->shared_hcd)
+		usb_kick_hub_wq(xhci->shared_hcd->self.root_hub);
+
+	otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+		   OEVT_B_DEV_B_HOST_END_EVNT |
+		   OEVT_B_DEV_VBUS_CHNG_EVNT |
+		   OEVT_HOST_ROLE_REQ_INIT_EVNT;
+	user_mask = USER_A_CONN_EVENT | USER_HNP_END_SESSION;
+
+again:
+	rc = sleep_until_event(otg,
+			       otg_mask, user_mask,
+			       &otg_events, &user_events, 0);
+	if (rc < 0)
+		return OTG_STATE_UNDEFINED;
+
+	/* Higher priority first */
+	if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+		otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+		return OTG_STATE_UNDEFINED;
+	} else if (otg_events & OEVT_B_DEV_B_HOST_END_EVNT) {
+		otg_dbg(otg, "OEVT_B_DEV_B_HOST_END_EVNT\n");
+		return OTG_STATE_B_PERIPHERAL;
+	} else if (otg_events & OEVT_B_DEV_VBUS_CHNG_EVNT) {
+		otg_dbg(otg, "OEVT_B_DEV_VBUS_CHNG_EVNT\n");
+		if (otg_events & OEVT_B_SES_VLD_EVT) {
+			otg_dbg(otg, "Session valid\n");
+			goto again;
+		} else {
+			otg_dbg(otg, "Session not valid\n");
+			return OTG_STATE_B_IDLE;
+		}
+	} else if (user_events & USER_A_CONN_EVENT) {
+		otg_dbg(otg, "A-device connected\n");
+		return OTG_STATE_B_HOST;
+	} else if (user_events & USER_HNP_END_SESSION) {
+		otg_dbg(otg, "USER_HNP_END_SESSION\n");
+		return OTG_STATE_B_PERIPHERAL;
+	}
+
+	/* Invalid state */
+	return OTG_STATE_UNDEFINED;
+}
+
+/*
+ * do_b_host - B-device acting as host after HNP
+ *
+ * Waits for the B-host session to end (hardware event or user
+ * request), a VBUS change, or an ID change.  Session/host end returns
+ * to B_PERIPHERAL; VBUS loss returns to B_IDLE.
+ */
+static enum usb_otg_state do_b_host(struct dwc3_otg *otg)
+{
+	int rc;
+	u32 otg_mask;
+	u32 user_mask = 0;
+	u32 otg_events = 0;
+	u32 user_events = 0;
+
+	otg_dbg(otg, "");
+
+	otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+		   OEVT_B_DEV_B_HOST_END_EVNT |
+		   OEVT_B_DEV_VBUS_CHNG_EVNT |
+		   OEVT_HOST_ROLE_REQ_INIT_EVNT;
+	user_mask = USER_HNP_END_SESSION;
+
+again:
+	rc = sleep_until_event(otg,
+			       otg_mask, user_mask,
+			       &otg_events, &user_events, 0);
+	if (rc < 0)
+		return OTG_STATE_UNDEFINED;
+
+	/* Higher priority first */
+	if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+		otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+		return OTG_STATE_UNDEFINED;
+	} else if (otg_events & OEVT_B_DEV_B_HOST_END_EVNT) {
+		otg_dbg(otg, "OEVT_B_DEV_B_HOST_END_EVNT\n");
+		return OTG_STATE_B_PERIPHERAL;
+	} else if (otg_events & OEVT_B_DEV_VBUS_CHNG_EVNT) {
+		otg_dbg(otg, "OEVT_B_DEV_VBUS_CHNG_EVNT\n");
+		if (otg_events & OEVT_B_SES_VLD_EVT) {
+			otg_dbg(otg, "Session valid\n");
+			goto again;
+		} else {
+			otg_dbg(otg, "Session not valid\n");
+			return OTG_STATE_B_IDLE;
+		}
+	} else if (user_events & USER_HNP_END_SESSION) {
+		otg_dbg(otg, "USER_HNP_END_SESSION\n");
+		return OTG_STATE_B_PERIPHERAL;
+	}
+
+	/* Invalid state */
+	return OTG_STATE_UNDEFINED;
+}
+
+/*
+ * do_b_idle - B-device idle: wait for a valid session or user SRP
+ *
+ * Forces peripheral mode, re-arms the event buffers, then sleeps.
+ * A VBUS/session-valid event moves to B_PERIPHERAL; a user SRP
+ * request moves to B_SRP_INIT.
+ */
+static enum usb_otg_state do_b_idle(struct dwc3_otg *otg)
+{
+	int rc;
+	u32 otg_mask;
+	u32 user_mask;
+	u32 otg_events = 0;
+	u32 user_events = 0;
+
+	otg_dbg(otg, "");
+
+	if (!set_peri_mode(otg, PERI_MODE_PERIPHERAL))
+		otg_err(otg, "Failed to set peripheral mode\n");
+
+	dwc3_otg_setup_event_buffers(otg);
+
+	otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+		   OEVT_B_DEV_SES_VLD_DET_EVNT |
+		   OEVT_B_DEV_VBUS_CHNG_EVNT;
+	user_mask = USER_SRP_EVENT;
+
+again:
+	rc = sleep_until_event(otg,
+			       otg_mask, user_mask,
+			       &otg_events, &user_events, 0);
+
+	if (rc < 0)
+		return OTG_STATE_UNDEFINED;
+
+	if (otg_events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+		otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+		return OTG_STATE_UNDEFINED;
+	} else if ((otg_events & OEVT_B_DEV_VBUS_CHNG_EVNT) ||
+		   (otg_events & OEVT_B_DEV_SES_VLD_DET_EVNT)) {
+		otg_dbg(otg, "OEVT_B_DEV_VBUS_CHNG_EVNT\n");
+		if (otg_events & OEVT_B_SES_VLD_EVT) {
+			otg_dbg(otg, "Session valid\n");
+			return OTG_STATE_B_PERIPHERAL;
+
+		} else {
+			otg_dbg(otg, "Session not valid\n");
+			goto again;
+		}
+	} else if (user_events & USER_SRP_EVENT) {
+		otg_dbg(otg, "USER_SRP_EVENT\n");
+		return OTG_STATE_B_SRP_INIT;
+	}
+
+	return OTG_STATE_UNDEFINED;
+}
+
+/*
+ * do_b_srp_init - B-device: issue SRP and wait for the host to respond
+ *
+ * Pulses OCTL.SesReq and waits up to SRP_TIMEOUT ms for the session to
+ * become valid (-> B_PERIPHERAL).  No response returns to B_IDLE.
+ */
+static enum usb_otg_state do_b_srp_init(struct dwc3_otg *otg)
+{
+	int rc;
+	u32 otg_mask;
+	u32 events = 0;
+
+	otg_dbg(otg, "");
+	otg_mask = OEVT_CONN_ID_STS_CHNG_EVNT |
+		   OEVT_B_DEV_SES_VLD_DET_EVNT |
+		   OEVT_B_DEV_VBUS_CHNG_EVNT;
+
+	otg_write(otg, OEVTEN, otg_mask);
+	start_srp(otg);
+
+	rc = SRP_TIMEOUT;
+
+again:
+	rc = sleep_until_event(otg,
+			       otg_mask, 0,
+			       &events, NULL, rc);
+	if (rc < 0)
+		return OTG_STATE_UNDEFINED;
+
+	if (events & OEVT_CONN_ID_STS_CHNG_EVNT) {
+		otg_dbg(otg, "OEVT_CONN_ID_STS_CHNG_EVNT\n");
+		return OTG_STATE_UNDEFINED;
+	} else if (events & OEVT_B_DEV_SES_VLD_DET_EVNT) {
+		otg_dbg(otg, "OEVT_B_DEV_SES_VLD_DET_EVNT\n");
+		return OTG_STATE_B_PERIPHERAL;
+	} else if (rc == 0) {
+		otg_dbg(otg, "SRP Timeout (rc=%d)\n", rc);
+		otg_info(otg, "DEVICE NO RESPONSE FOR SRP\n");
+		return OTG_STATE_B_IDLE;
+
+	} else {
+		/* Spurious wakeup: keep waiting with the remaining time */
+		goto again;
+	}
+
+	return OTG_STATE_UNDEFINED;
+}
+
+/* Main OTG state machine thread.
+ *
+ * Runs the do_*() state handlers in a loop; each handler blocks until a
+ * hardware or user event and returns the next state. The thread exits
+ * when kthread_should_stop() is set.
+ *
+ * Fix: the default (unknown state) message printed otg->state, which is
+ * only written once at init time; the live state tracked by this loop
+ * is otg->otg.state, as used everywhere else in this function.
+ */
+int otg_main_thread(void *data)
+{
+ struct dwc3_otg *otg = (struct dwc3_otg *)data;
+ enum usb_otg_state prev = OTG_STATE_UNDEFINED;
+
+#ifdef VERBOSE_DEBUG
+ u32 snpsid = otg_read(otg, 0xc120);
+
+ otg_vdbg(otg, "io_priv=%p\n", otg->regs);
+ otg_vdbg(otg, "c120: %x\n", snpsid);
+#endif
+
+ /* Allow the thread to be killed by a signal, but set the signal mask
+ * to block everything but INT, TERM, KILL, and USR1.
+ */
+ allow_signal(SIGINT);
+ allow_signal(SIGTERM);
+ allow_signal(SIGKILL);
+ allow_signal(SIGUSR1);
+
+ /* Allow the thread to be frozen */
+ set_freezable();
+
+ /* Allow host/peripheral driver load to finish */
+ msleep(100);
+
+ reset_hw(otg);
+
+ stop_host(otg);
+ stop_peripheral(otg);
+
+ otg_dbg(otg, "Thread running\n");
+ while (1) {
+ enum usb_otg_state next = OTG_STATE_UNDEFINED;
+
+ otg_vdbg(otg, "Main thread entering state\n");
+
+ switch (otg->otg.state) {
+ case OTG_STATE_UNDEFINED:
+ otg_dbg(otg, "OTG_STATE_UNDEFINED\n");
+ next = do_connector_id_status(otg);
+ break;
+
+ case OTG_STATE_A_IDLE:
+ otg_dbg(otg, "OTG_STATE_A_IDLE\n");
+ stop_peripheral(otg);
+
+ if (prev == OTG_STATE_UNDEFINED)
+ next = OTG_STATE_A_WAIT_VRISE;
+ else
+ next = do_a_idle(otg);
+ break;
+
+ case OTG_STATE_A_WAIT_VRISE:
+ otg_dbg(otg, "OTG_STATE_A_WAIT_VRISE\n");
+ next = do_a_wait_vrise(otg);
+ break;
+
+ case OTG_STATE_A_WAIT_BCON:
+ otg_dbg(otg, "OTG_STATE_A_WAIT_BCON\n");
+ next = do_a_wait_bconn(otg);
+ break;
+
+ case OTG_STATE_A_HOST:
+ otg_dbg(otg, "OTG_STATE_A_HOST\n");
+ stop_peripheral(otg);
+ next = do_a_host(otg);
+ /* Don't stop the host here if we are going into
+ * A_SUSPEND. We need to delay that until later. It
+ * will be stopped when coming out of A_SUSPEND
+ * state.
+ */
+ if (next != OTG_STATE_A_SUSPEND)
+ stop_host(otg);
+ break;
+
+ case OTG_STATE_A_SUSPEND:
+ otg_dbg(otg, "OTG_STATE_A_SUSPEND\n");
+ next = do_a_hnp_init(otg);
+
+ /* Stop the host. */
+ stop_host(otg);
+ break;
+
+ case OTG_STATE_A_WAIT_VFALL:
+ otg_dbg(otg, "OTG_STATE_A_WAIT_VFALL\n");
+ next = do_a_wait_vfall(otg);
+ stop_host(otg);
+ break;
+
+ case OTG_STATE_A_PERIPHERAL:
+ otg_dbg(otg, "OTG_STATE_A_PERIPHERAL\n");
+ stop_host(otg);
+ start_peripheral(otg);
+ next = do_a_peripheral(otg);
+ stop_peripheral(otg);
+ break;
+
+ case OTG_STATE_B_IDLE:
+ otg_dbg(otg, "OTG_STATE_B_IDLE\n");
+ next = do_b_idle(otg);
+ break;
+
+ case OTG_STATE_B_PERIPHERAL:
+ otg_dbg(otg, "OTG_STATE_B_PERIPHERAL\n");
+ stop_host(otg);
+ start_peripheral(otg);
+ next = do_b_peripheral(otg);
+ stop_peripheral(otg);
+ break;
+
+ case OTG_STATE_B_SRP_INIT:
+ otg_dbg(otg, "OTG_STATE_B_SRP_INIT\n");
+ otg_read(otg, OSTS);
+ next = do_b_srp_init(otg);
+ break;
+
+ case OTG_STATE_B_WAIT_ACON:
+ otg_dbg(otg, "OTG_STATE_B_WAIT_ACON\n");
+ next = do_b_wait_acon(otg);
+ break;
+
+ case OTG_STATE_B_HOST:
+ otg_dbg(otg, "OTG_STATE_B_HOST\n");
+ next = do_b_host(otg);
+ stop_host(otg);
+ break;
+
+ default:
+ otg_err(otg, "Unknown state %d, sleeping...\n",
+ otg->otg.state);
+ sleep_main_thread(otg);
+ break;
+ }
+
+ prev = otg->otg.state;
+ otg->otg.state = next;
+ if (kthread_should_stop())
+ break;
+ }
+
+ otg->main_thread = NULL;
+ otg_dbg(otg, "OTG main thread exiting....\n");
+
+ return 0;
+}
+
+/* Start the OTG state machine thread once both the gadget and the host
+ * side have registered.
+ *
+ * Fix: kthread_create() returns an ERR_PTR on failure; the original
+ * passed that straight to wake_up_process(), which would oops. Check
+ * with IS_ERR() and leave main_thread NULL on failure so a later call
+ * can retry.
+ */
+static void start_main_thread(struct dwc3_otg *otg)
+{
+ if (!otg->main_thread && otg->otg.gadget && otg->otg.host) {
+ otg_dbg(otg, "Starting OTG main thread\n");
+ otg->main_thread = kthread_create(otg_main_thread, otg, "otg");
+ if (IS_ERR(otg->main_thread)) {
+ otg_err(otg, "Failed to create OTG main thread\n");
+ otg->main_thread = NULL;
+ return;
+ }
+ wake_up_process(otg->main_thread);
+ }
+}
+
+/* Map the generic usb_otg member back to its containing dwc3_otg. */
+static inline struct dwc3_otg *otg_to_dwc3_otg(struct usb_otg *x)
+{
+ return container_of(x, struct dwc3_otg, otg);
+}
+
+/* OTG interrupt handler: read and clear the event register, log the
+ * events, and hand any masked event bits to the main thread.
+ *
+ * Fixes: return the irqreturn_t constants instead of raw 0/1, and
+ * report IRQ_NONE when no event bit was pending — this IRQ line is
+ * requested IRQF_SHARED, so claiming foreign interrupts would break
+ * the other handlers and spurious-IRQ detection.
+ */
+static irqreturn_t dwc3_otg_irq(int irq, void *_otg)
+{
+ struct dwc3_otg *otg;
+ u32 oevt;
+ u32 osts;
+ u32 octl;
+ u32 ocfg;
+ u32 oevten;
+ u32 otg_mask = OEVT_ALL;
+
+ if (!_otg)
+ return IRQ_NONE;
+
+ otg = (struct dwc3_otg *)_otg;
+
+ oevt = otg_read(otg, OEVT);
+ osts = otg_read(otg, OSTS);
+ octl = otg_read(otg, OCTL);
+ ocfg = otg_read(otg, OCFG);
+ oevten = otg_read(otg, OEVTEN);
+
+ /* Clear handled events */
+ otg_write(otg, OEVT, oevt);
+
+ otg_vdbg(otg, "\n");
+ otg_vdbg(otg, " oevt = %08x\n", oevt);
+ otg_vdbg(otg, " osts = %08x\n", osts);
+ otg_vdbg(otg, " octl = %08x\n", octl);
+ otg_vdbg(otg, " ocfg = %08x\n", ocfg);
+ otg_vdbg(otg, " oevten = %08x\n", oevten);
+
+ otg_vdbg(otg, "oevt[DeviceMode] = %s\n",
+ oevt & OEVT_DEV_MOD_EVNT ? "Device" : "Host");
+
+ if (oevt & OEVT_CONN_ID_STS_CHNG_EVNT)
+ otg_dbg(otg, "Connector ID Status Change Event\n");
+ if (oevt & OEVT_HOST_ROLE_REQ_INIT_EVNT)
+ otg_dbg(otg, "Host Role Request Init Notification Event\n");
+ if (oevt & OEVT_HOST_ROLE_REQ_CONFIRM_EVNT)
+ otg_dbg(otg, "Host Role Request Confirm Notification Event\n");
+ if (oevt & OEVT_A_DEV_B_DEV_HOST_END_EVNT)
+ otg_dbg(otg, "A-Device B-Host End Event\n");
+ if (oevt & OEVT_A_DEV_HOST_EVNT)
+ otg_dbg(otg, "A-Device Host Event\n");
+ if (oevt & OEVT_A_DEV_HNP_CHNG_EVNT)
+ otg_dbg(otg, "A-Device HNP Change Event\n");
+ if (oevt & OEVT_A_DEV_SRP_DET_EVNT)
+ otg_dbg(otg, "A-Device SRP Detect Event\n");
+ if (oevt & OEVT_A_DEV_SESS_END_DET_EVNT)
+ otg_dbg(otg, "A-Device Session End Detected Event\n");
+ if (oevt & OEVT_B_DEV_B_HOST_END_EVNT)
+ otg_dbg(otg, "B-Device B-Host End Event\n");
+ if (oevt & OEVT_B_DEV_HNP_CHNG_EVNT)
+ otg_dbg(otg, "B-Device HNP Change Event\n");
+ if (oevt & OEVT_B_DEV_SES_VLD_DET_EVNT)
+ otg_dbg(otg, "B-Device Session Valid Detect Event\n");
+ if (oevt & OEVT_B_DEV_VBUS_CHNG_EVNT)
+ otg_dbg(otg, "B-Device VBUS Change Event\n");
+
+ if (oevt & otg_mask) {
+ /* Pass event to main thread */
+ spin_lock(&otg->lock);
+ otg->otg_events |= oevt;
+ wakeup_main_thread(otg);
+ spin_unlock(&otg->lock);
+ return IRQ_HANDLED;
+ }
+
+ /* Events were read/cleared but none we care about were pending */
+ return oevt ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/* Delayed work that polls the attached B-device's OTG status (GetStatus
+ * with wIndex 0xf000) and, if the device requests a role switch, enables
+ * b_hnp_enable and suspends the port to trigger HNP. Re-schedules itself
+ * while the peer has not yet requested the host role.
+ *
+ * Fixes: container_of() never returns NULL, so the NULL check must be on
+ * otg->otg.host itself; and the usb_get_phy() result was dereferenced
+ * without an IS_ERR() check.
+ */
+static void hnp_polling_work(struct work_struct *w)
+{
+ struct dwc3_otg *otg = container_of(w, struct dwc3_otg,
+ hp_work.work);
+ struct usb_bus *bus;
+ struct usb_device *udev;
+ struct usb_hcd *hcd;
+ u8 *otgstatus;
+ int ret;
+ int err;
+
+ if (!otg->otg.host)
+ return;
+
+ hcd = container_of(otg->otg.host, struct usb_hcd, self);
+
+ bus = &hcd->self;
+ if (!bus->otg_port)
+ return;
+
+ udev = usb_hub_find_child(bus->root_hub, bus->otg_port);
+ if (!udev)
+ return;
+
+ otgstatus = kmalloc(sizeof(*otgstatus), GFP_NOIO);
+ if (!otgstatus)
+ return;
+
+ ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ USB_REQ_GET_STATUS, USB_DIR_IN | USB_RECIP_DEVICE,
+ 0, 0xf000, otgstatus, sizeof(*otgstatus),
+ USB_CTRL_GET_TIMEOUT);
+
+ if (ret == sizeof(*otgstatus) && (*otgstatus & 0x1)) {
+ /* enable HNP before suspend, it's simpler */
+
+ udev->bus->b_hnp_enable = 1;
+ err = usb_control_msg(udev,
+ usb_sndctrlpipe(udev, 0),
+ USB_REQ_SET_FEATURE, 0,
+ udev->bus->b_hnp_enable
+ ? USB_DEVICE_B_HNP_ENABLE
+ : USB_DEVICE_A_ALT_HNP_SUPPORT,
+ 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
+
+ if (err < 0) {
+ /* OTG MESSAGE: report errors here,
+ * customize to match your product.
+ */
+ otg_info(otg, "ERROR : Device no response\n");
+ dev_info(&udev->dev, "can't set HNP mode: %d\n",
+ err);
+ udev->bus->b_hnp_enable = 0;
+ if (le16_to_cpu(udev->descriptor.idVendor) == 0x1a0a) {
+ if (usb_port_suspend(udev, PMSG_AUTO_SUSPEND)
+ < 0)
+ dev_dbg(&udev->dev, "HNP fail, %d\n",
+ err);
+ }
+ } else {
+ /* Device wants role-switch, suspend the bus. */
+ static struct usb_phy *phy;
+
+ phy = usb_get_phy(USB_PHY_TYPE_USB3);
+ if (!IS_ERR(phy)) {
+ otg_start_hnp(phy->otg);
+ usb_put_phy(phy);
+ }
+
+ if (usb_port_suspend(udev, PMSG_AUTO_SUSPEND) < 0)
+ dev_dbg(&udev->dev, "HNP fail, %d\n", err);
+ }
+ } else if (ret < 0) {
+ udev->bus->b_hnp_enable = 1;
+ err = usb_control_msg(udev,
+ usb_sndctrlpipe(udev, 0),
+ USB_REQ_SET_FEATURE, 0,
+ USB_DEVICE_B_HNP_ENABLE,
+ 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
+ if (usb_port_suspend(udev, PMSG_AUTO_SUSPEND) < 0)
+ dev_dbg(&udev->dev, "HNP fail, %d\n", err);
+ } else {
+ /* Peer not requesting the host role yet — poll again in 1s */
+ schedule_delayed_work(&otg->hp_work, 1 * HZ);
+ }
+
+ kfree(otgstatus);
+}
+
+/* PHY connect notification: if the newly connected device sits on the
+ * OTG port of our root hub and its OTG descriptor advertises HNP,
+ * start the HNP polling work.
+ *
+ * Fix: container_of() cannot return NULL — the original checked hcd
+ * after the conversion; guard phy->otg->host itself instead.
+ */
+static int dwc3_otg_notify_connect(struct usb_phy *phy,
+ enum usb_device_speed speed)
+{
+ struct usb_bus *bus;
+ struct usb_device *udev;
+ struct usb_hcd *hcd;
+ struct dwc3_otg *otg;
+ int err = 0;
+
+ otg = otg_to_dwc3_otg(phy->otg);
+
+ if (!phy->otg->host)
+ return -EINVAL;
+
+ hcd = container_of(phy->otg->host, struct usb_hcd, self);
+
+ bus = &hcd->self;
+ if (!bus->otg_port)
+ return 0;
+
+ udev = usb_hub_find_child(bus->root_hub, bus->otg_port);
+ if (!udev)
+ return 0;
+
+ /*
+ * OTG-aware devices on OTG-capable root hubs may be able to use SRP,
+ * to wake us after we've powered off VBUS; and HNP, switching roles
+ * "host" to "peripheral". The OTG descriptor helps figure this out.
+ */
+ if (udev->config && udev->parent == udev->bus->root_hub) {
+ struct usb_otg20_descriptor *desc = NULL;
+
+ /* descriptor may appear anywhere in config */
+ err = __usb_get_extra_descriptor(udev->rawdescriptors[0],
+ le16_to_cpu(udev->config[0].desc.wTotalLength),
+ USB_DT_OTG, (void **)&desc, sizeof(*desc));
+ if (err || !(desc->bmAttributes & USB_OTG_HNP))
+ return 0;
+
+ if (udev->portnum == udev->bus->otg_port) {
+ INIT_DELAYED_WORK(&otg->hp_work,
+ hnp_polling_work);
+ schedule_delayed_work(&otg->hp_work, HZ);
+ }
+
+ }
+
+ return err;
+}
+
+/* PHY disconnect notification: make sure no HNP polling work is still
+ * queued or running before the device goes away.
+ *
+ * The work_pending() guard is kept because hp_work is only initialised
+ * in dwc3_otg_notify_connect() for HNP-capable devices; the open-coded
+ * cancel/msleep retry loop is replaced by cancel_delayed_work_sync(),
+ * which waits for a running instance the same way.
+ */
+static int dwc3_otg_notify_disconnect(struct usb_phy *phy,
+ enum usb_device_speed speed)
+{
+ struct dwc3_otg *otg;
+
+ otg = otg_to_dwc3_otg(phy->otg);
+
+ if (work_pending(&otg->hp_work.work))
+ cancel_delayed_work_sync(&otg->hp_work);
+
+ return 0;
+}
+
+/* Force the core into peripheral (yes != 0) or host (yes == 0) role.
+ * Stops the opposite side first, then programs the PERI_MODE bit via
+ * set_peri_mode().
+ */
+static void dwc3_otg_set_peripheral(struct usb_otg *_otg, int yes)
+{
+ struct dwc3_otg *otg;
+
+ if (!_otg)
+ return;
+
+ otg = otg_to_dwc3_otg(_otg);
+ otg_dbg(otg, "\n");
+
+ if (yes) {
+ /* Lazily latch GHWPARAMS6 the first time we switch to
+ * peripheral; 0xdeadbeef marks "not read yet".
+ */
+ if (otg->hwparams6 == 0xdeadbeef)
+ otg->hwparams6 = otg_read(otg, GHWPARAMS6);
+ stop_host(otg);
+ } else {
+ stop_peripheral(otg);
+ }
+
+ set_peri_mode(otg, yes);
+}
+
+/* usb_otg::set_peripheral hook: register (or unregister) the gadget
+ * with the OTG core and kick the main thread once both sides exist.
+ *
+ * NOTE(review): a literal gadget pointer of 1 is used as an in-band
+ * "force peripheral role" command (mirroring the 0xdeadbeef sentinel in
+ * dwc3_otg_set_host()) — presumably sent by a companion driver; confirm
+ * against the callers.
+ */
+static int dwc3_otg_set_periph(struct usb_otg *_otg, struct usb_gadget *gadget)
+{
+ struct dwc3_otg *otg;
+
+ if (!_otg)
+ return -ENODEV;
+
+ otg = otg_to_dwc3_otg(_otg);
+ otg_dbg(otg, "\n");
+
+ if ((long)gadget == 1) {
+ dwc3_otg_set_peripheral(_otg, 1);
+ return 0;
+ }
+
+ if (!gadget) {
+ otg->otg.gadget = NULL;
+ return -ENODEV;
+ }
+
+ otg->otg.gadget = gadget;
+ otg->otg.gadget->hnp_polling_support = 1;
+ otg->otg.state = OTG_STATE_B_IDLE;
+
+ start_main_thread(otg);
+ return 0;
+}
+
+/* usb_otg::set_host hook: register (or unregister) the xHCI host bus
+ * with the OTG core, mark port 1 as the OTG port on both HCDs, and kick
+ * the main thread once both sides exist.
+ *
+ * NOTE(review): a literal host pointer of 0xdeadbeef is used as an
+ * in-band "force host role" command (counterpart of the (long)gadget==1
+ * sentinel in dwc3_otg_set_periph()) — confirm against the callers.
+ */
+static int dwc3_otg_set_host(struct usb_otg *_otg, struct usb_bus *host)
+{
+ struct dwc3_otg *otg;
+ struct usb_hcd *hcd;
+ struct xhci_hcd *xhci;
+
+ if (!_otg)
+ return -ENODEV;
+
+ otg = otg_to_dwc3_otg(_otg);
+ otg_dbg(otg, "\n");
+
+ if (host == (struct usb_bus *)0xdeadbeef) {
+ dwc3_otg_set_peripheral(_otg, 0);
+ return 0;
+ }
+
+ if (!host) {
+ otg->otg.host = NULL;
+ otg->hcd_irq = 0;
+ return -ENODEV;
+ }
+
+ hcd = container_of(host, struct usb_hcd, self);
+ xhci = hcd_to_xhci(hcd);
+ otg_dbg(otg, "hcd=%p xhci=%p\n", hcd, xhci);
+
+ /* Port 1 of both the main and the shared (SS) HCD is the OTG port */
+ hcd->self.otg_port = 1;
+ if (xhci->shared_hcd) {
+ xhci->shared_hcd->self.otg_port = 1;
+ otg_dbg(otg, "shared_hcd=%p\n", xhci->shared_hcd);
+ }
+
+ otg->otg.host = host;
+ otg->hcd_irq = hcd->irq;
+ otg_dbg(otg, "host=%p irq=%d\n", otg->otg.host, otg->hcd_irq);
+
+
+ otg->host_started = 1;
+ otg->dev_enum = 0;
+ start_main_thread(otg);
+ return 0;
+}
+
+/* usb_otg::start_srp hook: queue a user SRP request for the main OTG
+ * thread. Only valid once both host and gadget are registered.
+ */
+static int dwc3_otg_start_srp(struct usb_otg *x)
+{
+ struct dwc3_otg *dwc_otg;
+ unsigned long irq_flags;
+
+ if (!x)
+ return -ENODEV;
+
+ dwc_otg = otg_to_dwc3_otg(x);
+ otg_dbg(dwc_otg, "\n");
+
+ if (!dwc_otg->otg.host)
+ return -ENODEV;
+ if (!dwc_otg->otg.gadget)
+ return -ENODEV;
+
+ spin_lock_irqsave(&dwc_otg->lock, irq_flags);
+ dwc_otg->user_events |= USER_SRP_EVENT;
+ wakeup_main_thread(dwc_otg);
+ spin_unlock_irqrestore(&dwc_otg->lock, irq_flags);
+
+ return 0;
+}
+
+/* usb_otg::start_hnp hook: queue a user HNP request for the main OTG
+ * thread. Only valid once both host and gadget are registered.
+ */
+static int dwc3_otg_start_hnp(struct usb_otg *x)
+{
+ struct dwc3_otg *dwc_otg;
+ unsigned long irq_flags;
+
+ if (!x)
+ return -ENODEV;
+
+ dwc_otg = otg_to_dwc3_otg(x);
+ otg_dbg(dwc_otg, "\n");
+
+ if (!dwc_otg->otg.host)
+ return -ENODEV;
+ if (!dwc_otg->otg.gadget)
+ return -ENODEV;
+
+ spin_lock_irqsave(&dwc_otg->lock, irq_flags);
+ dwc_otg->user_events |= USER_HNP_EVENT;
+ wakeup_main_thread(dwc_otg);
+ spin_unlock_irqrestore(&dwc_otg->lock, irq_flags);
+
+ return 0;
+}
+
+/* Queue a user "end session" request for the main OTG thread.
+ * Only valid once both host and gadget are registered.
+ */
+static int dwc3_otg_end_session(struct usb_otg *x)
+{
+ unsigned long flags;
+ struct dwc3_otg *otg;
+
+ if (!x)
+ return -ENODEV;
+
+ otg = otg_to_dwc3_otg(x);
+ otg_dbg(otg, "\n");
+
+ if (!otg->otg.host || !otg->otg.gadget)
+ return -ENODEV;
+
+ spin_lock_irqsave(&otg->lock, flags);
+ otg->user_events |= USER_END_SESSION;
+ wakeup_main_thread(otg);
+ spin_unlock_irqrestore(&otg->lock, flags);
+ return 0;
+}
+
+/* Thin wrapper so the sysfs "end" attribute can end the session via a
+ * generic usb_otg pointer.
+ */
+static int otg_end_session(struct usb_otg *otg)
+{
+ return dwc3_otg_end_session(otg);
+}
+
+/* Queue a "host released" notification (from the peripheral-side PCD)
+ * for the main OTG thread. Only valid once both host and gadget are
+ * registered.
+ */
+static int dwc3_otg_received_host_release(struct usb_otg *x)
+{
+ struct dwc3_otg *otg;
+ unsigned long flags;
+
+ if (!x)
+ return -ENODEV;
+
+ otg = otg_to_dwc3_otg(x);
+ otg_dbg(otg, "\n");
+
+ if (!otg->otg.host || !otg->otg.gadget)
+ return -ENODEV;
+
+ spin_lock_irqsave(&otg->lock, flags);
+ otg->user_events |= PCD_RECEIVED_HOST_RELEASE_EVENT;
+ wakeup_main_thread(otg);
+ spin_unlock_irqrestore(&otg->lock, flags);
+ return 0;
+}
+
+/* Exported entry point for other drivers to report a host release. */
+int otg_host_release(struct usb_otg *otg)
+{
+ return dwc3_otg_received_host_release(otg);
+}
+EXPORT_SYMBOL(otg_host_release);
+
+/* Unmask every OTG event interrupt in the OEVTEN register. */
+static void dwc3_otg_enable_irq(struct dwc3_otg *otg)
+{
+ otg_write(otg, OEVTEN, OEVT_ALL);
+}
+
+/* sysfs "srp" (write-only): trigger SRP via the registered USB3 PHY.
+ * Errors are silently ignored; the write always "succeeds" so the shell
+ * side never blocks.
+ */
+static ssize_t store_srp(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct usb_phy *phy;
+ struct usb_otg *otg;
+
+ phy = usb_get_phy(USB_PHY_TYPE_USB3);
+ if (IS_ERR(phy) || !phy) {
+ if (!IS_ERR(phy))
+ usb_put_phy(phy);
+ return count;
+ }
+
+ otg = phy->otg;
+ if (!otg) {
+ usb_put_phy(phy);
+ return count;
+ }
+
+ otg_start_srp(otg);
+ usb_put_phy(phy);
+ return count;
+}
+static DEVICE_ATTR(srp, 0220, NULL, store_srp);
+
+/* sysfs "end" (write-only): end the current OTG session via the
+ * registered USB3 PHY. Errors are silently ignored.
+ */
+static ssize_t store_end(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct usb_phy *phy;
+ struct usb_otg *otg;
+
+ phy = usb_get_phy(USB_PHY_TYPE_USB3);
+ if (IS_ERR(phy) || !phy) {
+ if (!IS_ERR(phy))
+ usb_put_phy(phy);
+ return count;
+ }
+
+ otg = phy->otg;
+ if (!otg) {
+ usb_put_phy(phy);
+ return count;
+ }
+
+ otg_end_session(otg);
+ usb_put_phy(phy);
+ return count;
+}
+static DEVICE_ATTR(end, 0220, NULL, store_end);
+
+/* sysfs "hnp" (write-only): request a role switch from the gadget side
+ * by setting host_request_flag; the flag is reported to the host in the
+ * next HNP poll (GetStatus) cycle. Errors are silently ignored.
+ */
+static ssize_t store_hnp(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+ struct usb_phy *phy = usb_get_phy(USB_PHY_TYPE_USB3);
+ struct usb_otg *otg;
+
+ dev_dbg(dwc->dev, "%s()\n", __func__);
+
+ if (IS_ERR(phy) || !phy) {
+ dev_info(dwc->dev, "NO PHY!!\n");
+ if (!IS_ERR(phy))
+ usb_put_phy(phy);
+ return count;
+ }
+
+ otg = phy->otg;
+ if (!otg) {
+ dev_info(dwc->dev, "NO OTG!!\n");
+ usb_put_phy(phy);
+ return count;
+ }
+
+ dev_info(dev, "b_hnp_enable is FALSE\n");
+ dwc->gadget.host_request_flag = 1;
+
+ usb_put_phy(phy);
+ return count;
+}
+static DEVICE_ATTR(hnp, 0220, NULL, store_hnp);
+
+/* sysfs "hnp_end" (write-only): queue a USER_HNP_END_SESSION event so
+ * both devices return to their pre-HNP roles. Errors are silently
+ * ignored.
+ */
+static ssize_t store_hnp_end(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct usb_phy *phy;
+ struct usb_otg *otg;
+ unsigned long flags;
+ struct dwc3_otg *dwc_otg;
+
+ phy = usb_get_phy(USB_PHY_TYPE_USB3);
+ if (IS_ERR(phy) || !phy) {
+ if (!IS_ERR(phy))
+ usb_put_phy(phy);
+ return count;
+ }
+
+ otg = phy->otg;
+ if (!otg) {
+ usb_put_phy(phy);
+ return count;
+ }
+
+ dwc_otg = otg_to_dwc3_otg(otg);
+
+ spin_lock_irqsave(&dwc_otg->lock, flags);
+ dwc_otg->user_events |= USER_HNP_END_SESSION;
+ wakeup_main_thread(dwc_otg);
+ spin_unlock_irqrestore(&dwc_otg->lock, flags);
+
+ usb_put_phy(phy);
+ return count;
+}
+static DEVICE_ATTR(hnp_end, 0220, NULL, store_hnp_end);
+
+/* sysfs "a_hnp_reqd" (write-only): signal a host-release request.
+ * NOTE(review): host_release() is defined elsewhere in this file (not
+ * visible here) — presumably it feeds PCD_RECEIVED_HOST_RELEASE_EVENT
+ * like otg_host_release(); confirm.
+ */
+static ssize_t store_a_hnp_reqd(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+ struct dwc3_otg *otg;
+
+ otg = dwc->otg;
+ host_release(otg);
+ return count;
+}
+static DEVICE_ATTR(a_hnp_reqd, 0220, NULL, store_a_hnp_reqd);
+
+/* sysfs "print_dbg" (write-only): dump the OTG debug registers to the
+ * kernel log.
+ */
+static ssize_t store_print_dbg(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+ struct dwc3_otg *otg;
+
+ otg = dwc->otg;
+ print_debug_regs(otg);
+
+ return count;
+}
+static DEVICE_ATTR(print_dbg, 0220, NULL, store_print_dbg);
+
+/* Remove all OTG control sysfs files created by
+ * dwc3_otg_create_dev_files().
+ */
+void dwc_usb3_remove_dev_files(struct device *dev)
+{
+ device_remove_file(dev, &dev_attr_print_dbg);
+ device_remove_file(dev, &dev_attr_a_hnp_reqd);
+ device_remove_file(dev, &dev_attr_end);
+ device_remove_file(dev, &dev_attr_srp);
+ device_remove_file(dev, &dev_attr_hnp);
+ device_remove_file(dev, &dev_attr_hnp_end);
+}
+
+/* Create the OTG control sysfs files (hnp, hnp_end, srp, end,
+ * a_hnp_reqd, print_dbg).
+ *
+ * Fix: on a mid-sequence failure the original left the already-created
+ * files behind; unwind them so no stale attributes remain.
+ */
+int dwc3_otg_create_dev_files(struct device *dev)
+{
+ int retval;
+
+ retval = device_create_file(dev, &dev_attr_hnp);
+ if (retval)
+ goto fail;
+
+ retval = device_create_file(dev, &dev_attr_hnp_end);
+ if (retval)
+ goto fail_hnp;
+
+ retval = device_create_file(dev, &dev_attr_srp);
+ if (retval)
+ goto fail_hnp_end;
+
+ retval = device_create_file(dev, &dev_attr_end);
+ if (retval)
+ goto fail_srp;
+
+ retval = device_create_file(dev, &dev_attr_a_hnp_reqd);
+ if (retval)
+ goto fail_end;
+
+ retval = device_create_file(dev, &dev_attr_print_dbg);
+ if (retval)
+ goto fail_a_hnp_reqd;
+
+ return 0;
+
+fail_a_hnp_reqd:
+ device_remove_file(dev, &dev_attr_a_hnp_reqd);
+fail_end:
+ device_remove_file(dev, &dev_attr_end);
+fail_srp:
+ device_remove_file(dev, &dev_attr_srp);
+fail_hnp_end:
+ device_remove_file(dev, &dev_attr_hnp_end);
+fail_hnp:
+ device_remove_file(dev, &dev_attr_hnp);
+fail:
+ dev_err(dev, "Failed to create one or more sysfs files!!\n");
+ return retval;
+}
+
+/* Probe-time OTG setup: allocate the dwc3_otg state, register the USB3
+ * transceiver, request the OTG IRQ and initialise the gadget and host
+ * sides. Silently bails out when the core lacks OTG support.
+ *
+ * Fixes: the kzalloc failure path dereferenced the NULL otg pointer in
+ * its dev_err() (and misspelled "memory"); the usb_phy allocation and
+ * platform_get_irq() results were unchecked; and the error path leaked
+ * the registered PHY / IRQ and left dwc->otg dangling.
+ */
+void dwc3_otg_init(struct dwc3 *dwc)
+{
+ struct dwc3_otg *otg;
+ int err;
+ u32 reg;
+
+ dev_dbg(dwc->dev, "dwc3_otg_init\n");
+
+ /*
+ * GHWPARAMS6[10] bit is SRPSupport.
+ * This bit also reflects DWC_USB3_EN_OTG
+ */
+ reg = dwc3_readl(dwc->regs, DWC3_GHWPARAMS6);
+ if (!(reg & GHWPARAMS6_SRP_SUPPORT_ENABLED)) {
+ /*
+ * No OTG support in the HW core.
+ * We return 0 to indicate no error, since this is acceptable
+ * situation, just continue probe the dwc3 driver without otg.
+ */
+ dev_dbg(dwc->dev, "dwc3_otg address space is not supported\n");
+ return;
+ }
+
+ otg = kzalloc(sizeof(*otg), GFP_KERNEL);
+ if (!otg) {
+ dev_err(dwc->dev, "failed to allocate memory\n");
+ return;
+ }
+
+ dwc->otg = otg;
+ otg->dev = dwc->dev;
+ otg->dwc = dwc;
+
+ otg->regs = dwc->regs - DWC3_GLOBALS_REGS_START;
+ otg->otg.usb_phy = kzalloc(sizeof(struct usb_phy), GFP_KERNEL);
+ if (!otg->otg.usb_phy)
+ goto err_free_otg;
+ otg->otg.usb_phy->dev = otg->dev;
+ otg->otg.usb_phy->label = "dwc3_otg";
+ otg->otg.state = OTG_STATE_UNDEFINED;
+ otg->otg.usb_phy->otg = &otg->otg;
+ otg->otg.usb_phy->notify_connect = dwc3_otg_notify_connect;
+ otg->otg.usb_phy->notify_disconnect = dwc3_otg_notify_disconnect;
+
+ otg->otg.start_srp = dwc3_otg_start_srp;
+ otg->otg.start_hnp = dwc3_otg_start_hnp;
+ otg->otg.set_host = dwc3_otg_set_host;
+ otg->otg.set_peripheral = dwc3_otg_set_periph;
+
+ otg->hwparams6 = reg;
+ otg->state = OTG_STATE_UNDEFINED;
+
+ spin_lock_init(&otg->lock);
+ init_waitqueue_head(&otg->main_wq);
+
+ err = usb_add_phy(otg->otg.usb_phy, USB_PHY_TYPE_USB3);
+ if (err) {
+ dev_err(otg->dev, "can't register transceiver, err: %d\n",
+ err);
+ goto err_free_phy;
+ }
+
+ otg->irq = platform_get_irq(to_platform_device(otg->dev), 1);
+ if (otg->irq < 0)
+ goto err_remove_phy;
+
+ dwc3_otg_create_dev_files(otg->dev);
+
+ /* Set irq handler */
+ err = request_irq(otg->irq, dwc3_otg_irq, IRQF_SHARED, "dwc3_otg", otg);
+ if (err) {
+ dev_err(otg->otg.usb_phy->dev, "failed to request irq #%d --> %d\n",
+ otg->irq, err);
+ goto err_remove_phy;
+ }
+
+ dwc3_otg_enable_irq(otg);
+
+ err = dwc3_gadget_init(dwc);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(otg->otg.usb_phy->dev,
+ "failed to initialize gadget\n");
+ goto err_free_irq;
+ }
+
+ err = dwc3_host_init(dwc);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(otg->otg.usb_phy->dev,
+ "failed to initialize host\n");
+ goto err_free_irq;
+ }
+
+ return;
+
+err_free_irq:
+ free_irq(otg->irq, otg);
+err_remove_phy:
+ usb_remove_phy(otg->otg.usb_phy);
+err_free_phy:
+ kfree(otg->otg.usb_phy);
+err_free_otg:
+ kfree(otg);
+ dwc->otg = NULL;
+}
+
+/* Teardown counterpart of dwc3_otg_init().
+ *
+ * Fixes: the IRQ requested in dwc3_otg_init() and the sysfs files it
+ * created were never released, and dwc->otg was left dangling after
+ * the kfree().
+ * NOTE(review): the main thread clears otg->main_thread itself on exit
+ * (see otg_main_thread); stopping it here would race that path, so it
+ * is left as-is — confirm shutdown ordering against the caller.
+ */
+void dwc3_otg_exit(struct dwc3 *dwc)
+{
+ struct dwc3_otg *otg = dwc->otg;
+
+ otg_dbg(otg, "\n");
+ free_irq(otg->irq, otg);
+ dwc_usb3_remove_dev_files(otg->dev);
+ usb_remove_phy(otg->otg.usb_phy);
+ kfree(otg->otg.usb_phy);
+ kfree(otg);
+ dwc->otg = NULL;
+}
diff --git a/drivers/usb/dwc3/otg.h b/drivers/usb/dwc3/otg.h
new file mode 100644
index 000000000000..81eab4b3509d
--- /dev/null
+++ b/drivers/usb/dwc3/otg.h
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * otg.h - DesignWare USB3 DRD OTG Header
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Felipe Balbi <balbi@ti.com>,
+ * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ */
+
+#define otg_dbg(d, fmt, args...) dev_dbg((d)->dev, "%s(): " fmt,\
+ __func__, ## args)
+#define otg_vdbg(d, fmt, args...) dev_vdbg((d)->dev, "%s(): " fmt,\
+ __func__, ## args)
+#define otg_err(d, fmt, args...) dev_err((d)->dev, "%s(): ERROR: " fmt,\
+ __func__, ## args)
+#define otg_warn(d, fmt, args...) dev_warn((d)->dev, "%s(): WARN: " fmt,\
+ __func__, ## args)
+#define otg_info(d, fmt, args...) dev_info((d)->dev, "%s(): INFO: " fmt,\
+ __func__, ## args)
+
+#ifdef VERBOSE_DEBUG
+#define otg_write(o, reg, val) do { \
+ otg_vdbg(o, "OTG_WRITE: reg=0x%05x, val=0x%08x\n", reg, val); \
+ writel(val, ((void *)((o)->regs)) + reg); \
+ } while (0)
+
+#define otg_read(o, reg) ({ \
+ u32 __r = readl(((void *)((o)->regs)) + reg); \
+ otg_vdbg(o, "OTG_READ: reg=0x%05x, val=0x%08x\n", reg, __r); \
+ __r; \
+ })
+#else
+#define otg_write(o, reg, val) writel(val, ((void *)((o)->regs)) + reg)
+#define otg_read(o, reg) readl(((void *)((o)->regs)) + reg)
+#endif
+
+/* Sleep the main thread until `condition` becomes true or `msecs`
+ * elapse; evaluates to the remaining time (<= 0 on timeout/stop).
+ */
+#define sleep_main_thread_until_condition_timeout(otg, condition, msecs) ({ \
+ int __timeout = msecs; \
+ while (!(condition)) { \
+ otg_dbg(otg, " ... sleeping for %d\n", __timeout); \
+ __timeout = sleep_main_thread_timeout(otg, __timeout); \
+ if (__timeout <= 0) { \
+ break; \
+ } \
+ } \
+ __timeout; \
+ })
+
+/* Sleep the main thread until `condition` becomes true, retrying in
+ * 50 s slices indefinitely; evaluates to the final (non-zero) timeout
+ * result.
+ */
+#define sleep_main_thread_until_condition(otg, condition) ({ \
+ int __rc; \
+ do { \
+ __rc = sleep_main_thread_until_condition_timeout(otg, \
+ condition, 50000); \
+ } while (__rc == 0); \
+ __rc; \
+ })
+
+#define GHWPARAMS6 0xc158
+#define GHWPARAMS6_SRP_SUPPORT_ENABLED 0x0400
+#define GHWPARAMS6_HNP_SUPPORT_ENABLED 0x0800
+
+#define GCTL 0xc110
+#define GCTL_PRT_CAP_DIR 0x3000
+#define GCTL_PRT_CAP_DIR_SHIFT 12
+#define GCTL_PRT_CAP_DIR_HOST 1
+#define GCTL_PRT_CAP_DIR_DEV 2
+#define GCTL_PRT_CAP_DIR_OTG 3
+#define GCTL_GBL_HIBERNATION_EN 0x2
+
+#define OCFG 0xcc00
+#define OCFG_SRP_CAP 0x01
+#define OCFG_SRP_CAP_SHIFT 0
+#define OCFG_HNP_CAP 0x02
+#define OCFG_HNP_CAP_SHIFT 1
+#define OCFG_OTG_VERSION 0x04
+#define OCFG_OTG_VERSION_SHIFT 2
+
+#define OCTL 0xcc04
+#define OCTL_HST_SET_HNP_EN 0x01
+#define OCTL_HST_SET_HNP_EN_SHIFT 0
+#define OCTL_DEV_SET_HNP_EN 0x02
+#define OCTL_DEV_SET_HNP_EN_SHIFT 1
+#define OCTL_TERM_SEL_DL_PULSE 0x04
+#define OCTL_TERM_SEL_DL_PULSE_SHIFT 2
+#define OCTL_SES_REQ 0x08
+#define OCTL_SES_REQ_SHIFT 3
+#define OCTL_HNP_REQ 0x10
+#define OCTL_HNP_REQ_SHIFT 4
+#define OCTL_PRT_PWR_CTL 0x20
+#define OCTL_PRT_PWR_CTL_SHIFT 5
+#define OCTL_PERI_MODE 0x40
+#define OCTL_PERI_MODE_SHIFT 6
+
+#define OEVT 0xcc08
+#define OEVT_ERR 0x00000001
+#define OEVT_ERR_SHIFT 0
+#define OEVT_SES_REQ_SCS 0x00000002
+#define OEVT_SES_REQ_SCS_SHIFT 1
+#define OEVT_HST_NEG_SCS 0x00000004
+#define OEVT_HST_NEG_SCS_SHIFT 2
+#define OEVT_B_SES_VLD_EVT 0x00000008
+#define OEVT_B_SES_VLD_EVT_SHIFT 3
+#define OEVT_B_DEV_VBUS_CHNG_EVNT 0x00000100
+#define OEVT_B_DEV_VBUS_CHNG_EVNT_SHIFT 8
+#define OEVT_B_DEV_SES_VLD_DET_EVNT 0x00000200
+#define OEVT_B_DEV_SES_VLD_DET_EVNT_SHIFT 9
+#define OEVT_B_DEV_HNP_CHNG_EVNT 0x00000400
+#define OEVT_B_DEV_HNP_CHNG_EVNT_SHIFT 10
+#define OEVT_B_DEV_B_HOST_END_EVNT 0x00000800
+#define OEVT_B_DEV_B_HOST_END_EVNT_SHIFT 11
+#define OEVT_A_DEV_SESS_END_DET_EVNT 0x00010000
+#define OEVT_A_DEV_SESS_END_DET_EVNT_SHIFT 16
+#define OEVT_A_DEV_SRP_DET_EVNT 0x00020000
+#define OEVT_A_DEV_SRP_DET_EVNT_SHIFT 17
+#define OEVT_A_DEV_HNP_CHNG_EVNT 0x00040000
+#define OEVT_A_DEV_HNP_CHNG_EVNT_SHIFT 18
+#define OEVT_A_DEV_HOST_EVNT 0x00080000
+#define OEVT_A_DEV_HOST_EVNT_SHIFT 19
+#define OEVT_A_DEV_B_DEV_HOST_END_EVNT 0x00100000
+#define OEVT_A_DEV_B_DEV_HOST_END_EVNT_SHIFT 20
+#define OEVT_A_DEV_IDLE_EVNT 0x00200000
+#define OEVT_A_DEV_IDLE_EVNT_SHIFT 21
+#define OEVT_HOST_ROLE_REQ_INIT_EVNT 0x00400000
+#define OEVT_HOST_ROLE_REQ_INIT_EVNT_SHIFT 22
+#define OEVT_HOST_ROLE_REQ_CONFIRM_EVNT 0x00800000
+#define OEVT_HOST_ROLE_REQ_CONFIRM_EVNT_SHIFT 23
+#define OEVT_CONN_ID_STS_CHNG_EVNT 0x01000000
+#define OEVT_CONN_ID_STS_CHNG_EVNT_SHIFT 24
+#define OEVT_DEV_MOD_EVNT 0x80000000
+#define OEVT_DEV_MOD_EVNT_SHIFT 31
+
+#define OEVTEN 0xcc0c
+
+#define OEVT_ALL (OEVT_CONN_ID_STS_CHNG_EVNT | \
+ OEVT_HOST_ROLE_REQ_INIT_EVNT | \
+ OEVT_HOST_ROLE_REQ_CONFIRM_EVNT | \
+ OEVT_A_DEV_B_DEV_HOST_END_EVNT | \
+ OEVT_A_DEV_HOST_EVNT | \
+ OEVT_A_DEV_HNP_CHNG_EVNT | \
+ OEVT_A_DEV_SRP_DET_EVNT | \
+ OEVT_A_DEV_SESS_END_DET_EVNT | \
+ OEVT_B_DEV_B_HOST_END_EVNT | \
+ OEVT_B_DEV_HNP_CHNG_EVNT | \
+ OEVT_B_DEV_SES_VLD_DET_EVNT | \
+ OEVT_B_DEV_VBUS_CHNG_EVNT)
+
+#define OSTS 0xcc10
+#define OSTS_CONN_ID_STS 0x0001
+#define OSTS_CONN_ID_STS_SHIFT 0
+#define OSTS_A_SES_VLD 0x0002
+#define OSTS_A_SES_VLD_SHIFT 1
+#define OSTS_B_SES_VLD 0x0004
+#define OSTS_B_SES_VLD_SHIFT 2
+#define OSTS_XHCI_PRT_PWR 0x0008
+#define OSTS_XHCI_PRT_PWR_SHIFT 3
+#define OSTS_PERIP_MODE 0x0010
+#define OSTS_PERIP_MODE_SHIFT 4
+#define OSTS_OTG_STATES 0x0f00
+#define OSTS_OTG_STATE_SHIFT 8
+
+#define DCTL 0xc704
+#define DCTL_RUN_STOP 0x80000000
+
+#define OTG_STATE_INVALID -1
+#define OTG_STATE_EXIT 14
+#define OTG_STATE_TERMINATED 15
+
+#define PERI_MODE_HOST 0
+#define PERI_MODE_PERIPHERAL 1
+
+/** The main structure to keep track of OTG driver state. */
+struct dwc3_otg {
+
+ /** OTG PHY */
+ struct usb_otg otg;
+ struct device *dev;
+ struct dwc3 *dwc;
+
+ /* Base of the OTG register block (see OCFG/OCTL/OEVT offsets) */
+ void __iomem *regs;
+
+ /* Wakeup flag and wait queue for the main state machine thread */
+ int main_wakeup_needed;
+ struct task_struct *main_thread;
+ wait_queue_head_t main_wq;
+
+ /* Protects otg_events/user_events (taken from IRQ context too) */
+ spinlock_t lock;
+
+ int otg_srp_reqd;
+
+ /* Events */
+ /* Pending hardware OEVT bits, latched by dwc3_otg_irq() */
+ u32 otg_events;
+
+ /* Pending user/PCD event bits (USER_* flags below) */
+ u32 user_events;
+
+ /** User initiated SRP.
+ *
+ * Valid in B-device during sensing/probing. Initiates SRP signalling
+ * across the bus.
+ *
+ * Also valid as an A-device during probing. This causes the A-device to
+ * apply V-bus manually and check for a device. Can be used if the
+ * device does not support SRP and the host does not support ADP.
+ */
+#define USER_SRP_EVENT 0x1
+ /** User initiated HNP (only valid in B-peripheral) */
+#define USER_HNP_EVENT 0x2
+ /** User has ended the session (only valid in B-peripheral) */
+#define USER_END_SESSION 0x4
+ /** User initiated VBUS. This will cause the A-device to turn on the
+ * VBUS and see if a device will connect (only valid in A-device during
+ * sensing/probing)
+ */
+#define USER_VBUS_ON 0x8
+ /** User has initiated RSP */
+#define USER_RSP_EVENT 0x10
+ /** Host release event */
+#define PCD_RECEIVED_HOST_RELEASE_EVENT 0x20
+ /** Initial SRP */
+#define INITIAL_SRP 0x40
+ /** A-device connected event*/
+#define USER_A_CONN_EVENT 0x80
+ /** User initiated HNP END Session. This will make the A-device and
+ * B-device to return back to their previous roles before HNP got
+ * initiated
+ */
+#define USER_HNP_END_SESSION 0x100
+
+ /* States */
+ enum usb_otg_state prev;
+ enum usb_otg_state state;
+
+ u32 hwparams6; /* cached GHWPARAMS6 (0xdeadbeef = not read yet) */
+ int hcd_irq;
+ int irq; /* OTG interrupt requested in dwc3_otg_init() */
+ int host_started;
+ int peripheral_started;
+ int dev_enum;
+
+ struct delayed_work hp_work; /* drives HNP polling */
+
+};
+
+extern int usb_port_suspend(struct usb_device *udev, pm_message_t msg);
+extern void usb_kick_hub_wq(struct usb_device *dev);
diff --git a/drivers/usb/dwc3/platform_data.h b/drivers/usb/dwc3/platform_data.h
new file mode 100644
index 000000000000..ae659e367804
--- /dev/null
+++ b/drivers/usb/dwc3/platform_data.h
@@ -0,0 +1,54 @@
+/**
+ * platform_data.h - USB DWC3 Platform Data Support
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Felipe Balbi <balbi@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/otg.h>
+
+/* Board/platform configuration handed to the dwc3 core driver:
+ * speed/mode limits, LPM tuning values and the per-SoC quirk flags.
+ */
+struct dwc3_platform_data {
+ enum usb_device_speed maximum_speed;
+ enum usb_dr_mode dr_mode;
+ bool usb3_lpm_capable;
+
+ unsigned is_utmi_l1_suspend:1;
+ u8 hird_threshold;
+
+ u8 lpm_nyet_threshold;
+
+ /* Hardware quirk enables, one bit each */
+ unsigned disable_scramble_quirk:1;
+ unsigned has_lpm_erratum:1;
+ unsigned u2exit_lfps_quirk:1;
+ unsigned u2ss_inp3_quirk:1;
+ unsigned req_p1p2p3_quirk:1;
+ unsigned del_p1p2p3_quirk:1;
+ unsigned del_phy_power_chg_quirk:1;
+ unsigned lfps_filter_quirk:1;
+ unsigned rx_detect_poll_quirk:1;
+ unsigned dis_u3_susphy_quirk:1;
+ unsigned dis_u2_susphy_quirk:1;
+ unsigned dis_enblslpm_quirk:1;
+ unsigned dis_rxdet_inp3_quirk:1;
+
+ unsigned tx_de_emphasis_quirk:1;
+ unsigned tx_de_emphasis:2;
+
+ /* Frame length adjustment (GFLADJ) value and refclk variant select */
+ u32 fladj_value;
+ bool refclk_fladj;
+
+ const char *hsphy_interface;
+};
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 5c1eb96a5c57..111fefeacb8c 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -721,6 +721,17 @@ static int bos_desc(struct usb_composite_dev *cdev)
USB_HIGH_SPEED_OPERATION |
USB_5GBPS_OPERATION);
ss_cap->bFunctionalitySupport = USB_LOW_SPEED_OPERATION;
+
+ /* Get Controller configuration */
+ if (cdev->gadget->ops->get_config_params) {
+ cdev->gadget->ops->get_config_params(cdev->gadget,
+ &dcd_config_params);
+ } else {
+ dcd_config_params.bU1devExitLat =
+ USB_DEFAULT_U1_DEV_EXIT_LAT;
+ dcd_config_params.bU2DevExitLat =
+ cpu_to_le16(USB_DEFAULT_U2_DEV_EXIT_LAT);
+ }
ss_cap->bU1devExitLat = dcd_config_params.bU1devExitLat;
ss_cap->bU2DevExitLat = dcd_config_params.bU2DevExitLat;
}
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 36504931b2d1..27d6b481ec63 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -43,14 +43,17 @@ static inline struct f_uas *to_f_uas(struct usb_function *f)
/* Start bot.c code */
+static struct usbg_cdb *acquire_cmd_request(struct f_uas *fu);
+static void release_cmd_request(struct f_uas *fu, struct usb_request *req);
static int bot_enqueue_cmd_cbw(struct f_uas *fu)
{
int ret;
+ struct usbg_cdb *cmd = acquire_cmd_request(fu);
if (fu->flags & USBG_BOT_CMD_PEND)
return 0;
- ret = usb_ep_queue(fu->ep_out, fu->cmd.req, GFP_ATOMIC);
+ ret = usb_ep_queue(fu->ep_out, cmd->req, GFP_ATOMIC);
if (!ret)
fu->flags |= USBG_BOT_CMD_PEND;
return ret;
@@ -61,6 +64,7 @@ static void bot_status_complete(struct usb_ep *ep, struct usb_request *req)
struct usbg_cmd *cmd = req->context;
struct f_uas *fu = cmd->fu;
+ release_cmd_request(fu, req);
transport_generic_free_cmd(&cmd->se_cmd, 0);
if (req->status < 0) {
pr_err("ERR %s(%d)\n", __func__, __LINE__);
@@ -136,7 +140,7 @@ static void bot_send_bad_status(struct usbg_cmd *cmd)
}
req->complete = bot_err_compl;
req->context = cmd;
- req->buf = fu->cmd.buf;
+ req->buf = fu->cmd[0]->buf;
usb_ep_queue(ep, req, GFP_KERNEL);
} else {
bot_enqueue_sense_code(fu, cmd);
@@ -245,7 +249,6 @@ static int bot_send_write_request(struct usbg_cmd *cmd)
{
struct f_uas *fu = cmd->fu;
struct se_cmd *se_cmd = &cmd->se_cmd;
- struct usb_gadget *gadget = fuas_to_gadget(fu);
int ret;
init_completion(&cmd->write_complete);
@@ -256,22 +259,6 @@ static int bot_send_write_request(struct usbg_cmd *cmd)
return -EINVAL;
}
- if (!gadget->sg_supported) {
- cmd->data_buf = kmalloc(se_cmd->data_length, GFP_KERNEL);
- if (!cmd->data_buf)
- return -ENOMEM;
-
- fu->bot_req_out->buf = cmd->data_buf;
- } else {
- fu->bot_req_out->buf = NULL;
- fu->bot_req_out->num_sgs = se_cmd->t_data_nents;
- fu->bot_req_out->sg = se_cmd->t_data_sg;
- }
-
- fu->bot_req_out->complete = usbg_data_write_cmpl;
- fu->bot_req_out->length = se_cmd->data_length;
- fu->bot_req_out->context = cmd;
-
ret = usbg_prepare_w_request(cmd, fu->bot_req_out);
if (ret)
goto cleanup;
@@ -297,11 +284,84 @@ static void bot_cmd_complete(struct usb_ep *ep, struct usb_request *req)
if (req->status < 0)
return;
+ release_cmd_request(fu, req);
ret = bot_submit_command(fu, req->buf, req->actual);
if (ret)
pr_err("%s(%d): %d\n", __func__, __LINE__, ret);
}
+/*
+ * Hand out the first command descriptor slot that is not currently in
+ * flight, marking it claimed.  Returns NULL when every slot is busy.
+ */
+static struct usbg_cdb *acquire_cmd_request(struct f_uas *fu)
+{
+	struct usbg_cdb *cdb;
+	int idx;
+
+	for (idx = 0; idx < fu->ncmd; idx++) {
+		cdb = fu->cmd[idx];
+		if (cdb->claimed)
+			continue;
+		cdb->claimed = true;
+		return cdb;
+	}
+	return NULL;
+}
+
+/*
+ * Return the command descriptor slot that owns @req to the free pool.
+ * Each usb_request belongs to exactly one slot, so the first match is
+ * the only one.
+ */
+static void release_cmd_request(struct f_uas *fu, struct usb_request *req)
+{
+	int idx;
+
+	for (idx = 0; idx < fu->ncmd; idx++) {
+		if (fu->cmd[idx]->req == req) {
+			fu->cmd[idx]->claimed = false;
+			break;
+		}
+	}
+}
+
+/*
+ * Free every command descriptor slot: the usb_request, the bounce
+ * buffer, and the descriptor itself.  Safe to call from the partial
+ * allocation error path in alloc_cmd_resource(), where slots past the
+ * failure point are still NULL.
+ */
+static void free_cmd_resource(struct f_uas *fu, struct usb_ep *ep)
+{
+	int i;
+
+	for (i = 0; i < fu->ncmd; i++) {
+		/* Slots after a failed allocation were never populated. */
+		if (!fu->cmd[i])
+			continue;
+
+		if (fu->cmd[i]->req)
+			usb_ep_free_request(ep, fu->cmd[i]->req);
+
+		kfree(fu->cmd[i]->buf);
+		fu->cmd[i]->buf = NULL;
+
+		kfree(fu->cmd[i]);
+		fu->cmd[i] = NULL;
+	}
+}
+
+/*
+ * Allocate @num command descriptor slots for @ep: one usbg_cdb, one
+ * usb_request and one maxpacket-sized buffer per slot, with @complete
+ * as the request completion handler.  Returns 0 or -ENOMEM; on failure
+ * everything allocated so far is released.
+ */
+static int alloc_cmd_resource(struct f_uas *fu, int num, struct usb_ep *ep,
+			      void (*complete)(struct usb_ep *ep,
+					       struct usb_request *req))
+{
+	int i;
+
+	fu->ncmd = num;
+	for (i = 0; i < fu->ncmd; i++) {
+		/*
+		 * One descriptor per slot (the original kcalloc'ed ncmd
+		 * descriptors into every slot); zeroed so ->claimed
+		 * starts out false.
+		 */
+		fu->cmd[i] = kzalloc(sizeof(struct usbg_cdb), GFP_KERNEL);
+		if (!fu->cmd[i])
+			goto err_cmd;
+
+		fu->cmd[i]->req = usb_ep_alloc_request(ep, GFP_KERNEL);
+		if (!fu->cmd[i]->req)
+			goto err_cmd;
+
+		/* Size the buffer for the endpoint actually used (ep_out
+		 * for BOT, ep_cmd for UAS), not unconditionally ep_out.
+		 */
+		fu->cmd[i]->buf = kmalloc(ep->maxpacket, GFP_KERNEL);
+		if (!fu->cmd[i]->buf)
+			goto err_cmd;
+
+		fu->cmd[i]->req->complete = complete;
+		fu->cmd[i]->req->buf = fu->cmd[i]->buf;
+		fu->cmd[i]->req->length = ep->maxpacket;
+		fu->cmd[i]->req->context = fu;
+	}
+
+	return 0;
+err_cmd:
+	free_cmd_resource(fu, ep);
+	return -ENOMEM;
+}
+
static int bot_prepare_reqs(struct f_uas *fu)
{
int ret;
@@ -314,10 +374,6 @@ static int bot_prepare_reqs(struct f_uas *fu)
if (!fu->bot_req_out)
goto err_out;
- fu->cmd.req = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
- if (!fu->cmd.req)
- goto err_cmd;
-
fu->bot_status.req = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
if (!fu->bot_status.req)
goto err_sts;
@@ -327,28 +383,20 @@ static int bot_prepare_reqs(struct f_uas *fu)
fu->bot_status.req->complete = bot_status_complete;
fu->bot_status.csw.Signature = cpu_to_le32(US_BULK_CS_SIGN);
- fu->cmd.buf = kmalloc(fu->ep_out->maxpacket, GFP_KERNEL);
- if (!fu->cmd.buf)
- goto err_buf;
-
- fu->cmd.req->complete = bot_cmd_complete;
- fu->cmd.req->buf = fu->cmd.buf;
- fu->cmd.req->length = fu->ep_out->maxpacket;
- fu->cmd.req->context = fu;
+ ret = alloc_cmd_resource(fu, BOT_MAX_COMMANDS, fu->ep_out,
+ bot_cmd_complete);
+ if (ret)
+ goto err_cmd;
ret = bot_enqueue_cmd_cbw(fu);
if (ret)
goto err_queue;
return 0;
err_queue:
- kfree(fu->cmd.buf);
- fu->cmd.buf = NULL;
-err_buf:
+ free_cmd_resource(fu, fu->ep_out);
+err_cmd:
usb_ep_free_request(fu->ep_in, fu->bot_status.req);
err_sts:
- usb_ep_free_request(fu->ep_out, fu->cmd.req);
- fu->cmd.req = NULL;
-err_cmd:
usb_ep_free_request(fu->ep_out, fu->bot_req_out);
fu->bot_req_out = NULL;
err_out:
@@ -372,16 +420,13 @@ static void bot_cleanup_old_alt(struct f_uas *fu)
usb_ep_free_request(fu->ep_in, fu->bot_req_in);
usb_ep_free_request(fu->ep_out, fu->bot_req_out);
- usb_ep_free_request(fu->ep_out, fu->cmd.req);
usb_ep_free_request(fu->ep_in, fu->bot_status.req);
- kfree(fu->cmd.buf);
+ free_cmd_resource(fu, fu->ep_out);
fu->bot_req_in = NULL;
fu->bot_req_out = NULL;
- fu->cmd.req = NULL;
fu->bot_status.req = NULL;
- fu->cmd.buf = NULL;
}
static void bot_set_alt(struct f_uas *fu)
@@ -480,14 +525,6 @@ static void uasp_cleanup_one_stream(struct f_uas *fu, struct uas_stream *stream)
stream->req_status = NULL;
}
-static void uasp_free_cmdreq(struct f_uas *fu)
-{
- usb_ep_free_request(fu->ep_cmd, fu->cmd.req);
- kfree(fu->cmd.buf);
- fu->cmd.req = NULL;
- fu->cmd.buf = NULL;
-}
-
static void uasp_cleanup_old_alt(struct f_uas *fu)
{
int i;
@@ -502,7 +539,7 @@ static void uasp_cleanup_old_alt(struct f_uas *fu)
for (i = 0; i < UASP_SS_EP_COMP_NUM_STREAMS; i++)
uasp_cleanup_one_stream(fu, &fu->stream[i]);
- uasp_free_cmdreq(fu);
+ free_cmd_resource(fu, fu->ep_cmd);
}
static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req);
@@ -565,6 +602,7 @@ static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
struct usbg_cmd *cmd = req->context;
struct uas_stream *stream = cmd->stream;
struct f_uas *fu = cmd->fu;
+ struct usbg_cdb *cmd_cdb;
int ret;
if (req->status < 0)
@@ -599,7 +637,8 @@ static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
case UASP_QUEUE_COMMAND:
transport_generic_free_cmd(&cmd->se_cmd, 0);
- usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
+ cmd_cdb = acquire_cmd_request(fu);
+ usb_ep_queue(fu->ep_cmd, cmd_cdb->req, GFP_ATOMIC);
break;
default:
@@ -719,11 +758,13 @@ static int usbg_submit_command(struct f_uas *, void *, unsigned int);
static void uasp_cmd_complete(struct usb_ep *ep, struct usb_request *req)
{
struct f_uas *fu = req->context;
+ struct usbg_cdb *cmd;
int ret;
if (req->status < 0)
return;
+ release_cmd_request(fu, req);
ret = usbg_submit_command(fu, req->buf, req->actual);
/*
* Once we tune for performance enqueue the command req here again so
@@ -733,7 +774,8 @@ static void uasp_cmd_complete(struct usb_ep *ep, struct usb_request *req)
*/
if (!ret)
return;
- usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
+ cmd = acquire_cmd_request(fu);
+ usb_ep_queue(fu->ep_cmd, cmd->req, GFP_ATOMIC);
}
static int uasp_alloc_stream_res(struct f_uas *fu, struct uas_stream *stream)
@@ -761,28 +803,6 @@ out:
return -ENOMEM;
}
-static int uasp_alloc_cmd(struct f_uas *fu)
-{
- fu->cmd.req = usb_ep_alloc_request(fu->ep_cmd, GFP_KERNEL);
- if (!fu->cmd.req)
- goto err;
-
- fu->cmd.buf = kmalloc(fu->ep_cmd->maxpacket, GFP_KERNEL);
- if (!fu->cmd.buf)
- goto err_buf;
-
- fu->cmd.req->complete = uasp_cmd_complete;
- fu->cmd.req->buf = fu->cmd.buf;
- fu->cmd.req->length = fu->ep_cmd->maxpacket;
- fu->cmd.req->context = fu;
- return 0;
-
-err_buf:
- usb_ep_free_request(fu->ep_cmd, fu->cmd.req);
-err:
- return -ENOMEM;
-}
-
static void uasp_setup_stream_res(struct f_uas *fu, int max_streams)
{
int i;
@@ -800,12 +820,15 @@ static int uasp_prepare_reqs(struct f_uas *fu)
{
int ret;
int i;
- int max_streams;
+ int max_streams, max_commands;
- if (fu->flags & USBG_USE_STREAMS)
+ if (fu->flags & USBG_USE_STREAMS) {
+ max_commands = UASP_MAX_COMMANDS;
max_streams = UASP_SS_EP_COMP_NUM_STREAMS;
- else
+ } else {
+ max_commands = 1;
max_streams = 1;
+ }
for (i = 0; i < max_streams; i++) {
ret = uasp_alloc_stream_res(fu, &fu->stream[i]);
@@ -813,19 +836,25 @@ static int uasp_prepare_reqs(struct f_uas *fu)
goto err_cleanup;
}
- ret = uasp_alloc_cmd(fu);
+ ret = alloc_cmd_resource(fu, max_commands, fu->ep_cmd,
+ uasp_cmd_complete);
if (ret)
goto err_free_stream;
uasp_setup_stream_res(fu, max_streams);
- ret = usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
- if (ret)
- goto err_free_stream;
+ /* queue number of commands */
+ for (i = 0; i < fu->ncmd; i++) {
+ struct usbg_cdb *cmd = acquire_cmd_request(fu);
+
+ ret = usb_ep_queue(fu->ep_cmd, cmd->req, GFP_ATOMIC);
+ if (ret)
+ goto err_free_stream;
+ }
return 0;
err_free_stream:
- uasp_free_cmdreq(fu);
+ free_cmd_resource(fu, fu->ep_cmd);
err_cleanup:
if (i) {
@@ -838,16 +867,28 @@ err_cleanup:
return ret;
}
+#define SS_BOT_INTERFACE_DESC_NO 5
static void uasp_set_alt(struct f_uas *fu)
{
struct usb_function *f = &fu->function;
struct usb_gadget *gadget = f->config->cdev->gadget;
+ struct usb_descriptor_header **ss_uasp_backup = f->ss_descriptors;
int ret;
fu->flags = USBG_IS_UAS;
- if (gadget->speed >= USB_SPEED_SUPER)
+ if (gadget->speed == USB_SPEED_SUPER) {
fu->flags |= USBG_USE_STREAMS;
+		/* If the device is connected at SuperSpeed, the companion
+		 * descriptor with streams must be attached.  Since BOT and
+		 * UAS share the same endpoints, config_ep_by_speed() returns
+		 * the first match, i.e. the companion descriptor without
+		 * streams.  This is just a workaround; a proper fix needs to
+		 * be introduced.  Here we advance the ss_descriptors header
+		 * past the descriptors that belong to the BOT interface.
+		 */
+ f->ss_descriptors += SS_BOT_INTERFACE_DESC_NO;
+ }
config_ep_by_speed(gadget, f, fu->ep_in);
ret = usb_ep_enable(fu->ep_in);
@@ -873,6 +914,10 @@ static void uasp_set_alt(struct f_uas *fu)
goto err_wq;
fu->flags |= USBG_ENABLED;
+ /* restore ss_descriptors */
+ if (gadget->speed == USB_SPEED_SUPER)
+ f->ss_descriptors = ss_uasp_backup;
+
pr_info("Using the UAS protocol\n");
return;
err_wq:
@@ -884,6 +929,9 @@ err_cmd:
err_b_out:
usb_ep_disable(fu->ep_in);
err_b_in:
+ /* restore ss_descriptors */
+ if (gadget->speed == USB_SPEED_SUPER)
+ f->ss_descriptors = ss_uasp_backup;
fu->flags = 0;
}
@@ -949,6 +997,56 @@ static int get_cmd_dir(const unsigned char *cdb)
return ret;
}
+/*
+ * Undo the max-packet padding applied by adjust_w_length_with_maxpacket()
+ * once the OUT data stage completes: shrink cmd->data_len back to the real
+ * SCSI transfer length and, when the gadget supports SG, restore the
+ * length of the last scatterlist entry that was grown for the padded
+ * transfer.  @req is unused; kept for signature symmetry with the
+ * adjust helper.
+ */
+static void recover_w_length_with_maxpacket(struct usbg_cmd *cmd,
+					    struct usb_request *req)
+{
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct f_uas *fu = cmd->fu;
+	struct usb_gadget *gadget = fuas_to_gadget(fu);
+	int rem;
+
+	rem = se_cmd->data_length % fu->ep_out->maxpacket;
+	if (rem) {
+		/* recover padded data length */
+		cmd->data_len -= fu->ep_out->maxpacket - rem;
+
+		if (gadget->sg_supported) {
+			struct scatterlist *s = sg_last(se_cmd->t_data_sg,
+							se_cmd->t_data_nents);
+
+			/* shrink the last SG entry by the same pad amount */
+			s->length -= fu->ep_out->maxpacket - rem;
+		}
+	}
+}
+
+/*
+ * Pad the OUT transfer length up to a multiple of the bulk-out endpoint's
+ * wMaxPacketSize before the request is queued, growing the last
+ * scatterlist entry by the same amount when the gadget supports SG so
+ * the padded bytes have somewhere to land.  The padding is reverted by
+ * recover_w_length_with_maxpacket() on completion.  @req is unused.
+ */
+static void adjust_w_length_with_maxpacket(struct usbg_cmd *cmd,
+					   struct usb_request *req)
+{
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct f_uas *fu = cmd->fu;
+	struct usb_gadget *gadget = fuas_to_gadget(fu);
+	int rem;
+
+	cmd->data_len = se_cmd->data_length;
+	rem = cmd->data_len % fu->ep_out->maxpacket;
+	if (rem) {
+		/* pad data length so that transfer size can be in multiple of
+		 * max packet size
+		 */
+		cmd->data_len += fu->ep_out->maxpacket - rem;
+
+		if (gadget->sg_supported) {
+			/* if sg is supported the data length in the last page
+			 * also needs to be adjusted to a multiple of max
+			 * packet size.
+			 */
+			struct scatterlist *s = sg_last(se_cmd->t_data_sg,
+							se_cmd->t_data_nents);
+
+			s->length += fu->ep_out->maxpacket - rem;
+		}
+	}
+}
+
static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req)
{
struct usbg_cmd *cmd = req->context;
@@ -959,6 +1057,8 @@ static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req)
goto cleanup;
}
+ recover_w_length_with_maxpacket(cmd, req);
+
if (req->num_sgs == 0) {
sg_copy_from_buffer(se_cmd->t_data_sg,
se_cmd->t_data_nents,
@@ -979,8 +1079,10 @@ static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
struct f_uas *fu = cmd->fu;
struct usb_gadget *gadget = fuas_to_gadget(fu);
+ adjust_w_length_with_maxpacket(cmd, req);
+
if (!gadget->sg_supported) {
- cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
+ cmd->data_buf = kmalloc(cmd->data_len, GFP_ATOMIC);
if (!cmd->data_buf)
return -ENOMEM;
@@ -992,7 +1094,7 @@ static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
}
req->complete = usbg_data_write_cmpl;
- req->length = se_cmd->data_length;
+ req->length = cmd->data_len;
req->context = cmd;
return 0;
}
@@ -1185,7 +1287,8 @@ static void bot_cmd_work(struct work_struct *work)
if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
- cmd->data_len, cmd->prio_attr, dir, 0) < 0)
+ cmd->data_len, cmd->prio_attr, dir,
+ TARGET_SCF_ACK_KREF) < 0)
goto out;
return;
@@ -1674,9 +1777,11 @@ static ssize_t tcm_usbg_tpg_nexus_store(struct config_item *item,
CONFIGFS_ATTR(tcm_usbg_tpg_, enable);
CONFIGFS_ATTR(tcm_usbg_tpg_, nexus);
+static struct configfs_attribute tcm_usbg_tpg_attr_maxburst;
static struct configfs_attribute *usbg_base_attrs[] = {
&tcm_usbg_tpg_attr_enable,
&tcm_usbg_tpg_attr_nexus,
+ &tcm_usbg_tpg_attr_maxburst,
NULL,
};
@@ -1984,6 +2089,32 @@ static struct usb_gadget_strings *tcm_strings[] = {
NULL,
};
+/*
+ * configfs "maxburst" read: report the current bMaxBurst value.  The
+ * store side writes the same value into all UAS/BOT companion
+ * descriptors, so showing the cmd descriptor's copy is representative.
+ */
+static ssize_t tcm_usbg_tpg_maxburst_show(struct config_item *item, char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%u\n", uasp_cmd_comp_desc.bMaxBurst);
+}
+
+/*
+ * configfs "maxburst" write: parse a decimal value and program it into
+ * every SuperSpeed endpoint companion descriptor used by the UAS and
+ * BOT interfaces.  Returns @count on success or a negative errno.
+ */
+static ssize_t tcm_usbg_tpg_maxburst_store(struct config_item *item,
+					   const char *page, size_t count)
+{
+	unsigned int value;	/* kstrtouint() requires an unsigned int */
+	int ret;
+
+	ret = kstrtouint(page, 10, &value);
+	if (ret)
+		return ret;
+
+	/* bMaxBurst is a u8 limited to 0..15 by the USB 3.x spec. */
+	if (value > 15)
+		return -EINVAL;
+
+	uasp_bi_ep_comp_desc.bMaxBurst = value;
+	uasp_bo_ep_comp_desc.bMaxBurst = value;
+	uasp_status_in_ep_comp_desc.bMaxBurst = value;
+	uasp_cmd_comp_desc.bMaxBurst = value;
+	bot_bi_ep_comp_desc.bMaxBurst = value;
+	bot_bo_ep_comp_desc.bMaxBurst = value;
+
+	return count;
+}
+CONFIGFS_ATTR(tcm_usbg_tpg_, maxburst);
+
static int tcm_bind(struct usb_configuration *c, struct usb_function *f)
{
struct f_uas *fu = to_f_uas(f);
diff --git a/drivers/usb/gadget/function/tcm.h b/drivers/usb/gadget/function/tcm.h
index 3cd565794ad7..54ed3bca8add 100644
--- a/drivers/usb/gadget/function/tcm.h
+++ b/drivers/usb/gadget/function/tcm.h
@@ -98,6 +98,7 @@ struct uas_stream {
struct usbg_cdb {
struct usb_request *req;
void *buf;
+ bool claimed;
};
struct bot_status {
@@ -105,6 +106,9 @@ struct bot_status {
struct bulk_cs_wrap csw;
};
+#define UASP_MAX_COMMANDS 6
+#define BOT_MAX_COMMANDS 1
+#define MAX_COMMANDS UASP_MAX_COMMANDS
struct f_uas {
struct usbg_tpg *tpg;
struct usb_function function;
@@ -117,7 +121,8 @@ struct f_uas {
#define USBG_IS_BOT (1 << 3)
#define USBG_BOT_CMD_PEND (1 << 4)
- struct usbg_cdb cmd;
+ u32 ncmd;
+ struct usbg_cdb *cmd[MAX_COMMANDS];
struct usb_ep *ep_in;
struct usb_ep *ep_out;
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index 5c042f380708..b7aa006ba98c 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -332,11 +332,15 @@ int uvcg_video_pump(struct uvc_video *video)
video->encode(req, video, buf);
+ spin_unlock_irqrestore(&queue->irqlock, flags);
+
/* Queue the USB request */
ret = uvcg_video_ep_queue(video, req);
spin_unlock_irqrestore(&queue->irqlock, flags);
if (ret < 0) {
+ printk(KERN_INFO "Failed to queue request (%d)\n", ret);
+ usb_ep_set_halt(video->ep);
uvcg_queue_cancel(queue, 0);
break;
}
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index b1cfc8279c3d..834464084ea9 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -11,6 +11,7 @@
* USB peripheral controller (at91_udc.c).
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
@@ -171,6 +172,7 @@ struct xusb_ep {
* @addr: the usb device base address
* @lock: instance of spinlock
* @dma_enabled: flag indicating whether the dma is included in the system
+ * @clk: pointer to struct clk
* @read_fn: function pointer to read device registers
* @write_fn: function pointer to write to device registers
*/
@@ -188,8 +190,9 @@ struct xusb_udc {
void __iomem *addr;
spinlock_t lock;
bool dma_enabled;
+ struct clk *clk;
- unsigned int (*read_fn)(void __iomem *);
+ unsigned int (*read_fn)(void __iomem *reg);
void (*write_fn)(void __iomem *, u32, u32);
};
@@ -1731,7 +1734,7 @@ static void xudc_set_clear_feature(struct xusb_udc *udc)
*
* Process setup packet and delegate to gadget layer.
*/
-static void xudc_handle_setup(struct xusb_udc *udc)
+static void xudc_handle_setup(struct xusb_udc *udc) __must_hold(&udc->lock)
{
struct xusb_ep *ep0 = &udc->ep[0];
struct usb_ctrlrequest setup;
@@ -2091,6 +2094,26 @@ static int xudc_probe(struct platform_device *pdev)
udc->gadget.ep0 = &udc->ep[XUSB_EP_NUMBER_ZERO].ep_usb;
udc->gadget.name = driver_name;
+ udc->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+ if (IS_ERR(udc->clk)) {
+ if (PTR_ERR(udc->clk) != -ENOENT) {
+ ret = PTR_ERR(udc->clk);
+ goto fail;
+ }
+
+ /*
+ * Clock framework support is optional, continue on,
+ * anyways if we don't find a matching clock
+ */
+ udc->clk = NULL;
+ }
+
+ ret = clk_prepare_enable(udc->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return ret;
+ }
+
spin_lock_init(&udc->lock);
/* Check for IP endianness */
@@ -2146,10 +2169,62 @@ static int xudc_remove(struct platform_device *pdev)
struct xusb_udc *udc = platform_get_drvdata(pdev);
usb_del_gadget_udc(&udc->gadget);
+ clk_disable_unprepare(udc->clk);
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+/*
+ * System-sleep suspend: take the core off the bus, tell the bound
+ * gadget driver it has been disconnected, and gate the AXI clock.
+ */
+static int xudc_suspend(struct device *dev)
+{
+	struct xusb_udc *udc = dev_get_drvdata(dev);
+	unsigned long flags;
+	u32 ctrl;
+
+	spin_lock_irqsave(&udc->lock, flags);
+	/* Clear the USB-ready bit so the core stops responding. */
+	ctrl = udc->read_fn(udc->addr + XUSB_CONTROL_OFFSET);
+	ctrl &= ~XUSB_CONTROL_USB_READY_MASK;
+	udc->write_fn(udc->addr, XUSB_CONTROL_OFFSET, ctrl);
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	if (udc->driver && udc->driver->disconnect)
+		udc->driver->disconnect(&udc->gadget);
+
+	clk_disable(udc->clk);
+
+	return 0;
+}
+
+/*
+ * System-sleep resume: ungate the AXI clock and put the core back on
+ * the bus by re-asserting the USB-ready bit.
+ */
+static int xudc_resume(struct device *dev)
+{
+	struct xusb_udc *udc = dev_get_drvdata(dev);
+	unsigned long flags;
+	u32 ctrl;
+
+	clk_enable(udc->clk);
+
+	spin_lock_irqsave(&udc->lock, flags);
+	ctrl = udc->read_fn(udc->addr + XUSB_CONTROL_OFFSET);
+	ctrl |= XUSB_CONTROL_USB_READY_MASK;
+	udc->write_fn(udc->addr, XUSB_CONTROL_OFFSET, ctrl);
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops xudc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xudc_suspend, xudc_resume)
+};
+
/* Match table for of_platform binding */
static const struct of_device_id usb_of_match[] = {
{ .compatible = "xlnx,usb2-device-4.00.a", },
@@ -2161,6 +2236,7 @@ static struct platform_driver xudc_driver = {
.driver = {
.name = driver_name,
.of_match_table = usb_of_match,
+ .pm = &xudc_pm_ops,
},
.probe = xudc_probe,
.remove = xudc_remove,
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index 67a6ee8cb5d8..3cefa3cb6c5a 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -32,6 +32,8 @@
* There are cases when the host controller fails to enable the port due to,
* for example, insufficient power that can be supplied to the device from
* the USB bus. In those cases, the messages printed here are not helpful.
+ *
+ * Return: Always return 0
*/
static int ehci_xilinx_port_handed_over(struct usb_hcd *hcd, int portnum)
{
@@ -46,11 +48,9 @@ static int ehci_xilinx_port_handed_over(struct usb_hcd *hcd, int portnum)
dev_warn(hcd->self.controller,
"Maybe your device is not a high speed device?\n");
dev_warn(hcd->self.controller,
- "The USB host controller does not support full speed "
- "nor low speed devices\n");
+ "USB host controller doesn't support FS/LS devices\n");
dev_warn(hcd->self.controller,
- "You can reconfigure the host controller to have "
- "full speed support\n");
+ "You can reconfigure host controller to support FS\n");
}
return 0;
@@ -112,6 +112,8 @@ static const struct hc_driver ehci_xilinx_of_hc_driver = {
* host controller. Because the Xilinx USB host controller can be configured
* as HS only or HS/FS only, it checks the configuration in the device tree
* entry, and sets an appropriate value for hcd->has_tt.
+ *
+ * Return: zero on success, 'rv' value on failure
*/
static int ehci_hcd_xilinx_of_probe(struct platform_device *op)
{
@@ -196,6 +198,8 @@ err_irq:
*
* Remove the hcd structure, and release resources that has been requested
* during probe.
+ *
+ * Return: Always return 0
*/
static int ehci_hcd_xilinx_of_remove(struct platform_device *op)
{
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index f37316d2c8fa..dd82b67e1f26 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1825,6 +1825,13 @@ int xhci_bus_resume(struct usb_hcd *hcd)
}
}
+	/* After resuming from suspend, the controller may not initiate
+	 * LFPS.U3_exit signalling if not given a delay after updating the
+	 * link from U3->U0.  So let's wait for at least 1 ms.
+	 */
+ if (next_state == XDEV_U0)
+ mdelay(1);
+
/* poll for U0 link state complete, both USB2 and USB3 */
for_each_set_bit(port_index, &bus_state->bus_suspended, BITS_PER_LONG) {
sret = xhci_handshake(ports[port_index]->addr, PORT_PLC,
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 9764122c9cdf..7be13f1fcb79 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -685,6 +685,16 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
cur_stream, (unsigned long long) addr);
+ if (xhci->quirks & XHCI_STREAM_QUIRK) {
+		/* The dwc3 host controller has an issue where it doesn't
+		 * process BULK IN stream rings even after ringing the
+		 * doorbell, so set up a timer to avoid a hang condition.
+		 */
+ timer_setup(&cur_ring->stream_timer,
+ xhci_stream_timeout, 0);
+ cur_ring->xhci = xhci;
+ }
+
ret = xhci_update_stream_mapping(cur_ring, mem_flags);
if (ret) {
xhci_ring_free(xhci, cur_ring);
@@ -771,6 +781,10 @@ void xhci_free_stream_info(struct xhci_hcd *xhci,
for (cur_stream = 1; cur_stream < stream_info->num_streams;
cur_stream++) {
cur_ring = stream_info->stream_rings[cur_stream];
+
+ if (xhci->quirks & XHCI_STREAM_QUIRK)
+ del_timer_sync(&cur_ring->stream_timer);
+
if (cur_ring) {
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index ca82e2c61ddc..fa9891004729 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -19,6 +19,8 @@
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/usb/of.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/xhci_pdriver.h>
#include "xhci.h"
#include "xhci-plat.h"
@@ -153,6 +155,35 @@ static const struct of_device_id usb_xhci_of_match[] = {
MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
#endif
+/*
+ * Register (@yes == true) or unregister (@yes == false) @hcd as the OTG
+ * host on the USB3 transceiver, if one is present.  A missing or
+ * OTG-less PHY is not an error: hcd->usb_phy is cleared and 0 is
+ * returned.  Returns a negative errno when otg_set_host() fails.
+ */
+static int usb_otg_set_host(struct device *dev, struct usb_hcd *hcd, bool yes)
+{
+	int ret = 0;
+
+	hcd->usb_phy = usb_get_phy(USB_PHY_TYPE_USB3);
+	if (IS_ERR_OR_NULL(hcd->usb_phy) || !hcd->usb_phy->otg)
+		/* No OTG-capable PHY: nothing to do, not a failure. */
+		goto disable_phy;
+
+	if (yes) {
+		/* Propagate the error; the original silently returned 0. */
+		ret = otg_set_host(hcd->usb_phy->otg, &hcd->self);
+		if (ret) {
+			usb_put_phy(hcd->usb_phy);
+			goto disable_phy;
+		}
+	} else {
+		ret = otg_set_host(hcd->usb_phy->otg, NULL);
+		usb_put_phy(hcd->usb_phy);
+		goto disable_phy;
+	}
+
+	return 0;
+
+disable_phy:
+	hcd->usb_phy = NULL;
+
+	return ret;
+}
+
static int xhci_plat_probe(struct platform_device *pdev)
{
const struct xhci_plat_priv *priv_match;
@@ -208,6 +239,9 @@ static int xhci_plat_probe(struct platform_device *pdev)
return ret;
}
+ /* Set the controller as wakeup capable */
+ device_set_wakeup_capable(&pdev->dev, true);
+
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
@@ -279,6 +313,13 @@ static int xhci_plat_probe(struct platform_device *pdev)
/* Iterate over all parent nodes for finding quirks */
for (tmpdev = &pdev->dev; tmpdev; tmpdev = tmpdev->parent) {
+ if (device_property_read_bool(&pdev->dev, "xhci-stream-quirk"))
+ xhci->quirks |= XHCI_STREAM_QUIRK;
+
+ if (device_property_read_bool(&pdev->dev,
+ "quirk-broken-port-ped"))
+ xhci->quirks |= XHCI_BROKEN_PORT_PED;
+
if (device_property_read_bool(tmpdev, "usb2-lpm-disable"))
xhci->quirks |= XHCI_HW_LPM_DISABLE;
@@ -317,6 +358,10 @@ static int xhci_plat_probe(struct platform_device *pdev)
if (ret)
goto dealloc_usb2_hcd;
+ ret = usb_otg_set_host(&pdev->dev, hcd, true);
+ if (ret)
+ goto dealloc_usb2_hcd;
+
device_enable_async_suspend(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
@@ -369,6 +414,8 @@ static int xhci_plat_remove(struct platform_device *dev)
xhci->shared_hcd = NULL;
usb_phy_shutdown(hcd->usb_phy);
+ usb_otg_set_host(&dev->dev, hcd, false);
+
usb_remove_hcd(hcd);
usb_put_hcd(shared_hcd);
@@ -388,6 +435,16 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev)
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+#if IS_ENABLED(CONFIG_USB_DWC3_OF_SIMPLE)
+ /* Inform dwc3 driver about the device wakeup capability */
+ if (device_may_wakeup(&hcd->self.root_hub->dev)) {
+ enable_irq_wake(hcd->irq);
+ dwc3_host_wakeup_capable(dev, true);
+ } else {
+ dwc3_host_wakeup_capable(dev, false);
+ }
+#endif
+
/*
* xhci_suspend() needs `do_wakeup` to know whether host is allowed
* to do wakeup during suspend. Since xhci_plat_suspend is currently
@@ -433,6 +490,9 @@ static int __maybe_unused xhci_plat_runtime_resume(struct device *dev)
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ if (device_may_wakeup(&hcd->self.root_hub->dev))
+ disable_irq_wake(hcd->irq);
+
return xhci_resume(xhci, 0);
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 2c255d0620b0..6acd750a08a3 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -840,9 +840,21 @@ remove_finished_td:
*/
ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td);
+
inc_td_cnt(cur_td->urb);
- if (last_td_in_urb(cur_td))
- xhci_giveback_urb_in_irq(xhci, cur_td, 0);
+
+ if (last_td_in_urb(cur_td)) {
+ if ((xhci->quirks & XHCI_STREAM_QUIRK) &&
+ (ep_ring->stream_timeout_handler == true)) {
+ /* We get here if stream timer time-out and stop
+ * command is issued. Send urb status as -EAGAIN
+ * so that the same urb can be re-submitted.
+ */
+ xhci_giveback_urb_in_irq(xhci, cur_td, -EAGAIN);
+ ep_ring->stream_timeout_handler = false;
+ } else
+ xhci_giveback_urb_in_irq(xhci, cur_td, 0);
+ }
/* Stop processing the cancelled list if the watchdog timer is
* running.
@@ -951,6 +963,84 @@ void xhci_hc_died(struct xhci_hcd *xhci)
usb_hc_died(xhci_to_hcd(xhci));
}
+/* Timer callback for a BULK IN stream ring.
+ *
+ * The dwc3 host controller has an issue where it occasionally does not
+ * process BULK IN stream ring TDs even after the doorbell has been rung
+ * for that stream ring.  Because of this behaviour no transfer events are
+ * generated by the controller on the stream ring, resulting in a hang.
+ * xhci_stream_timeout() works around this by queueing a Stop Endpoint
+ * command once the stream timer (armed in xhci_queue_bulk_tx()) expires;
+ * the stop-command handler then gives the cancelled URBs back with
+ * -EAGAIN so they can be resubmitted.
+ */
+void xhci_stream_timeout(struct timer_list *arg)
+{
+	struct xhci_hcd *xhci;
+	struct xhci_virt_ep *ep;
+	struct xhci_ring *ep_ring;
+	unsigned int slot_id, ep_index, stream_id;
+	struct xhci_td *td = NULL;
+	struct urb *urb = NULL;
+	struct urb_priv *urb_priv;
+	struct xhci_command *command;
+	unsigned long flags;
+	int i;
+
+	ep_ring = from_timer(ep_ring, arg, stream_timer);
+	xhci = ep_ring->xhci;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+
+	if (!list_empty(&ep_ring->td_list)) {
+		/* Cancel the URB owning the oldest pending TD. */
+		td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
+		urb = td->urb;
+		urb_priv = urb->hcpriv;
+
+		slot_id = urb->dev->slot_id;
+		ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+		/* NOTE(review): stream_id is computed but never used here. */
+		stream_id = ep_ring->stream_id;
+		/* NOTE(review): assumes xhci->devs[slot_id] is still valid
+		 * while the stream timer is pending — confirm teardown order.
+		 */
+		ep = &xhci->devs[slot_id]->eps[ep_index];
+		/* Flag so the stop-command path gives back -EAGAIN. */
+		ep_ring->stream_timeout_handler = true;
+
+		/* Delete the stream ring timer */
+		del_timer(&ep_ring->stream_timer);
+
+		for (i = 0; i < urb_priv->num_tds; i++) {
+			td = &urb_priv->td[i];
+			list_add_tail(&td->cancelled_td_list,
+				      &ep->cancelled_td_list);
+		}
+
+		/* Queue a stop endpoint command, but only if this is
+		 * the first cancellation to be handled.
+		 */
+		if (!(ep->ep_state & EP_STOP_CMD_PENDING)) {
+			command = xhci_alloc_command(xhci, false,
+						     GFP_ATOMIC);
+			if (!command) {
+				xhci_warn(xhci,
+					  "%s: Failed to allocate command\n",
+					  __func__);
+				spin_unlock_irqrestore(&xhci->lock, flags);
+				return;
+			}
+
+			ep->ep_state |= EP_STOP_CMD_PENDING;
+			/* Arm the watchdog for the stop command itself. */
+			ep->stop_cmd_timer.expires = jiffies +
+				XHCI_STOP_EP_CMD_TIMEOUT * HZ;
+			add_timer(&ep->stop_cmd_timer);
+			xhci_queue_stop_endpoint(xhci, command,
+						 urb->dev->slot_id, ep_index, 0);
+			xhci_ring_cmd_db(xhci);
+		}
+
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		return;
+	}
+
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	/* let the SCSI stack take care */
+	del_timer(&ep_ring->stream_timer);
+}
+
/* Watchdog timer function for when a stop endpoint command fails to complete.
* In this case, we assume the host controller is broken or dying or dead. The
* host may still be completing some other events, so we have to be careful to
@@ -2403,6 +2493,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
td_num++;
}
+ if ((xhci->quirks & XHCI_STREAM_QUIRK) &&
+ (ep->ep_state & EP_HAS_STREAMS))
+ del_timer(&ep_ring->stream_timer);
+
/* Look for common error cases */
switch (trb_comp_code) {
/* Skip codes that require special handling depending on
@@ -3454,6 +3548,15 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
check_trb_math(urb, enqd_len);
+
+ if ((xhci->quirks & XHCI_STREAM_QUIRK) && (urb->stream_id > 0) &&
+ (usb_endpoint_dir_in(&urb->ep->desc) == 1)) {
+ /* Start the stream timer so that xhci_stream_timeout() can be
+ * triggered if xhci is stuck while processing BULK IN streams.
+ */
+ ring->stream_timeout_handler = false;
+ mod_timer(&ring->stream_timer, jiffies + 5 * HZ);
+ }
giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
start_cycle, start_trb);
return 0;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index ed468eed299c..caa90b487e25 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -183,7 +183,11 @@ int xhci_reset(struct xhci_hcd *xhci)
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
command = readl(&xhci->op_regs->command);
+#ifdef CONFIG_USB_DWC3_OTG
+ command |= CMD_LRESET;
+#else
command |= CMD_RESET;
+#endif
writel(command, &xhci->op_regs->command);
/* Existing Intel xHCI controllers require a delay of 1 mS,
@@ -197,7 +201,12 @@ int xhci_reset(struct xhci_hcd *xhci)
udelay(1000);
ret = xhci_handshake(&xhci->op_regs->command,
- CMD_RESET, 0, 10 * 1000 * 1000);
+#ifdef CONFIG_USB_DWC3_OTG
+ CMD_LRESET,
+#else
+ CMD_RESET,
+#endif
+ 0, 10 * 1000 * 1000);
if (ret)
return ret;
@@ -718,6 +727,11 @@ static void xhci_stop(struct usb_hcd *hcd)
/* Only halt host and free memory after both hcds are removed */
if (!usb_hcd_is_primary_hcd(hcd)) {
+ /* Remove shared_hcd if no otg ports are present */
+ if (!hcd->self.otg_port) {
+ /* usb core will free this hcd shortly, unset pointer */
+ xhci->shared_hcd = NULL;
+ }
mutex_unlock(&xhci->mutex);
return;
}
@@ -1670,8 +1684,21 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
goto err_giveback;
}
+ ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+ ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
+ ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+ if (!ep_ring) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* Delete the stream timer */
+ if ((xhci->quirks & XHCI_STREAM_QUIRK) && (urb->stream_id > 0))
+ del_timer(&ep_ring->stream_timer);
+
i = urb_priv->num_tds_done;
if (i < urb_priv->num_tds)
+
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Cancel URB %p, dev %s, ep 0x%x, "
"starting at offset 0x%llx",
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index c80710e47476..a37618cbc8f1 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1614,6 +1614,9 @@ struct xhci_ring {
enum xhci_ring_type type;
bool last_td_was_short;
struct radix_tree_root *trb_address_map;
+ struct timer_list stream_timer;
+ bool stream_timeout_handler;
+ struct xhci_hcd *xhci;
};
struct xhci_erst_entry {
@@ -1873,6 +1876,7 @@ struct xhci_hcd {
#define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33)
#define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34)
#define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35)
+#define XHCI_STREAM_QUIRK	BIT_ULL(36)
unsigned int num_active_eps;
unsigned int limit_active_eps;
@@ -2120,6 +2124,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id,
unsigned int ep_index, unsigned int stream_id,
struct xhci_td *td);
void xhci_stop_endpoint_command_watchdog(struct timer_list *t);
+void xhci_stream_timeout(struct timer_list *unused);
void xhci_handle_command_timeout(struct work_struct *work);
void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 4b3fa78995cf..11ed22344794 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -173,6 +173,7 @@ config USB_TEGRA_PHY
config USB_ULPI
bool "Generic ULPI Transceiver Driver"
depends on ARM || ARM64 || COMPILE_TEST
+ depends on USB_PHY
select USB_ULPI_VIEWPORT
help
Enable this to support ULPI connected USB OTG transceivers which
diff --git a/drivers/usb/phy/phy-ulpi.c b/drivers/usb/phy/phy-ulpi.c
index e683a37e3a7a..2a6d9b95febd 100644
--- a/drivers/usb/phy/phy-ulpi.c
+++ b/drivers/usb/phy/phy-ulpi.c
@@ -13,9 +13,16 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>
+#include <linux/usb/phy.h>
struct ulpi_info {
@@ -39,6 +46,13 @@ static struct ulpi_info ulpi_ids[] = {
ULPI_INFO(ULPI_ID(0x0451, 0x1507), "TI TUSB1210"),
};
+struct ulpi_phy {
+ struct usb_phy *usb_phy;
+ void __iomem *regs;
+ unsigned int vp_offset;
+ unsigned int flags;
+};
+
static int ulpi_set_otg_flags(struct usb_phy *phy)
{
unsigned int flags = ULPI_OTG_CTRL_DP_PULLDOWN |
@@ -240,6 +254,23 @@ static int ulpi_set_vbus(struct usb_otg *otg, bool on)
return usb_phy_io_write(phy, flags, ULPI_OTG_CTRL);
}
+static int usbphy_set_vbus(struct usb_phy *phy, int on)
+{
+ unsigned int flags = usb_phy_io_read(phy, ULPI_OTG_CTRL);
+
+ flags &= ~(ULPI_OTG_CTRL_DRVVBUS | ULPI_OTG_CTRL_DRVVBUS_EXT);
+
+ if (on) {
+ if (phy->flags & ULPI_OTG_DRVVBUS)
+ flags |= ULPI_OTG_CTRL_DRVVBUS;
+
+ if (phy->flags & ULPI_OTG_DRVVBUS_EXT)
+ flags |= ULPI_OTG_CTRL_DRVVBUS_EXT;
+ }
+
+ return usb_phy_io_write(phy, flags, ULPI_OTG_CTRL);
+}
+
static void otg_ulpi_init(struct usb_phy *phy, struct usb_otg *otg,
struct usb_phy_io_ops *ops,
unsigned int flags)
@@ -249,6 +280,7 @@ static void otg_ulpi_init(struct usb_phy *phy, struct usb_otg *otg,
phy->io_ops = ops;
phy->otg = otg;
phy->init = ulpi_init;
+ phy->set_vbus = usbphy_set_vbus;
otg->usb_phy = phy;
otg->set_host = ulpi_set_host;
@@ -301,3 +333,71 @@ devm_otg_ulpi_create(struct device *dev,
return phy;
}
EXPORT_SYMBOL_GPL(devm_otg_ulpi_create);
+
+static int ulpi_phy_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct resource *res;
+	struct ulpi_phy *uphy;
+	bool flag;
+	int ret;
+
+	uphy = devm_kzalloc(&pdev->dev, sizeof(*uphy), GFP_KERNEL);
+	if (!uphy)
+		return -ENOMEM;
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+	uphy->regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!uphy->regs)
+		return -ENOMEM;
+	ret = of_property_read_u32(np, "view-port", &uphy->vp_offset);
+	if (ret) {
+		dev_err(&pdev->dev, "view-port register not specified\n");
+		return ret;
+	}
+
+ flag = of_property_read_bool(np, "drv-vbus");
+ if (flag)
+ uphy->flags |= ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT;
+
+	uphy->usb_phy = otg_ulpi_create(&ulpi_viewport_access_ops, uphy->flags);
+	if (!uphy->usb_phy)
+		return -ENOMEM;
+	uphy->usb_phy->dev = &pdev->dev;
+	uphy->usb_phy->io_priv = uphy->regs + uphy->vp_offset;
+
+	ret = usb_add_phy_dev(uphy->usb_phy);
+	if (ret < 0)
+		return ret;
+	platform_set_drvdata(pdev, uphy);
+	return 0;
+}
+
+static int ulpi_phy_remove(struct platform_device *pdev)
+{
+ struct ulpi_phy *uphy = platform_get_drvdata(pdev);
+
+ usb_remove_phy(uphy->usb_phy);
+
+ return 0;
+}
+
+static const struct of_device_id ulpi_phy_table[] = {
+ { .compatible = "ulpi-phy" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ulpi_phy_table);
+
+static struct platform_driver ulpi_phy_driver = {
+ .probe = ulpi_phy_probe,
+ .remove = ulpi_phy_remove,
+ .driver = {
+ .name = "ulpi-phy",
+ .of_match_table = ulpi_phy_table,
+ },
+};
+module_platform_driver(ulpi_phy_driver);
+
+MODULE_DESCRIPTION("ULPI PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index d592071119ba..da9a3588d209 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -80,6 +80,8 @@ static int uas_try_complete(struct scsi_cmnd *cmnd, const char *caller);
static void uas_free_streams(struct uas_dev_info *devinfo);
static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *prefix,
int status);
+static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp,
+ struct scsi_cmnd *cmnd);
/*
* This driver needs its own workqueue, as we need to control memory allocation.
@@ -296,18 +298,286 @@ static bool uas_evaluate_response_iu(struct response_iu *riu, struct scsi_cmnd *
return response_code == RC_TMF_SUCCEEDED;
}
+static void dummy_scsi_done(struct scsi_cmnd *cmnd)
+{
+ struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+ struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
+
+ devinfo->cmnd[cmdinfo->uas_tag - 1] = NULL;
+ kfree(cmnd->request);
+ kfree(cmnd);
+}
+
+static void uas_workaround_cmplt(struct urb *urb)
+{
+ struct scsi_cmnd *cmnd;
+ struct uas_cmd_info *cmdinfo;
+
+ if ((urb->context != NULL) && (urb->status == 0)) {
+ cmnd = urb->context;
+ cmdinfo = (struct uas_cmd_info *)&cmnd->SCp;
+
+ if (cmdinfo->data_in_urb != urb)
+ cmnd->scsi_done(cmnd);
+ }
+
+ usb_free_urb(urb);
+}
+
+static struct urb *uas_workaround_cmnd(struct uas_dev_info *devinfo, gfp_t gfp,
+ struct scsi_cmnd *cmnd)
+{
+ struct scsi_device *sdev = cmnd->device;
+ struct urb *urb;
+ int err;
+
+ urb = uas_alloc_cmd_urb(devinfo, gfp, cmnd);
+ if (!urb) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to allocate cmnd URB\n", __func__);
+ return NULL;
+ }
+
+ err = usb_submit_urb(urb, gfp);
+ if (err) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to submit cmd, err=%d\n", __func__, err);
+ goto free;
+ }
+ usb_anchor_urb(urb, &devinfo->cmd_urbs);
+ return urb;
+
+free:
+ usb_free_urb(urb);
+ return NULL;
+
+}
+
+static struct urb *uas_workaround_data(struct uas_dev_info *devinfo, gfp_t gfp,
+ struct scsi_cmnd *cmnd)
+{
+ struct scsi_device *sdev = cmnd->device;
+ struct usb_device *udev = devinfo->udev;
+ struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+ struct urb *urb = usb_alloc_urb(0, gfp);
+ struct scsi_data_buffer *sdb = NULL;
+ void *temp_buf;
+ unsigned int pipe;
+ int err;
+
+ if (!urb) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to allocate URB\n", __func__);
+ return NULL;
+ }
+
+ cmdinfo->data_in_urb = urb;
+ sdb = &cmnd->sdb;
+ pipe = devinfo->data_in_pipe;
+ temp_buf = kzalloc(sdb->length, GFP_ATOMIC);
+ if (!temp_buf) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to allocate memory\n", __func__);
+ goto free;
+ }
+
+ usb_fill_bulk_urb(urb, udev, pipe, temp_buf, sdb->length,
+ uas_workaround_cmplt, cmnd);
+ if (devinfo->use_streams)
+ urb->stream_id = cmdinfo->uas_tag;
+ urb->transfer_flags |= URB_FREE_BUFFER;
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to submit Data In urb, err = %d\n",
+ __func__, err);
+ goto free;
+ }
+
+ usb_anchor_urb(urb, &devinfo->data_urbs);
+ return urb;
+
+free:
+ usb_free_urb(urb);
+ return NULL;
+}
+
+static struct urb *uas_workaround_sense(struct uas_dev_info *devinfo, gfp_t gfp,
+ struct scsi_cmnd *cmnd)
+{
+ struct scsi_device *sdev = cmnd->device;
+ struct usb_device *udev = devinfo->udev;
+ struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+ struct urb *urb = usb_alloc_urb(0, gfp);
+ struct sense_iu *iu;
+ int err;
+
+ if (!urb) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to allocate URB\n", __func__);
+ return NULL;
+ }
+
+ iu = kzalloc(sizeof(*iu), gfp);
+ if (!iu) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to allocate memory for sense_iu\n",
+ __func__);
+ goto free;
+ }
+
+ usb_fill_bulk_urb(urb, udev, devinfo->status_pipe, iu, sizeof(*iu),
+ uas_workaround_cmplt, cmnd);
+ if (devinfo->use_streams)
+ urb->stream_id = cmdinfo->uas_tag;
+ urb->transfer_flags |= URB_FREE_BUFFER;
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to submit Sense urb, err = %d\n",
+ __func__, err);
+ goto free;
+ }
+ usb_anchor_urb(urb, &devinfo->sense_urbs);
+ return urb;
+
+free:
+ usb_free_urb(urb);
+ return NULL;
+}
+
+/*
+ * This function is called only if the DATA IN stream timer expired, which
+ * means xhci host controller has failed to process the TRB's present in the
+ * stream ring. As a part of recovery sequence, this function re-submits the
+ * previous stopped urb on which xhci failed to process data and along with
+ * that urb it prepares & submits sense, data and cmnd urb with scsi command
+ * set to standard inquiry request containing the next free stream id tag.
+ * Doing so will make the xhci start processing the previous stopped urb
+ * along with the urb that has standard inquiry scsi command.
+ */
+static int uas_workaround(struct urb *urb)
+{
+ struct scsi_cmnd *cmnd = urb->context;
+ struct scsi_device *sdev = cmnd->device;
+ struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
+ struct scsi_cmnd *temp_cmnd;
+ struct uas_cmd_info *temp_cmdinfo;
+ struct urb *sense_urb, *data_urb, *cmnd_urb;
+ struct request *temp_request;
+ unsigned int idx;
+ int err;
+ char inquiry[16] = { 0x12, 0x0, 0x0, 0x0, 0x10, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 };
+
+
+ /* Find a free uas-tag */
+ for (idx = 0; idx < devinfo->qdepth; idx++) {
+ if (!devinfo->cmnd[idx])
+ break;
+ }
+
+ if (idx == devinfo->qdepth) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to find free tag\n", __func__);
+ err = -EINVAL;
+ goto free;
+ }
+
+ /* Create a scsi_cmnd and send dummy inquiry data on the next
+ * available tag
+ */
+ temp_cmnd = kzalloc(sizeof(struct scsi_cmnd), GFP_ATOMIC);
+ if (!temp_cmnd) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to allocate memory for scsi_cmnd\n",
+ __func__);
+ err = -ENOMEM;
+ goto free;
+ }
+
+ temp_request = kzalloc(sizeof(struct request), GFP_ATOMIC);
+	if (!temp_request) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: Failed to allocate memory for request\n",
+ __func__);
+ err = -ENOMEM;
+ goto free;
+ }
+
+ temp_cmnd->device = cmnd->device;
+ temp_cmnd->cmnd = inquiry;
+ temp_cmnd->cmd_len = 16;
+ temp_cmnd->sdb.length = 0x10;
+ temp_cmnd->scsi_done = dummy_scsi_done;
+ temp_request->tag = idx;
+ temp_cmnd->request = temp_request;
+
+ temp_cmdinfo = (struct uas_cmd_info *)&temp_cmnd->SCp;
+ memset(temp_cmdinfo, 0, sizeof(struct uas_cmd_info));
+
+ temp_cmdinfo->uas_tag = idx + 1;
+ devinfo->cmnd[idx] = temp_cmnd;
+
+ /* Submit previously stopped URB first */
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err) {
+ shost_printk(KERN_INFO, sdev->host,
+ "%s: submit err %d\n", __func__, err);
+ kfree(temp_cmnd);
+ kfree(temp_request);
+ goto free;
+ }
+ usb_anchor_urb(urb, &devinfo->data_urbs);
+
+	sense_urb = uas_workaround_sense(devinfo, GFP_ATOMIC, temp_cmnd);
+	if (!sense_urb) {
+		err = -ENOMEM;
+		kfree(temp_request);
+		kfree(temp_cmnd);
+		goto free;
+	}
+
+	/* Allocate and submit DATA IN urb for next available tag */
+	data_urb = uas_workaround_data(devinfo, GFP_ATOMIC, temp_cmnd);
+	if (!data_urb) {
+		err = -ENOMEM;
+		sense_urb->context = NULL;
+		usb_kill_urb(sense_urb);
+		usb_put_urb(sense_urb);
+		kfree(temp_request);
+		kfree(temp_cmnd);
+		goto free;
+	}
+
+	/* Allocate and submit CMND urb with dummy inquiry data */
+	cmnd_urb = uas_workaround_cmnd(devinfo, GFP_ATOMIC, temp_cmnd);
+	if (!cmnd_urb) {
+		err = -ENOMEM;
+		data_urb->context = NULL;
+		usb_kill_urb(data_urb);
+		usb_put_urb(data_urb);
+		kfree(temp_request);
+		kfree(temp_cmnd);
+	}
+
+free:
+ return err;
+}
+
static void uas_stat_cmplt(struct urb *urb)
{
struct iu *iu = urb->transfer_buffer;
- struct Scsi_Host *shost = urb->context;
- struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)urb->context;
+ struct uas_dev_info *devinfo =
+ (struct uas_dev_info *)cmnd->device->hostdata;
struct urb *data_in_urb = NULL;
struct urb *data_out_urb = NULL;
- struct scsi_cmnd *cmnd;
struct uas_cmd_info *cmdinfo;
unsigned long flags;
unsigned int idx;
int status = urb->status;
+ int err;
bool success;
spin_lock_irqsave(&devinfo->lock, flags);
@@ -316,6 +586,21 @@ static void uas_stat_cmplt(struct urb *urb)
goto out;
if (status) {
+ if (status == -EAGAIN) {
+ /* We get here only if the xhci stream timer expires,
+ * call uas_workaround() with this urb as argument.
+ */
+ err = uas_workaround(urb);
+ if (err != 0) {
+ dev_err(&urb->dev->dev,
+ "%s: uas_workaround() failed, err=%d\n",
+ __func__, err);
+ goto out;
+ }
+ spin_unlock_irqrestore(&devinfo->lock, flags);
+ return;
+ }
+
if (status != -ENOENT && status != -ECONNRESET && status != -ESHUTDOWN)
dev_err(&urb->dev->dev, "stat urb: status %d\n", status);
goto out;
@@ -398,10 +683,27 @@ static void uas_data_cmplt(struct urb *urb)
struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
struct scsi_data_buffer *sdb = &cmnd->sdb;
unsigned long flags;
+ int err;
int status = urb->status;
spin_lock_irqsave(&devinfo->lock, flags);
+ if ((status == -EAGAIN) && (!devinfo->resetting) &&
+ (cmdinfo->data_in_urb == urb)) {
+ /* We get here only if the xhci stream timer expires,
+ * call uas_workaround() with this urb as argument.
+ */
+ err = uas_workaround(urb);
+ if (err != 0) {
+ dev_err(&urb->dev->dev,
+ "%s: uas_workaround() failed, err=%d\n",
+ __func__, err);
+ goto out;
+ }
+ spin_unlock_irqrestore(&devinfo->lock, flags);
+ return;
+ }
+
if (cmdinfo->data_in_urb == urb) {
cmdinfo->state &= ~DATA_IN_URB_INFLIGHT;
cmdinfo->data_in_urb = NULL;
@@ -480,7 +782,7 @@ static struct urb *uas_alloc_sense_urb(struct uas_dev_info *devinfo, gfp_t gfp,
goto free;
usb_fill_bulk_urb(urb, udev, devinfo->status_pipe, iu, sizeof(*iu),
- uas_stat_cmplt, cmnd->device->host);
+ uas_stat_cmplt, cmnd);
if (devinfo->use_streams)
urb->stream_id = cmdinfo->uas_tag;
urb->transfer_flags |= URB_FREE_BUFFER;
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 37157ed9a881..b080a59113c0 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -45,6 +45,12 @@ UNUSUAL_DEV(0x0984, 0x0301, 0x0128, 0x0128,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_UAS),
+UNUSUAL_DEV(0x0525, 0xa4a5, 0x0000, 0x9999,
+ "Netchip",
+ "Target Product",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_ATA_1X),
+
/* Reported-by: David Webb <djw@noc.ac.uk> */
UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
"Seagate",
diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c
index 7fe4f7c3f7ce..bd2c64e0fc7f 100644
--- a/drivers/watchdog/of_xilinx_wdt.c
+++ b/drivers/watchdog/of_xilinx_wdt.c
@@ -2,7 +2,7 @@
/*
* Watchdog Device Driver for Xilinx axi/xps_timebase_wdt
*
- * (C) Copyright 2013 - 2014 Xilinx, Inc.
+ * (C) Copyright 2013 - 2019 Xilinx, Inc.
* (C) Copyright 2011 (Alejandro Cabrera <aldaya@gmail.com>)
*/
@@ -18,18 +18,37 @@
#include <linux/of_device.h>
#include <linux/of_address.h>
+#define XWT_WWDT_DEFAULT_TIMEOUT 10
+#define XWT_WWDT_MIN_TIMEOUT 1
+#define XWT_WWDT_MAX_TIMEOUT 80
+
/* Register offsets for the Wdt device */
#define XWT_TWCSR0_OFFSET 0x0 /* Control/Status Register0 */
#define XWT_TWCSR1_OFFSET 0x4 /* Control/Status Register1 */
#define XWT_TBR_OFFSET 0x8 /* Timebase Register Offset */
+#define XWT_WWREF_OFFSET 0x1000 /* Refresh Register */
+#define XWT_WWCSR_OFFSET 0x2000 /* Control/Status Register */
+#define XWT_WWOFF_OFFSET 0x2008 /* Offset Register */
+#define XWT_WWCMP0_OFFSET 0x2010 /* Compare Value Register0 */
+#define XWT_WWCMP1_OFFSET 0x2014 /* Compare Value Register1 */
+#define XWT_WWWRST_OFFSET 0x2FD0 /* Warm Reset Register */
/* Control/Status Register Masks */
-#define XWT_CSR0_WRS_MASK 0x00000008 /* Reset status */
-#define XWT_CSR0_WDS_MASK 0x00000004 /* Timer state */
-#define XWT_CSR0_EWDT1_MASK 0x00000002 /* Enable bit 1 */
+#define XWT_CSR0_WRS_MASK BIT(3) /* Reset status */
+#define XWT_CSR0_WDS_MASK BIT(2) /* Timer state */
+#define XWT_CSR0_EWDT1_MASK BIT(1) /* Enable bit 1 */
/* Control/Status Register 0/1 bits */
-#define XWT_CSRX_EWDT2_MASK 0x00000001 /* Enable bit 2 */
+#define XWT_CSRX_EWDT2_MASK BIT(0) /* Enable bit 2 */
+
+/* Refresh Register Masks */
+#define XWT_WWREF_GWRR_MASK BIT(0) /* Refresh and start new period */
+
+/* Generic Control/Status Register Masks */
+#define XWT_WWCSR_GWEN_MASK BIT(0) /* Enable Bit */
+
+/* Warm Reset Register Masks */
+#define XWT_WWRST_GWWRR_MASK BIT(0) /* Warm Reset Register */
/* SelfTest constants */
#define XWT_MAX_SELFTEST_LOOP_COUNT 0x00010000
@@ -37,10 +56,34 @@
#define WATCHDOG_NAME "Xilinx Watchdog"
+static int wdt_timeout;
+
+module_param(wdt_timeout, int, 0644);
+MODULE_PARM_DESC(wdt_timeout,
+ "Watchdog time in seconds. (default="
+ __MODULE_STRING(XWT_WWDT_DEFAULT_TIMEOUT) ")");
+
+/**
+ * enum xwdt_ip_type - WDT IP type.
+ *
+ * @XWDT_WDT: Soft wdt ip.
+ * @XWDT_WWDT: Window wdt ip.
+ */
+enum xwdt_ip_type {
+ XWDT_WDT = 0,
+ XWDT_WWDT,
+};
+
+struct xwdt_devtype_data {
+ enum xwdt_ip_type wdttype;
+ const struct watchdog_ops *xwdt_ops;
+ const struct watchdog_info *xwdt_info;
+};
+
struct xwdt_device {
void __iomem *base;
u32 wdt_interval;
- spinlock_t spinlock;
+ spinlock_t spinlock; /* spinlock for register handling */
struct watchdog_device xilinx_wdt_wdd;
struct clk *clk;
};
@@ -50,6 +93,7 @@ static int xilinx_wdt_start(struct watchdog_device *wdd)
int ret;
u32 control_status_reg;
struct xwdt_device *xdev = watchdog_get_drvdata(wdd);
+ struct watchdog_device *xilinx_wdt_wdd = &xdev->xilinx_wdt_wdd;
ret = clk_enable(xdev->clk);
if (ret) {
@@ -70,6 +114,8 @@ static int xilinx_wdt_start(struct watchdog_device *wdd)
spin_unlock(&xdev->spinlock);
+ dev_dbg(xilinx_wdt_wdd->parent, "Watchdog Started!\n");
+
return 0;
}
@@ -77,6 +123,7 @@ static int xilinx_wdt_stop(struct watchdog_device *wdd)
{
u32 control_status_reg;
struct xwdt_device *xdev = watchdog_get_drvdata(wdd);
+ struct watchdog_device *xilinx_wdt_wdd = &xdev->xilinx_wdt_wdd;
spin_lock(&xdev->spinlock);
@@ -91,7 +138,7 @@ static int xilinx_wdt_stop(struct watchdog_device *wdd)
clk_disable(xdev->clk);
- pr_info("Stopped!\n");
+ dev_dbg(xilinx_wdt_wdd->parent, "Watchdog Stopped!\n");
return 0;
}
@@ -126,6 +173,126 @@ static const struct watchdog_ops xilinx_wdt_ops = {
.ping = xilinx_wdt_keepalive,
};
+static int xilinx_wwdt_start(struct watchdog_device *wdd)
+{
+ int ret;
+ u32 control_status_reg;
+ u64 count;
+ struct xwdt_device *xdev = watchdog_get_drvdata(wdd);
+ struct watchdog_device *xilinx_wdt_wdd = &xdev->xilinx_wdt_wdd;
+
+ unsigned long clock_f = clk_get_rate(xdev->clk);
+
+ /* Calculate timeout count */
+ count = wdd->timeout * clock_f;
+ ret = clk_enable(xdev->clk);
+ if (ret) {
+ dev_err(wdd->parent, "Failed to enable clock\n");
+ return ret;
+ }
+
+ spin_lock(&xdev->spinlock);
+
+ /*
+ * Timeout count is half as there are two windows
+ * first window overflow is ignored (interrupt),
+ * reset is only generated at second window overflow
+ */
+ count = count >> 1;
+
+ /* Disable the generic watchdog timer */
+ control_status_reg = ioread32(xdev->base + XWT_WWCSR_OFFSET);
+ control_status_reg &= ~(XWT_WWCSR_GWEN_MASK);
+ iowrite32(control_status_reg, xdev->base + XWT_WWCSR_OFFSET);
+
+ /* Set compare and offset registers for generic watchdog timeout */
+ iowrite32((u32)count, xdev->base + XWT_WWCMP0_OFFSET);
+ iowrite32((u32)0, xdev->base + XWT_WWCMP1_OFFSET);
+ iowrite32((u32)count, xdev->base + XWT_WWOFF_OFFSET);
+
+ /* Enable the generic watchdog timer */
+ control_status_reg = ioread32(xdev->base + XWT_WWCSR_OFFSET);
+ control_status_reg |= (XWT_WWCSR_GWEN_MASK);
+ iowrite32(control_status_reg, xdev->base + XWT_WWCSR_OFFSET);
+
+ spin_unlock(&xdev->spinlock);
+
+ dev_dbg(xilinx_wdt_wdd->parent, "Watchdog Started!\n");
+
+ return 0;
+}
+
+static int xilinx_wwdt_stop(struct watchdog_device *wdd)
+{
+ u32 control_status_reg;
+ struct xwdt_device *xdev = watchdog_get_drvdata(wdd);
+ struct watchdog_device *xilinx_wdt_wdd = &xdev->xilinx_wdt_wdd;
+
+ spin_lock(&xdev->spinlock);
+
+ /* Disable the generic watchdog timer */
+ control_status_reg = ioread32(xdev->base + XWT_WWCSR_OFFSET);
+ control_status_reg &= ~(XWT_WWCSR_GWEN_MASK);
+ iowrite32(control_status_reg, xdev->base + XWT_WWCSR_OFFSET);
+
+ spin_unlock(&xdev->spinlock);
+
+ clk_disable(xdev->clk);
+
+ dev_dbg(xilinx_wdt_wdd->parent, "Watchdog Stopped!\n");
+
+ return 0;
+}
+
+static int xilinx_wwdt_keepalive(struct watchdog_device *wdd)
+{
+ struct xwdt_device *xdev = watchdog_get_drvdata(wdd);
+
+ spin_lock(&xdev->spinlock);
+
+ iowrite32(XWT_WWREF_GWRR_MASK, xdev->base + XWT_WWREF_OFFSET);
+
+ spin_unlock(&xdev->spinlock);
+
+ return 0;
+}
+
+static int xilinx_wwdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int new_time)
+{
+ struct xwdt_device *xdev = watchdog_get_drvdata(wdd);
+ struct watchdog_device *xilinx_wdt_wdd = &xdev->xilinx_wdt_wdd;
+
+ if (new_time < XWT_WWDT_MIN_TIMEOUT ||
+ new_time > XWT_WWDT_MAX_TIMEOUT) {
+ dev_warn(xilinx_wdt_wdd->parent,
+ "timeout value must be %d<=x<=%d, using %d\n",
+ XWT_WWDT_MIN_TIMEOUT,
+ XWT_WWDT_MAX_TIMEOUT, new_time);
+ return -EINVAL;
+ }
+
+ wdd->timeout = new_time;
+
+ return xilinx_wwdt_start(wdd);
+}
+
+static const struct watchdog_info xilinx_wwdt_ident = {
+ .options = WDIOF_MAGICCLOSE |
+ WDIOF_KEEPALIVEPING |
+ WDIOF_SETTIMEOUT,
+ .firmware_version = 1,
+ .identity = "xlnx_wwdt watchdog",
+};
+
+static const struct watchdog_ops xilinx_wwdt_ops = {
+ .owner = THIS_MODULE,
+ .start = xilinx_wwdt_start,
+ .stop = xilinx_wwdt_stop,
+ .ping = xilinx_wwdt_keepalive,
+ .set_timeout = xilinx_wwdt_set_timeout,
+};
+
static u32 xwdt_selftest(struct xwdt_device *xdev)
{
int i;
@@ -156,6 +323,29 @@ static void xwdt_clk_disable_unprepare(void *data)
clk_disable_unprepare(data);
}
+static const struct xwdt_devtype_data xwdt_wdt_data = {
+ .wdttype = XWDT_WDT,
+ .xwdt_info = &xilinx_wdt_ident,
+ .xwdt_ops = &xilinx_wdt_ops,
+};
+
+static const struct xwdt_devtype_data xwdt_wwdt_data = {
+ .wdttype = XWDT_WWDT,
+ .xwdt_info = &xilinx_wwdt_ident,
+ .xwdt_ops = &xilinx_wwdt_ops,
+};
+
+static const struct of_device_id xwdt_of_match[] = {
+ { .compatible = "xlnx,xps-timebase-wdt-1.00.a",
+ .data = &xwdt_wdt_data },
+ { .compatible = "xlnx,xps-timebase-wdt-1.01.a",
+ .data = &xwdt_wdt_data },
+ { .compatible = "xlnx,versal-wwdt-1.0",
+ .data = &xwdt_wwdt_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xwdt_of_match);
+
static int xwdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -163,32 +353,46 @@ static int xwdt_probe(struct platform_device *pdev)
u32 pfreq = 0, enable_once = 0;
struct xwdt_device *xdev;
struct watchdog_device *xilinx_wdt_wdd;
+ const struct of_device_id *of_id;
+ const struct xwdt_devtype_data *devtype;
+ enum xwdt_ip_type wdttype;
xdev = devm_kzalloc(dev, sizeof(*xdev), GFP_KERNEL);
if (!xdev)
return -ENOMEM;
xilinx_wdt_wdd = &xdev->xilinx_wdt_wdd;
- xilinx_wdt_wdd->info = &xilinx_wdt_ident;
- xilinx_wdt_wdd->ops = &xilinx_wdt_ops;
+
+ of_id = of_match_device(xwdt_of_match, &pdev->dev);
+ if (!of_id)
+ return -EINVAL;
+
+ devtype = of_id->data;
+
+ wdttype = devtype->wdttype;
+
+ xilinx_wdt_wdd->info = devtype->xwdt_info;
+ xilinx_wdt_wdd->ops = devtype->xwdt_ops;
xilinx_wdt_wdd->parent = dev;
xdev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xdev->base))
return PTR_ERR(xdev->base);
- rc = of_property_read_u32(dev->of_node, "xlnx,wdt-interval",
- &xdev->wdt_interval);
- if (rc)
- dev_warn(dev, "Parameter \"xlnx,wdt-interval\" not found\n");
+ if (wdttype == XWDT_WDT) {
+ rc = of_property_read_u32(dev->of_node, "xlnx,wdt-interval",
+ &xdev->wdt_interval);
+ if (rc)
+ dev_warn(dev, "Parameter \"xlnx,wdt-interval\" not found\n");
- rc = of_property_read_u32(dev->of_node, "xlnx,wdt-enable-once",
- &enable_once);
- if (rc)
- dev_warn(dev,
- "Parameter \"xlnx,wdt-enable-once\" not found\n");
+ rc = of_property_read_u32(dev->of_node, "xlnx,wdt-enable-once",
+ &enable_once);
+ if (rc)
+ dev_warn(dev,
+ "Parameter \"xlnx,wdt-enable-once\" not found\n");
- watchdog_set_nowayout(xilinx_wdt_wdd, enable_once);
+ watchdog_set_nowayout(xilinx_wdt_wdd, enable_once);
+ }
xdev->clk = devm_clk_get(dev, NULL);
if (IS_ERR(xdev->clk)) {
@@ -210,13 +414,26 @@ static int xwdt_probe(struct platform_device *pdev)
pfreq = clk_get_rate(xdev->clk);
}
- /*
- * Twice of the 2^wdt_interval / freq because the first wdt overflow is
- * ignored (interrupt), reset is only generated at second wdt overflow
- */
- if (pfreq && xdev->wdt_interval)
- xilinx_wdt_wdd->timeout = 2 * ((1 << xdev->wdt_interval) /
- pfreq);
+ if (wdttype == XWDT_WDT) {
+ /*
+ * Twice of the 2^wdt_interval / freq because
+ * the first wdt overflow is ignored (interrupt),
+ * reset is only generated at second wdt overflow
+ */
+ if (pfreq && xdev->wdt_interval)
+ xilinx_wdt_wdd->timeout =
+ 2 * ((1 << xdev->wdt_interval) /
+ pfreq);
+ } else {
+ xilinx_wdt_wdd->timeout = XWT_WWDT_DEFAULT_TIMEOUT;
+ xilinx_wdt_wdd->min_timeout = XWT_WWDT_MIN_TIMEOUT;
+ xilinx_wdt_wdd->max_timeout = XWT_WWDT_MAX_TIMEOUT;
+
+ rc = watchdog_init_timeout(xilinx_wdt_wdd,
+ wdt_timeout, &pdev->dev);
+ if (rc)
+ dev_warn(&pdev->dev, "unable to set timeout value\n");
+ }
spin_lock_init(&xdev->spinlock);
watchdog_set_drvdata(xilinx_wdt_wdd, xdev);
@@ -231,10 +448,12 @@ static int xwdt_probe(struct platform_device *pdev)
if (rc)
return rc;
- rc = xwdt_selftest(xdev);
- if (rc == XWT_TIMER_FAILED) {
- dev_err(dev, "SelfTest routine error\n");
- return rc;
+ if (wdttype == XWDT_WDT) {
+ rc = xwdt_selftest(xdev);
+ if (rc == XWT_TIMER_FAILED) {
+ dev_err(dev, "SelfTest routine error\n");
+ return rc;
+ }
}
rc = devm_watchdog_register_device(dev, xilinx_wdt_wdd);
@@ -260,9 +479,10 @@ static int xwdt_probe(struct platform_device *pdev)
static int __maybe_unused xwdt_suspend(struct device *dev)
{
struct xwdt_device *xdev = dev_get_drvdata(dev);
+ struct watchdog_device *xilinx_wdt_wdd = &xdev->xilinx_wdt_wdd;
- if (watchdog_active(&xdev->xilinx_wdt_wdd))
- xilinx_wdt_stop(&xdev->xilinx_wdt_wdd);
+ if (watchdog_active(xilinx_wdt_wdd))
+ xilinx_wdt_wdd->ops->stop(xilinx_wdt_wdd);
return 0;
}
@@ -276,24 +496,17 @@ static int __maybe_unused xwdt_suspend(struct device *dev)
static int __maybe_unused xwdt_resume(struct device *dev)
{
struct xwdt_device *xdev = dev_get_drvdata(dev);
+ struct watchdog_device *xilinx_wdt_wdd = &xdev->xilinx_wdt_wdd;
int ret = 0;
- if (watchdog_active(&xdev->xilinx_wdt_wdd))
- ret = xilinx_wdt_start(&xdev->xilinx_wdt_wdd);
+ if (watchdog_active(xilinx_wdt_wdd))
+ ret = xilinx_wdt_wdd->ops->start(xilinx_wdt_wdd);
return ret;
}
static SIMPLE_DEV_PM_OPS(xwdt_pm_ops, xwdt_suspend, xwdt_resume);
-/* Match table for of_platform binding */
-static const struct of_device_id xwdt_of_match[] = {
- { .compatible = "xlnx,xps-timebase-wdt-1.00.a", },
- { .compatible = "xlnx,xps-timebase-wdt-1.01.a", },
- {},
-};
-MODULE_DEVICE_TABLE(of, xwdt_of_match);
-
static struct platform_driver xwdt_driver = {
.probe = xwdt_probe,
.driver = {
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 156b122c0ad5..76a5fc0b2737 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -123,6 +123,25 @@ struct drm_format_info {
*/
u8 block_h[4];
+ /**
+ * @pixels_per_macropixel:
+ * Number of pixels per macro-pixel (per plane). A macro-pixel is
+ * composed of multiple pixels, and there can be extra bits between
+ * pixels. This must be used along with @bytes_per_macropixel, only
+ * when single pixel size is not byte-aligned. In this case, @cpp
+ * is not valid and should be 0.
+ */
+ u8 pixels_per_macropixel[3];
+
+ /**
+ * @bytes_per_macropixel:
+ * Number of bytes per macro-pixel (per plane). A macro-pixel is
+ * composed of multiple pixels. The size of single macro-pixel should
+ * be byte-aligned. This should be used with @pixels_per_macropixel,
+ * and @cpp should be 0.
+ */
+ u8 bytes_per_macropixel[3];
+
/** @hsub: Horizontal chroma subsampling factor */
u8 hsub;
/** @vsub: Vertical chroma subsampling factor */
@@ -318,6 +337,8 @@ unsigned int drm_format_info_block_height(const struct drm_format_info *info,
int plane);
uint64_t drm_format_info_min_pitch(const struct drm_format_info *info,
int plane, unsigned int buffer_width);
+uint64_t drm_format_plane_width_bytes(const struct drm_format_info *info,
+ int plane, unsigned int width);
const char *drm_get_format_name(uint32_t format, struct drm_format_name_buf *buf);
#endif /* __DRM_FOURCC_H__ */
diff --git a/include/dt-bindings/drm/mipi-dsi.h b/include/dt-bindings/drm/mipi-dsi.h
new file mode 100644
index 000000000000..c6f37ec661fe
--- /dev/null
+++ b/include/dt-bindings/drm/mipi-dsi.h
@@ -0,0 +1,11 @@
+#ifndef __DT_BINDINGS_DRM__
+#define __DT_BINDINGS_DRM__
+/*
+ * MIPI DSI pixel formats as defined in the include/drm/drm_mipi_dsi.h"
+ */
+#define MIPI_DSI_FMT_RGB888 0
+#define MIPI_DSI_FMT_RGB666 1
+#define MIPI_DSI_FMT_RGB666_PACKED 2
+#define MIPI_DSI_FMT_RGB565 3
+
+#endif /* __DT_BINDINGS_DRM__ */
diff --git a/include/dt-bindings/media/xilinx-vip.h b/include/dt-bindings/media/xilinx-vip.h
index 94ed3edfcc70..beb50a7483bc 100644
--- a/include/dt-bindings/media/xilinx-vip.h
+++ b/include/dt-bindings/media/xilinx-vip.h
@@ -32,5 +32,11 @@
#define XVIP_VF_CUSTOM2 13
#define XVIP_VF_CUSTOM3 14
#define XVIP_VF_CUSTOM4 15
+#define XVIP_VF_VUY_422 16
+#define XVIP_VF_BGRX 17
+#define XVIP_VF_YUVX 18
+#define XVIP_VF_XBGR 19
+#define XVIP_VF_Y_GREY 20
+#define XVIP_VF_XRGB 21
#endif /* __DT_BINDINGS_MEDIA_XILINX_VIP_H__ */
diff --git a/include/dt-bindings/net/mscc-phy-vsc8531.h b/include/dt-bindings/net/mscc-phy-vsc8531.h
index 9eb2ec2b2ea9..242eb4c7ede9 100644
--- a/include/dt-bindings/net/mscc-phy-vsc8531.h
+++ b/include/dt-bindings/net/mscc-phy-vsc8531.h
@@ -28,4 +28,13 @@
#define VSC8531_FORCE_LED_OFF 14
#define VSC8531_FORCE_LED_ON 15
+#define VSC8531_RGMII_CLK_DELAY_0_2_NS 0
+#define VSC8531_RGMII_CLK_DELAY_0_8_NS 1
+#define VSC8531_RGMII_CLK_DELAY_1_1_NS 2
+#define VSC8531_RGMII_CLK_DELAY_1_7_NS 3
+#define VSC8531_RGMII_CLK_DELAY_2_0_NS 4
+#define VSC8531_RGMII_CLK_DELAY_2_3_NS 5
+#define VSC8531_RGMII_CLK_DELAY_2_6_NS 6
+#define VSC8531_RGMII_CLK_DELAY_3_4_NS 7
+
#endif
diff --git a/include/dt-bindings/phy/phy.h b/include/dt-bindings/phy/phy.h
index 1f3f866fae7b..f6bc83b66ae9 100644
--- a/include/dt-bindings/phy/phy.h
+++ b/include/dt-bindings/phy/phy.h
@@ -17,5 +17,6 @@
#define PHY_TYPE_USB3 4
#define PHY_TYPE_UFS 5
#define PHY_TYPE_DP 6
+#define PHY_TYPE_SGMII 7
#endif /* _DT_BINDINGS_PHY */
diff --git a/include/dt-bindings/pinctrl/pinctrl-zynqmp.h b/include/dt-bindings/pinctrl/pinctrl-zynqmp.h
new file mode 100644
index 000000000000..65522a1f032d
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-zynqmp.h
@@ -0,0 +1,36 @@
+/*
+ * MIO pin configuration defines for Xilinx ZynqMP
+ *
+ * Copyright (C) 2017 Xilinx, Inc.
+ * Author: Chirag Parekh <chirag.parekh@xilinx.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_ZYNQMP_H
+#define _DT_BINDINGS_PINCTRL_ZYNQMP_H
+
+/* Bit value for IO standards */
+#define IO_STANDARD_LVCMOS33 0
+#define IO_STANDARD_LVCMOS18 1
+
+/* Bit values for Slew Rates */
+#define SLEW_RATE_FAST 0
+#define SLEW_RATE_SLOW 1
+
+/* Bit values for Pin inputs */
+#define PIN_INPUT_TYPE_CMOS 0
+#define PIN_INPUT_TYPE_SCHMITT 1
+
+/* Bit values for drive control */
+#define DRIVE_STRENGTH_2MA 2
+#define DRIVE_STRENGTH_4MA 4
+#define DRIVE_STRENGTH_8MA 8
+#define DRIVE_STRENGTH_12MA 12
+
+#endif /* _DT_BINDINGS_PINCTRL_ZYNQMP_H */
diff --git a/include/dt-bindings/power/xlnx-versal-power.h b/include/dt-bindings/power/xlnx-versal-power.h
new file mode 100644
index 000000000000..1b75175edce5
--- /dev/null
+++ b/include/dt-bindings/power/xlnx-versal-power.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 - 2020 Xilinx, Inc.
+ */
+
+#ifndef _DT_BINDINGS_VERSAL_POWER_H
+#define _DT_BINDINGS_VERSAL_POWER_H
+
+#define PM_DEV_USB_0 (0x18224018U)
+#define PM_DEV_GEM_0 (0x18224019U)
+#define PM_DEV_GEM_1 (0x1822401aU)
+#define PM_DEV_SPI_0 (0x1822401bU)
+#define PM_DEV_SPI_1 (0x1822401cU)
+#define PM_DEV_I2C_0 (0x1822401dU)
+#define PM_DEV_I2C_1 (0x1822401eU)
+#define PM_DEV_CAN_FD_0 (0x1822401fU)
+#define PM_DEV_CAN_FD_1 (0x18224020U)
+#define PM_DEV_UART_0 (0x18224021U)
+#define PM_DEV_UART_1 (0x18224022U)
+#define PM_DEV_GPIO (0x18224023U)
+#define PM_DEV_TTC_0 (0x18224024U)
+#define PM_DEV_TTC_1 (0x18224025U)
+#define PM_DEV_TTC_2 (0x18224026U)
+#define PM_DEV_TTC_3 (0x18224027U)
+#define PM_DEV_SWDT_FPD (0x18224029U)
+#define PM_DEV_OSPI (0x1822402aU)
+#define PM_DEV_QSPI (0x1822402bU)
+#define PM_DEV_GPIO_PMC (0x1822402cU)
+#define PM_DEV_SDIO_0 (0x1822402eU)
+#define PM_DEV_SDIO_1 (0x1822402fU)
+#define PM_DEV_RTC (0x18224034U)
+#define PM_DEV_ADMA_0 (0x18224035U)
+#define PM_DEV_ADMA_1 (0x18224036U)
+#define PM_DEV_ADMA_2 (0x18224037U)
+#define PM_DEV_ADMA_3 (0x18224038U)
+#define PM_DEV_ADMA_4 (0x18224039U)
+#define PM_DEV_ADMA_5 (0x1822403aU)
+#define PM_DEV_ADMA_6 (0x1822403bU)
+#define PM_DEV_ADMA_7 (0x1822403cU)
+#define PM_DEV_AI (0x18224072U)
+
+#endif
diff --git a/include/linux/clk/zynq.h b/include/linux/clk/zynq.h
index a198dd9255a4..d1135756aedf 100644
--- a/include/linux/clk/zynq.h
+++ b/include/linux/clk/zynq.h
@@ -9,6 +9,10 @@
#include <linux/spinlock.h>
+int zynq_clk_suspend_early(void);
+void zynq_clk_resume_late(void);
+void zynq_clk_topswitch_enable(void);
+void zynq_clk_topswitch_disable(void);
void zynq_clock_init(void);
struct clk *clk_register_zynq_pll(const char *name, const char *parent,
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 77d70b633531..961c2ba38b02 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -103,6 +103,7 @@ enum cpuhp_state {
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_IRQ_MIPS_GIC_STARTING,
CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
+ CPUHP_AP_IRQ_XILINX_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
CPUHP_AP_MICROCODE_LOADER,
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
@@ -132,6 +133,7 @@ enum cpuhp_state {
CPUHP_AP_ARC_TIMER_STARTING,
CPUHP_AP_RISCV_TIMER_STARTING,
CPUHP_AP_CSKY_TIMER_STARTING,
+ CPUHP_AP_MICROBLAZE_TIMER_STARTING,
CPUHP_AP_HYPERV_TIMER_STARTING,
CPUHP_AP_KVM_STARTING,
CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
diff --git a/include/linux/dma/xilinx_frmbuf.h b/include/linux/dma/xilinx_frmbuf.h
new file mode 100644
index 000000000000..54aa21e6fbec
--- /dev/null
+++ b/include/linux/dma/xilinx_frmbuf.h
@@ -0,0 +1,209 @@
+/*
+ * Xilinx Framebuffer DMA support header file
+ *
+ * Copyright (C) 2017 Xilinx, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __XILINX_FRMBUF_DMA_H
+#define __XILINX_FRMBUF_DMA_H
+
+#include <linux/dmaengine.h>
+
+/* Modes to enable early callback */
+/* To avoid first frame delay */
+#define EARLY_CALLBACK BIT(1)
+/* Give callback at start of descriptor processing */
+#define EARLY_CALLBACK_START_DESC BIT(2)
+/**
+ * enum vid_frmwork_type - Linux video framework type
+ * @XDMA_DRM: fourcc is of type DRM
+ * @XDMA_V4L2: fourcc is of type V4L2
+ */
+enum vid_frmwork_type {
+ XDMA_DRM = 0,
+ XDMA_V4L2,
+};
+
+/**
+ * enum operation_mode - FB IP control register field settings to select mode
+ * @DEFAULT : Use default mode, No explicit bit field settings required.
+ * @AUTO_RESTART : Use auto-restart mode by setting BIT(7) of control register.
+ */
+enum operation_mode {
+ DEFAULT = 0x0,
+ AUTO_RESTART = BIT(7),
+};
+
+#if IS_ENABLED(CONFIG_XILINX_FRMBUF)
+/**
+ * xilinx_xdma_set_mode - Set operation mode for framebuffer IP
+ * @chan: dma channel instance
+ * @mode: Framebuffer IP operation mode.
+ * This routine is used when utilizing "video format aware" Xilinx DMA IP
+ * (such as Video Framebuffer Read or Video Framebuffer Write). This call
+ * must be made prior to dma_async_issue_pending(). This routine should be
+ * called by client driver to set the operation mode for framebuffer IP based
+ * upon the use-case, for e.g. for non-streaming usecases (like MEM2MEM) it's
+ * more appropriate to use default mode unlike streaming usecases where
+ * auto-restart mode is more suitable.
+ *
+ * The mode may be either DEFAULT or AUTO_RESTART (free running).
+ */
+void xilinx_xdma_set_mode(struct dma_chan *chan, enum operation_mode mode);
+
+/**
+ * xilinx_xdma_drm_config - configure video format in video aware DMA
+ * @chan: dma channel instance
+ * @drm_fourcc: DRM fourcc code describing the memory layout of video data
+ *
+ * This routine is used when utilizing "video format aware" Xilinx DMA IP
+ * (such as Video Framebuffer Read or Video Framebuffer Write). This call
+ * must be made prior to dma_async_issue_pending() to establish the video
+ * data memory format within the hardware DMA.
+ */
+void xilinx_xdma_drm_config(struct dma_chan *chan, u32 drm_fourcc);
+
+/**
+ * xilinx_xdma_v4l2_config - configure video format in video aware DMA
+ * @chan: dma channel instance
+ * @v4l2_fourcc: V4L2 fourcc code describing the memory layout of video data
+ *
+ * This routine is used when utilizing "video format aware" Xilinx DMA IP
+ * (such as Video Framebuffer Read or Video Framebuffer Write). This call
+ * must be made prior to dma_async_issue_pending() to establish the video
+ * data memory format within the hardware DMA.
+ */
+void xilinx_xdma_v4l2_config(struct dma_chan *chan, u32 v4l2_fourcc);
+
+/**
+ * xilinx_xdma_get_drm_vid_fmts - obtain list of supported DRM mem formats
+ * @chan: dma channel instance
+ * @fmt_cnt: Output param - total count of supported DRM fourcc codes
+ * @fmts: Output param - pointer to array of DRM fourcc codes (not a copy)
+ *
+ * Return: a reference to an array of DRM fourcc codes supported by this
+ * instance of the Video Framebuffer Driver
+ */
+int xilinx_xdma_get_drm_vid_fmts(struct dma_chan *chan, u32 *fmt_cnt,
+ u32 **fmts);
+
+/**
+ * xilinx_xdma_get_v4l2_vid_fmts - obtain list of supported V4L2 mem formats
+ * @chan: dma channel instance
+ * @fmt_cnt: Output param - total count of supported V4L2 fourcc codes
+ * @fmts: Output param - pointer to array of V4L2 fourcc codes (not a copy)
+ *
+ * Return: a reference to an array of V4L2 fourcc codes supported by this
+ * instance of the Video Framebuffer Driver
+ */
+int xilinx_xdma_get_v4l2_vid_fmts(struct dma_chan *chan, u32 *fmt_cnt,
+ u32 **fmts);
+
+/**
+ * xilinx_xdma_get_fid - Get the Field ID of the buffer received.
+ * This function should be called from the callback function registered
+ * per descriptor in prep_interleaved.
+ *
+ * @chan: dma channel instance
+ * @async_tx: descriptor whose parent structure contains fid.
+ * @fid: Output param - Field ID of the buffer. 0 - even, 1 - odd.
+ *
+ * Return: 0 on success, -EINVAL in case of invalid chan
+ */
+int xilinx_xdma_get_fid(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *async_tx, u32 *fid);
+
+/**
+ * xilinx_xdma_set_fid - Set the Field ID of the buffer to be transmitted
+ * @chan: dma channel instance
+ * @async_tx: dma async tx descriptor for the buffer
+ * @fid: Field ID of the buffer. 0 - even, 1 - odd.
+ *
+ * Return: 0 on success, -EINVAL in case of invalid chan
+ */
+int xilinx_xdma_set_fid(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *async_tx, u32 fid);
+
+/**
+ * xilinx_xdma_get_earlycb - Get info if early callback has been enabled.
+ *
+ * @chan: dma channel instance
+ * @async_tx: descriptor whose parent structure contains fid.
+ * @earlycb: Output param - Early callback mode
+ *
+ * Return: 0 on success, -EINVAL in case of invalid chan
+ */
+int xilinx_xdma_get_earlycb(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *async_tx,
+ u32 *earlycb);
+
+/**
+ * xilinx_xdma_set_earlycb - Enable/Disable early callback
+ * @chan: dma channel instance
+ * @async_tx: dma async tx descriptor for the buffer
+ * @earlycb: Enable early callback mode for descriptor
+ *
+ * Return: 0 on success, -EINVAL in case of invalid chan
+ */
+int xilinx_xdma_set_earlycb(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *async_tx,
+ u32 earlycb);
+#else
+static inline void xilinx_xdma_set_mode(struct dma_chan *chan,
+ enum operation_mode mode)
+{ }
+
+static inline void xilinx_xdma_drm_config(struct dma_chan *chan, u32 drm_fourcc)
+{ }
+
+static inline void xilinx_xdma_v4l2_config(struct dma_chan *chan,
+ u32 v4l2_fourcc)
+{ }
+
+static inline int xilinx_xdma_get_drm_vid_fmts(struct dma_chan *chan,
+ u32 *fmt_cnt, u32 **fmts)
+{
+ return -ENODEV;
+}
+
+static inline int xilinx_xdma_get_v4l2_vid_fmts(struct dma_chan *chan,
+ u32 *fmt_cnt,u32 **fmts)
+{
+ return -ENODEV;
+}
+
+static inline int xilinx_xdma_get_fid(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *async_tx,
+ u32 *fid)
+{
+ return -ENODEV;
+}
+
+static inline int xilinx_xdma_set_fid(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *async_tx,
+ u32 fid)
+{
+ return -ENODEV;
+}
+
+static inline int xilinx_xdma_get_earlycb(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *atx,
+ u32 *earlycb)
+{
+ return -ENODEV;
+}
+
+static inline int xilinx_xdma_set_earlycb(struct dma_chan *chan,
+ struct dma_async_tx_descriptor *atx,
+ u32 earlycb)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /*__XILINX_FRMBUF_DMA_H*/
diff --git a/include/linux/dma/xilinx_ps_pcie_dma.h b/include/linux/dma/xilinx_ps_pcie_dma.h
new file mode 100644
index 000000000000..7c9912bd490e
--- /dev/null
+++ b/include/linux/dma/xilinx_ps_pcie_dma.h
@@ -0,0 +1,69 @@
+/*
+ * Xilinx PS PCIe DMA Engine support header file
+ *
+ * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation
+ */
+
+#ifndef __DMA_XILINX_PS_PCIE_H
+#define __DMA_XILINX_PS_PCIE_H
+
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+
+#define XLNX_PLATFORM_DRIVER_NAME "xlnx-platform-dma-driver"
+
+#define ZYNQMP_DMA_DEVID (0xD024)
+#define ZYNQMP_RC_DMA_DEVID (0xD021)
+
+#define MAX_ALLOWED_CHANNELS_IN_HW 4
+
+#define MAX_NUMBER_OF_CHANNELS MAX_ALLOWED_CHANNELS_IN_HW
+
+#define DEFAULT_DMA_QUEUES 4
+#define TWO_DMA_QUEUES 2
+
+#define NUMBER_OF_BUFFER_DESCRIPTORS 1999
+#define MAX_DESCRIPTORS 65536
+
+#define CHANNEL_COAELSE_COUNT 0
+
+#define CHANNEL_POLL_TIMER_FREQUENCY 1000 /* in milli seconds */
+
+#define PCIE_AXI_DIRECTION DMA_TO_DEVICE
+#define AXI_PCIE_DIRECTION DMA_FROM_DEVICE
+
+/**
+ * struct BAR_PARAMS - PCIe Bar Parameters
+ * @BAR_PHYS_ADDR: PCIe BAR Physical address
+ * @BAR_LENGTH: Length of PCIe BAR
+ * @BAR_VIRT_ADDR: Virtual Address to access PCIe BAR
+ */
+struct BAR_PARAMS {
+ dma_addr_t BAR_PHYS_ADDR; /**< Base physical address of BAR memory */
+ unsigned long BAR_LENGTH; /**< Length of BAR memory window */
+ void *BAR_VIRT_ADDR; /**< Virtual Address of mapped BAR memory */
+};
+
+/**
+ * struct ps_pcie_dma_channel_match - Match structure for dma clients
+ * @pci_vendorid: PCIe Vendor id of PS PCIe DMA device
+ * @pci_deviceid: PCIe Device id of PS PCIe DMA device
+ * @board_number: Unique id to identify individual device in a system
+ * @channel_number: Unique channel number of the device
+ * @direction: DMA channel direction
+ * @bar_params: Pointer to BAR_PARAMS for accessing application specific data
+ */
+struct ps_pcie_dma_channel_match {
+ u16 pci_vendorid;
+ u16 pci_deviceid;
+ u16 board_number;
+ u16 channel_number;
+ enum dma_data_direction direction;
+ struct BAR_PARAMS *bar_params;
+};
+
+#endif
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index 8efa5ac22d7e..f6f511617970 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -13,6 +13,8 @@
#ifndef __FIRMWARE_ZYNQMP_H__
#define __FIRMWARE_ZYNQMP_H__
+#include <linux/device.h>
+
#define ZYNQMP_PM_VERSION_MAJOR 1
#define ZYNQMP_PM_VERSION_MINOR 0
@@ -27,10 +29,15 @@
/* SMC SIP service Call Function Identifier Prefix */
#define PM_SIP_SVC 0xC2000000
+
+/* ATF only commands */
#define PM_GET_TRUSTZONE_VERSION 0xa03
#define PM_SET_SUSPEND_MODE 0xa02
#define GET_CALLBACK_DATA 0xa01
+/* Loader commands */
+#define PM_LOAD_PDI 0x701
+
/* Number of 32bits values in payload */
#define PAYLOAD_ARG_CNT 4U
@@ -40,7 +47,17 @@
/* Payload size (consists of callback API ID + arguments) */
#define CB_PAYLOAD_SIZE (CB_ARG_CNT + 1)
-#define ZYNQMP_PM_MAX_QOS 100U
+#define ZYNQMP_PM_MAX_LATENCY (~0U)
+#define ZYNQMP_PM_MAX_QOS 100U
+
+/* Usage status, returned by PmGetNodeStatus */
+#define PM_USAGE_NO_MASTER 0x0U
+#define PM_USAGE_CURRENT_MASTER 0x1U
+#define PM_USAGE_OTHER_MASTER 0x2U
+#define PM_USAGE_BOTH_MASTERS (PM_USAGE_CURRENT_MASTER | \
+ PM_USAGE_OTHER_MASTER)
+
+#define GSS_NUM_REGS (4)
/* Node capabilities */
#define ZYNQMP_PM_CAPABILITY_ACCESS 0x1U
@@ -59,19 +76,49 @@
*/
#define XILINX_ZYNQMP_PM_FPGA_FULL 0x0U
#define XILINX_ZYNQMP_PM_FPGA_PARTIAL BIT(0)
+#define XILINX_ZYNQMP_PM_FPGA_AUTHENTICATION_DDR BIT(1)
+#define XILINX_ZYNQMP_PM_FPGA_AUTHENTICATION_OCM BIT(2)
+#define XILINX_ZYNQMP_PM_FPGA_ENCRYPTION_USERKEY BIT(3)
+#define XILINX_ZYNQMP_PM_FPGA_ENCRYPTION_DEVKEY BIT(4)
enum pm_api_id {
PM_GET_API_VERSION = 1,
- PM_REQUEST_NODE = 13,
+ PM_SET_CONFIGURATION,
+ PM_GET_NODE_STATUS,
+ PM_GET_OPERATING_CHARACTERISTIC,
+ PM_REGISTER_NOTIFIER,
+ /* API for suspending */
+ PM_REQUEST_SUSPEND,
+ PM_SELF_SUSPEND,
+ PM_FORCE_POWERDOWN,
+ PM_ABORT_SUSPEND,
+ PM_REQUEST_WAKEUP,
+ PM_SET_WAKEUP_SOURCE,
+ PM_SYSTEM_SHUTDOWN,
+ /* API for managing PM slaves: */
+ PM_REQUEST_NODE,
PM_RELEASE_NODE,
PM_SET_REQUIREMENT,
- PM_RESET_ASSERT = 17,
+ PM_SET_MAX_LATENCY,
+ /* Direct control API functions: */
+ PM_RESET_ASSERT,
PM_RESET_GET_STATUS,
PM_PM_INIT_FINALIZE = 21,
PM_FPGA_LOAD,
PM_FPGA_GET_STATUS,
PM_GET_CHIPID = 24,
- PM_IOCTL = 34,
+	/* ID 25 is used by U-boot to process secure boot images */
+ /* Secure library generic API functions */
+ PM_SECURE_SHA = 26,
+ PM_SECURE_RSA,
+ /* Pin control API functions */
+ PM_PINCTRL_REQUEST,
+ PM_PINCTRL_RELEASE,
+ PM_PINCTRL_GET_FUNCTION,
+ PM_PINCTRL_SET_FUNCTION,
+ PM_PINCTRL_CONFIG_PARAM_GET,
+ PM_PINCTRL_CONFIG_PARAM_SET,
+ PM_IOCTL,
PM_QUERY_DATA,
PM_CLOCK_ENABLE,
PM_CLOCK_DISABLE,
@@ -82,7 +129,12 @@ enum pm_api_id {
PM_CLOCK_GETRATE,
PM_CLOCK_SETPARENT,
PM_CLOCK_GETPARENT,
- PM_SECURE_AES = 47,
+ PM_SECURE_IMAGE,
+ PM_FPGA_READ = 46,
+ PM_SECURE_AES,
+ /* PM_REGISTER_ACCESS API */
+ PM_REGISTER_ACCESS = 52,
+ PM_EFUSE_ACCESS = 53,
PM_FEATURE_CHECK = 63,
PM_API_MAX,
};
@@ -101,12 +153,33 @@ enum pm_ret_status {
};
enum pm_ioctl_id {
- IOCTL_SD_DLL_RESET = 6,
+ IOCTL_GET_RPU_OPER_MODE,
+ IOCTL_SET_RPU_OPER_MODE,
+ IOCTL_RPU_BOOT_ADDR_CONFIG,
+ IOCTL_TCM_COMB_CONFIG,
+ IOCTL_SET_TAPDELAY_BYPASS,
+ IOCTL_SET_SGMII_MODE,
+ IOCTL_SD_DLL_RESET,
IOCTL_SET_SD_TAPDELAY,
IOCTL_SET_PLL_FRAC_MODE,
IOCTL_GET_PLL_FRAC_MODE,
IOCTL_SET_PLL_FRAC_DATA,
IOCTL_GET_PLL_FRAC_DATA,
+ IOCTL_WRITE_GGS,
+ IOCTL_READ_GGS,
+ IOCTL_WRITE_PGGS,
+ IOCTL_READ_PGGS,
+ /* IOCTL for ULPI reset */
+ IOCTL_ULPI_RESET,
+	/* Set healthy bit value */
+ IOCTL_SET_BOOT_HEALTH_STATUS,
+ IOCTL_AFI,
+ /* Probe counter read/write */
+ IOCTL_PROBE_COUNTER_READ,
+ IOCTL_PROBE_COUNTER_WRITE,
+ IOCTL_OSPI_MUX_SELECT,
+ /* IOCTL for USB power request */
+ IOCTL_USB_SET_STATE,
};
enum pm_query_id {
@@ -116,7 +189,13 @@ enum pm_query_id {
PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS,
PM_QID_CLOCK_GET_PARENTS,
PM_QID_CLOCK_GET_ATTRIBUTES,
- PM_QID_CLOCK_GET_NUM_CLOCKS = 12,
+ PM_QID_PINCTRL_GET_NUM_PINS,
+ PM_QID_PINCTRL_GET_NUM_FUNCTIONS,
+ PM_QID_PINCTRL_GET_NUM_FUNCTION_GROUPS,
+ PM_QID_PINCTRL_GET_FUNCTION_NAME,
+ PM_QID_PINCTRL_GET_FUNCTION_GROUPS,
+ PM_QID_PINCTRL_GET_PIN_GROUPS,
+ PM_QID_CLOCK_GET_NUM_CLOCKS,
PM_QID_CLOCK_GET_MAX_DIVISOR,
};
@@ -251,6 +330,13 @@ enum zynqmp_pm_reset {
ZYNQMP_PM_RESET_END = ZYNQMP_PM_RESET_PS_PL3
};
+enum zynqmp_pm_abort_reason {
+ ZYNQMP_PM_ABORT_REASON_WAKEUP_EVENT = 100,
+ ZYNQMP_PM_ABORT_REASON_POWER_UNIT_BUSY,
+ ZYNQMP_PM_ABORT_REASON_NO_POWERDOWN,
+ ZYNQMP_PM_ABORT_REASON_UNKNOWN,
+};
+
enum zynqmp_pm_suspend_reason {
SUSPEND_POWER_REQUEST = 201,
SUSPEND_ALERT,
@@ -263,11 +349,6 @@ enum zynqmp_pm_request_ack {
ZYNQMP_PM_REQUEST_ACK_NON_BLOCKING,
};
-enum pm_node_id {
- NODE_SD_0 = 39,
- NODE_SD_1,
-};
-
enum tap_delay_type {
PM_TAPDELAY_INPUT = 0,
PM_TAPDELAY_OUTPUT,
@@ -279,6 +360,192 @@ enum dll_reset_type {
PM_DLL_RESET_PULSE,
};
+enum pm_pinctrl_config_param {
+ PM_PINCTRL_CONFIG_SLEW_RATE,
+ PM_PINCTRL_CONFIG_BIAS_STATUS,
+ PM_PINCTRL_CONFIG_PULL_CTRL,
+ PM_PINCTRL_CONFIG_SCHMITT_CMOS,
+ PM_PINCTRL_CONFIG_DRIVE_STRENGTH,
+ PM_PINCTRL_CONFIG_VOLTAGE_STATUS,
+ PM_PINCTRL_CONFIG_TRI_STATE,
+ PM_PINCTRL_CONFIG_MAX,
+};
+
+enum pm_pinctrl_slew_rate {
+ PM_PINCTRL_SLEW_RATE_FAST,
+ PM_PINCTRL_SLEW_RATE_SLOW,
+};
+
+enum pm_pinctrl_bias_status {
+ PM_PINCTRL_BIAS_DISABLE,
+ PM_PINCTRL_BIAS_ENABLE,
+};
+
+enum pm_pinctrl_pull_ctrl {
+ PM_PINCTRL_BIAS_PULL_DOWN,
+ PM_PINCTRL_BIAS_PULL_UP,
+};
+
+enum pm_pinctrl_schmitt_cmos {
+ PM_PINCTRL_INPUT_TYPE_CMOS,
+ PM_PINCTRL_INPUT_TYPE_SCHMITT,
+};
+
+enum zynqmp_pm_opchar_type {
+ ZYNQMP_PM_OPERATING_CHARACTERISTIC_POWER = 1,
+ ZYNQMP_PM_OPERATING_CHARACTERISTIC_ENERGY,
+ ZYNQMP_PM_OPERATING_CHARACTERISTIC_TEMPERATURE,
+};
+
+enum pm_pinctrl_drive_strength {
+ PM_PINCTRL_DRIVE_STRENGTH_2MA,
+ PM_PINCTRL_DRIVE_STRENGTH_4MA,
+ PM_PINCTRL_DRIVE_STRENGTH_8MA,
+ PM_PINCTRL_DRIVE_STRENGTH_12MA,
+};
+
+enum pm_pinctrl_tri_state {
+ PM_PINCTRL_TRI_STATE_DISABLE = 0,
+ PM_PINCTRL_TRI_STATE_ENABLE,
+};
+
+enum zynqmp_pm_shutdown_type {
+ ZYNQMP_PM_SHUTDOWN_TYPE_SHUTDOWN,
+ ZYNQMP_PM_SHUTDOWN_TYPE_RESET,
+ ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY,
+};
+
+enum zynqmp_pm_shutdown_subtype {
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM,
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY,
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM,
+};
+
+enum rpu_oper_mode {
+ PM_RPU_MODE_LOCKSTEP,
+ PM_RPU_MODE_SPLIT,
+};
+
+enum rpu_boot_mem {
+ PM_RPU_BOOTMEM_LOVEC,
+ PM_RPU_BOOTMEM_HIVEC,
+};
+
+enum rpu_tcm_comb {
+ PM_RPU_TCM_SPLIT,
+ PM_RPU_TCM_COMB,
+};
+
+enum tap_delay_signal_type {
+ PM_TAPDELAY_NAND_DQS_IN,
+ PM_TAPDELAY_NAND_DQS_OUT,
+ PM_TAPDELAY_QSPI,
+ PM_TAPDELAY_MAX,
+};
+
+enum tap_delay_bypass_ctrl {
+ PM_TAPDELAY_BYPASS_DISABLE,
+ PM_TAPDELAY_BYPASS_ENABLE,
+};
+
+enum sgmii_mode {
+ PM_SGMII_DISABLE,
+ PM_SGMII_ENABLE,
+};
+
+enum pm_register_access_id {
+ CONFIG_REG_WRITE,
+ CONFIG_REG_READ,
+};
+
+enum ospi_mux_select_type {
+ PM_OSPI_MUX_SEL_DMA,
+ PM_OSPI_MUX_SEL_LINEAR,
+ PM_OSPI_MUX_GET_MODE,
+};
+
+enum pm_node_id {
+ NODE_UNKNOWN = 0,
+ NODE_APU,
+ NODE_APU_0,
+ NODE_APU_1,
+ NODE_APU_2,
+ NODE_APU_3,
+ NODE_RPU,
+ NODE_RPU_0,
+ NODE_RPU_1,
+ NODE_PLD,
+ NODE_FPD,
+ NODE_OCM_BANK_0,
+ NODE_OCM_BANK_1,
+ NODE_OCM_BANK_2,
+ NODE_OCM_BANK_3,
+ NODE_TCM_0_A,
+ NODE_TCM_0_B,
+ NODE_TCM_1_A,
+ NODE_TCM_1_B,
+ NODE_L2,
+ NODE_GPU_PP_0,
+ NODE_GPU_PP_1,
+ NODE_USB_0,
+ NODE_USB_1,
+ NODE_TTC_0,
+ NODE_TTC_1,
+ NODE_TTC_2,
+ NODE_TTC_3,
+ NODE_SATA,
+ NODE_ETH_0,
+ NODE_ETH_1,
+ NODE_ETH_2,
+ NODE_ETH_3,
+ NODE_UART_0,
+ NODE_UART_1,
+ NODE_SPI_0,
+ NODE_SPI_1,
+ NODE_I2C_0,
+ NODE_I2C_1,
+ NODE_SD_0,
+ NODE_SD_1,
+ NODE_DP,
+ NODE_GDMA,
+ NODE_ADMA,
+ NODE_NAND,
+ NODE_QSPI,
+ NODE_GPIO,
+ NODE_CAN_0,
+ NODE_CAN_1,
+ NODE_EXTERN,
+ NODE_APLL,
+ NODE_VPLL,
+ NODE_DPLL,
+ NODE_RPLL,
+ NODE_IOPLL,
+ NODE_DDR,
+ NODE_IPI_APU,
+ NODE_IPI_RPU_0,
+ NODE_GPU,
+ NODE_PCIE,
+ NODE_PCAP,
+ NODE_RTC,
+ NODE_LPD,
+ NODE_VCU,
+ NODE_IPI_RPU_1,
+ NODE_IPI_PL_0,
+ NODE_IPI_PL_1,
+ NODE_IPI_PL_2,
+ NODE_IPI_PL_3,
+ NODE_PL,
+ NODE_GEM_TSU,
+ NODE_SWDT_0,
+ NODE_SWDT_1,
+ NODE_CSU,
+ NODE_PJTAG,
+ NODE_TRACE,
+ NODE_TESTSCAN,
+ NODE_PMU,
+ NODE_MAX,
+};
+
/**
* struct zynqmp_pm_query_data - PM query data
* @qid: query ID
@@ -323,12 +590,50 @@ struct zynqmp_eemi_ops {
const u32 capabilities,
const u32 qos,
const enum zynqmp_pm_request_ack ack);
+ int (*fpga_read)(const u32 reg_numframes, const u64 phys_address,
+ u32 readback_type, u32 *value);
+ int (*sha_hash)(const u64 address, const u32 size, const u32 flags);
+ int (*rsa)(const u64 address, const u32 size, const u32 flags);
+ int (*request_suspend)(const u32 node,
+ const enum zynqmp_pm_request_ack ack,
+ const u32 latency,
+ const u32 state);
+ int (*force_powerdown)(const u32 target,
+ const enum zynqmp_pm_request_ack ack);
+ int (*request_wakeup)(const u32 node,
+ const bool set_addr,
+ const u64 address,
+ const enum zynqmp_pm_request_ack ack);
+ int (*set_wakeup_source)(const u32 target,
+ const u32 wakeup_node,
+ const u32 enable);
+ int (*system_shutdown)(const u32 type, const u32 subtype);
+ int (*set_max_latency)(const u32 node, const u32 latency);
+ int (*set_configuration)(const u32 physical_addr);
+ int (*get_node_status)(const u32 node, u32 *const status,
+ u32 *const requirements, u32 *const usage);
+ int (*get_operating_characteristic)(const u32 node,
+ const enum zynqmp_pm_opchar_type
+ type, u32 *const result);
+ int (*pinctrl_request)(const u32 pin);
+ int (*pinctrl_release)(const u32 pin);
+ int (*pinctrl_get_function)(const u32 pin, u32 *id);
+ int (*pinctrl_set_function)(const u32 pin, const u32 id);
+ int (*pinctrl_get_config)(const u32 pin, const u32 param, u32 *value);
+ int (*pinctrl_set_config)(const u32 pin, const u32 param, u32 value);
+ int (*register_access)(u32 register_access_id, u32 address,
+ u32 mask, u32 value, u32 *out);
int (*aes)(const u64 address, u32 *out);
+ int (*efuse_access)(const u64 address, u32 *out);
+ int (*secure_image)(const u64 src_addr, u64 key_addr, u64 *dst);
+ int (*pdi_load)(const u32 src, const u64 address);
};
int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
u32 arg2, u32 arg3, u32 *ret_payload);
+int zynqmp_pm_ggs_init(struct kobject *parent_kobj);
+
#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE)
const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void);
#else
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index e8ca62b2cb5b..bd67af3b5451 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -9,8 +9,11 @@
#define _LINUX_FPGA_MGR_H
#include <linux/mutex.h>
+#include <linux/miscdevice.h>
#include <linux/platform_device.h>
+#define ENCRYPTED_KEY_LEN 64 /* Bytes */
+
struct fpga_manager;
struct sg_table;
@@ -62,17 +65,29 @@ enum fpga_mgr_states {
*
* %FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting
*
- * %FPGA_MGR_ENCRYPTED_BITSTREAM: indicates bitstream is encrypted
+ * %FPGA_MGR_ENCRYPTED_BITSTREAM: indicates bitstream is encrypted with
+ * device key
*
* %FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first
*
* %FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed
+ *
+ * %FPGA_MGR_USERKEY_ENCRYPTED_BITSTREAM: indicates bitstream is encrypted with
+ * user key
+ * %FPGA_MGR_DDR_MEM_AUTH_BITSTREAM: do bitstream authentication using DDR
+ * memory if supported
+ * %FPGA_MGR_SECURE_MEM_AUTH_BITSTREAM: do bitstream authentication using secure
+ * memory if supported
*/
#define FPGA_MGR_PARTIAL_RECONFIG BIT(0)
#define FPGA_MGR_EXTERNAL_CONFIG BIT(1)
#define FPGA_MGR_ENCRYPTED_BITSTREAM BIT(2)
#define FPGA_MGR_BITSTREAM_LSB_FIRST BIT(3)
#define FPGA_MGR_COMPRESSED_BITSTREAM BIT(4)
+#define FPGA_MGR_USERKEY_ENCRYPTED_BITSTREAM BIT(5)
+#define FPGA_MGR_DDR_MEM_AUTH_BITSTREAM BIT(6)
+#define FPGA_MGR_SECURE_MEM_AUTH_BITSTREAM BIT(7)
+#define FPGA_MGR_CONFIG_DMA_BUF BIT(8)
/**
* struct fpga_image_info - information specific to a FPGA image
@@ -82,6 +97,7 @@ enum fpga_mgr_states {
* @config_complete_timeout_us: maximum time for FPGA to switch to operating
* status in the write_complete op.
* @firmware_name: name of FPGA image firmware file
+ * @key: key value useful for Encrypted Bitstream loading to read the userkey
* @sgt: scatter/gather table containing FPGA image
* @buf: contiguous buffer containing FPGA image
* @count: size of buf
@@ -95,6 +111,7 @@ struct fpga_image_info {
u32 disable_timeout_us;
u32 config_complete_timeout_us;
char *firmware_name;
+ char key[ENCRYPTED_KEY_LEN];
struct sg_table *sgt;
const char *buf;
size_t count;
@@ -114,6 +131,7 @@ struct fpga_image_info {
* @write: write count bytes of configuration data to the FPGA
* @write_sg: write the scatter list of configuration data to the FPGA
* @write_complete: set FPGA to operating state after writing is done
+ * @read: optional: read FPGA configuration information
* @fpga_remove: optional: Set FPGA into a specific state during driver remove
* @groups: optional attribute groups.
*
@@ -132,6 +150,7 @@ struct fpga_manager_ops {
int (*write_sg)(struct fpga_manager *mgr, struct sg_table *sgt);
int (*write_complete)(struct fpga_manager *mgr,
struct fpga_image_info *info);
+ int (*read)(struct fpga_manager *mgr, struct seq_file *s);
void (*fpga_remove)(struct fpga_manager *mgr);
const struct attribute_group **groups;
};
@@ -142,6 +161,12 @@ struct fpga_manager_ops {
#define FPGA_MGR_STATUS_INCOMPATIBLE_IMAGE_ERR BIT(2)
#define FPGA_MGR_STATUS_IP_PROTOCOL_ERR BIT(3)
#define FPGA_MGR_STATUS_FIFO_OVERFLOW_ERR BIT(4)
+#define FPGA_MGR_STATUS_SECURITY_ERR BIT(5)
+#define FPGA_MGR_STATUS_DEVICE_INIT_ERR BIT(6)
+#define FPGA_MGR_STATUS_SIGNAL_ERR BIT(7)
+#define FPGA_MGR_STATUS_HIGH_Z_STATE_ERR BIT(8)
+#define FPGA_MGR_STATUS_EOS_ERR BIT(9)
+#define FPGA_MGR_STATUS_FIRMWARE_REQ_ERR BIT(10)
/**
* struct fpga_compat_id - id for compatibility check
@@ -157,21 +182,31 @@ struct fpga_compat_id {
/**
* struct fpga_manager - fpga manager structure
* @name: name of low level fpga manager
+ * @flags: flags determines the type of Bitstream
+ * @key: key value useful for Encrypted Bitstream loading to read the userkey
* @dev: fpga manager device
* @ref_mutex: only allows one reference to fpga manager
* @state: state of fpga manager
* @compat_id: FPGA manager id for compatibility check.
* @mops: pointer to struct of fpga manager ops
* @priv: low level driver private date
+ * @dir: debugfs image directory
*/
struct fpga_manager {
const char *name;
+ unsigned long flags;
+ char key[ENCRYPTED_KEY_LEN + 1];
struct device dev;
+ struct miscdevice miscdev;
+ struct dma_buf *dmabuf;
struct mutex ref_mutex;
enum fpga_mgr_states state;
struct fpga_compat_id *compat_id;
const struct fpga_manager_ops *mops;
void *priv;
+#ifdef CONFIG_FPGA_MGR_DEBUG_FS
+ struct dentry *dir;
+#endif
};
#define to_fpga_manager(d) container_of(d, struct fpga_manager, dev)
@@ -202,4 +237,6 @@ struct fpga_manager *devm_fpga_mgr_create(struct device *dev, const char *name,
const struct fpga_manager_ops *mops,
void *priv);
+#define FPGA_IOCTL_LOAD_DMA_BUFF _IOWR('R', 1, __u32)
+
#endif /*_LINUX_FPGA_MGR_H */
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 5686711b0f40..905be6c0a527 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -134,6 +134,9 @@ struct gic_chip_data;
void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
int gic_cpu_if_down(unsigned int gic_nr);
+void gic_raise_softirq(const struct cpumask *mask, unsigned int irq);
+
+void gic_set_cpu(unsigned int cpu, unsigned int irq);
void gic_cpu_save(struct gic_chip_data *gic);
void gic_cpu_restore(struct gic_chip_data *gic);
void gic_dist_save(struct gic_chip_data *gic);
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 2d1f4a61f4ac..30d9e2eaa801 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -73,7 +73,8 @@ struct mtd_oob_ops {
};
#define MTD_MAX_OOBFREE_ENTRIES_LARGE 32
-#define MTD_MAX_ECCPOS_ENTRIES_LARGE 640
+#define MTD_MAX_ECCPOS_ENTRIES_LARGE 1260
+
/**
* struct mtd_oob_region - oob region definition
* @offset: region offset
diff --git a/include/linux/mtd/onfi.h b/include/linux/mtd/onfi.h
index 339ac798568e..4cc752853c62 100644
--- a/include/linux/mtd/onfi.h
+++ b/include/linux/mtd/onfi.h
@@ -163,6 +163,7 @@ struct onfi_ext_param_page {
* @async_timing_mode: Supported asynchronous timing mode
* @vendor_revision: Vendor specific revision number
* @vendor: Vendor specific data
+ * @jedec_id: jedec id of the flash
*/
struct onfi_params {
int version;
@@ -171,8 +172,10 @@ struct onfi_params {
u16 tR;
u16 tCCS;
u16 async_timing_mode;
+ u16 src_sync_timing_mode;
u16 vendor_revision;
u8 vendor[88];
+ u8 jedec_id;
};
#endif /* __LINUX_MTD_ONFI_H */
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 1e76196f9829..088ab49dbd72 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -1285,6 +1285,24 @@ static inline bool nand_is_slc(struct nand_chip *chip)
return nanddev_bits_per_cell(&chip->base) == 1;
}
+/* return the supported asynchronous timing mode. */
+static inline int onfi_get_async_timing_mode(struct nand_chip *chip)
+{
+ if (!chip->parameters.onfi)
+ return ONFI_TIMING_MODE_UNKNOWN;
+
+ return chip->parameters.onfi->async_timing_mode;
+}
+
+/* return the supported synchronous timing mode. */
+static inline int onfi_get_sync_timing_mode(struct nand_chip *chip)
+{
+ if (!chip->parameters.onfi)
+ return ONFI_TIMING_MODE_UNKNOWN;
+
+ return le16_to_cpu(chip->parameters.onfi->src_sync_timing_mode);
+}
+
/**
* Check if the opcode's address should be sent only on the lower 8 bits
* @command: opcode to check
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 1e2af0ec1f03..e347ecbb0991 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -50,6 +50,9 @@
#define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */
#define SPINOR_OP_RDEAR 0xc8 /* Read Extended Address Register */
#define SPINOR_OP_WREAR 0xc5 /* Write Extended Address Register */
+#define SPINOR_OP_WRCR 0x81 /* Write Configuration register */
+
+#define SPI_NOR_MAX_ID_LEN 6
/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
#define SPINOR_OP_READ_4B 0x13 /* Read data bytes (low frequency) */
@@ -83,6 +86,7 @@
#define SPINOR_OP_WRDI 0x04 /* Write disable */
#define SPINOR_OP_AAI_WP 0xad /* Auto address increment word program */
+#define GLOBAL_BLKPROT_UNLK 0x98 /* Clear global write protection bits */
/* Used for S3AN flashes only */
#define SPINOR_OP_XSE 0x50 /* Sector erase */
#define SPINOR_OP_XPP 0x82 /* Page program */
@@ -98,12 +102,16 @@
/* Used for Spansion flashes only. */
#define SPINOR_OP_BRWR 0x17 /* Bank register write */
+#define SPINOR_OP_BRRD 0x16 /* Bank register read */
#define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */
/* Used for Micron flashes only. */
#define SPINOR_OP_RD_EVCR 0x65 /* Read EVCR register */
#define SPINOR_OP_WD_EVCR 0x61 /* Write EVCR register */
+/* For Micron flashes only */
+#define SPINOR_VCR_OCTAL_DDR 0xE7 /* VCR BYTE0 value for Octal DDR mode */
+
/* Status Register bits. */
#define SR_WIP BIT(0) /* Write in progress */
#define SR_WEL BIT(1) /* Write enable latch */
@@ -111,11 +119,17 @@
#define SR_BP0 BIT(2) /* Block protect 0 */
#define SR_BP1 BIT(3) /* Block protect 1 */
#define SR_BP2 BIT(4) /* Block protect 2 */
+#define SR_BP_BIT_OFFSET 2 /* Offset to Block protect 0 */
+#define SR_BP_BIT_MASK (SR_BP2 | SR_BP1 | SR_BP0)
#define SR_BP3 BIT(5) /* Block protect 3 */
#define SR_TB_BIT5 BIT(5) /* Top/Bottom protect */
#define SR_BP3_BIT6 BIT(6) /* Block protect 3 */
#define SR_TB_BIT6 BIT(6) /* Top/Bottom protect */
#define SR_SRWD BIT(7) /* SR write protect */
+/* Bit to determine whether protection starts from top or bottom */
+#define SR_BP_TB 0x20
+#define BP_BITS_FROM_SR(sr) (((sr) & SR_BP_BIT_MASK) >> SR_BP_BIT_OFFSET)
+#define M25P_MAX_LOCKABLE_SECTORS 64
/* Spansion/Cypress specific status bits */
#define SR_E_ERR BIT(5)
#define SR_P_ERR BIT(6)
@@ -133,6 +147,14 @@
#define FSR_P_ERR BIT(4) /* Program operation status */
#define FSR_PT_ERR BIT(1) /* Protection error bit */
+/* Extended/Bank Address Register bits */
+#define EAR_SEGMENT_MASK 0x7 /* 128 Mb segment mask */
+enum read_mode {
+ SPI_NOR_NORMAL = 0,
+ SPI_NOR_FAST,
+ SPI_NOR_DUAL,
+ SPI_NOR_QUAD,
+};
/* Status Register 2 bits. */
#define SR2_QUAD_EN_BIT1 BIT(1)
#define SR2_QUAD_EN_BIT7 BIT(7)
@@ -363,6 +385,7 @@ struct spi_nor {
struct spi_mem *spimem;
u8 *bouncebuf;
size_t bouncebuf_size;
+ struct spi_device *spi;
const struct flash_info *info;
const struct spi_nor_manufacturer *manufacturer;
u32 page_size;
@@ -371,11 +394,24 @@ struct spi_nor {
u8 read_opcode;
u8 read_dummy;
u8 program_opcode;
+ enum read_mode flash_read;
+ u32 jedec_id;
+ u16 curbank;
+ u16 n_sectors;
+ u32 sector_size;
enum spi_nor_protocol read_proto;
enum spi_nor_protocol write_proto;
enum spi_nor_protocol reg_proto;
bool sst_write_second;
u32 flags;
+ bool shift;
+ bool isparallel;
+ bool isstacked;
+ bool is_lock;
+ u8 device_id[SPI_NOR_MAX_ID_LEN];
+ bool is_addrvalid;
+ loff_t reg_addr;
+ struct delayed_work complete_work;
const struct spi_nor_controller_ops *controller_ops;
@@ -423,4 +459,6 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
*/
void spi_nor_restore(struct spi_nor *nor);
+int spi_nor_wait_till_ready(struct spi_nor *nor);
+
#endif
diff --git a/include/linux/phy/phy-zynqmp.h b/include/linux/phy/phy-zynqmp.h
new file mode 100644
index 000000000000..d4185461abfe
--- /dev/null
+++ b/include/linux/phy/phy-zynqmp.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx ZynqMP PHY header
+ *
+ * Copyright (C) 2016 Xilinx, Inc.
+ *
+ * Author: Anurag Kumar Vulisha <anuragku@xilinx.com>
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ */
+
+#ifndef _PHY_ZYNQMP_H_
+#define _PHY_ZYNQMP_H_
+
+struct phy;
+
+#if IS_ENABLED(CONFIG_PHY_XILINX_ZYNQMP)
+
+extern int xpsgtr_override_deemph(struct phy *phy, u8 plvl, u8 vlvl);
+extern int xpsgtr_margining_factor(struct phy *phy, u8 plvl, u8 vlvl);
+extern int xpsgtr_wait_pll_lock(struct phy *phy);
+int xpsgtr_usb_crst_assert(struct phy *phy);
+int xpsgtr_usb_crst_release(struct phy *phy);
+#else
+
+static inline int xpsgtr_override_deemph(struct phy *base, u8 plvl, u8 vlvl)
+{
+ return -ENODEV;
+}
+
+static inline int xpsgtr_margining_factor(struct phy *base, u8 plvl, u8 vlvl)
+{
+ return -ENODEV;
+}
+
+static inline int xpsgtr_wait_pll_lock(struct phy *phy)
+{
+ return -ENODEV;
+}
+
+static inline int xpsgtr_usb_crst_assert(struct phy *phy)
+{
+ return -ENODEV;
+}
+
+static inline int xpsgtr_usb_crst_release(struct phy *phy)
+{
+ return -ENODEV;
+}
+
+#endif
+
+#endif /* _PHY_ZYNQMP_H_ */
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 9c07d7958c53..dcdab75cdf1d 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -358,6 +358,8 @@ enum rsc_handling_status {
* @start: power on the device and boot it
* @stop: power off the device
* @kick: kick a virtqueue (virtqueue id given as a parameter)
+ * @peek_remote_kick: check if remote has kicked
+ * @ack_remote_kick: ack remote kick
* @da_to_va: optional platform hook to perform address translations
* @parse_fw: parse firmware to extract information (e.g. resource table)
* @handle_rsc: optional platform hook to handle vendor resources. Should return
@@ -376,6 +378,8 @@ struct rproc_ops {
int (*start)(struct rproc *rproc);
int (*stop)(struct rproc *rproc);
void (*kick)(struct rproc *rproc, int vqid);
+ bool (*peek_remote_kick)(struct rproc *rproc, char *buf, size_t *len);
+ void (*ack_remote_kick)(struct rproc *rproc);
void * (*da_to_va)(struct rproc *rproc, u64 da, size_t len);
int (*parse_fw)(struct rproc *rproc, const struct firmware *fw);
int (*handle_rsc)(struct rproc *rproc, u32 rsc_type, void *rsc,
@@ -484,6 +488,7 @@ struct rproc_dump_segment {
* @auto_boot: flag to indicate if remote processor should be auto-started
* @dump_segments: list of segments in the firmware
* @nb_vdev: number of vdev currently handled by rproc
+ * @sysfs_kick: allow kick remoteproc from sysfs
*/
struct rproc {
struct list_head node;
@@ -517,6 +522,7 @@ struct rproc {
bool auto_boot;
struct list_head dump_segments;
int nb_vdev;
+ int sysfs_kick;
u8 elf_class;
};
diff --git a/include/linux/soc/xilinx/zynqmp/fw.h b/include/linux/soc/xilinx/zynqmp/fw.h
new file mode 100644
index 000000000000..5b79a81d96e4
--- /dev/null
+++ b/include/linux/soc/xilinx/zynqmp/fw.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * Michal Simek <michal.simek@xilinx.com>
+ */
+
+#ifndef __SOC_ZYNQMP_FW_H__
+#define __SOC_ZYNQMP_FW_H__
+
+#include <linux/nvmem-consumer.h>
+
+enum {
+ ZYNQMP_SILICON_V1 = 0,
+ ZYNQMP_SILICON_V2,
+ ZYNQMP_SILICON_V3,
+ ZYNQMP_SILICON_V4,
+};
+
+static inline char *zynqmp_nvmem_get_silicon_version(struct device *dev,
+ const char *cname)
+{
+ struct nvmem_cell *cell;
+ ssize_t data;
+ char *ret;
+
+ cell = nvmem_cell_get(dev, cname);
+ if (IS_ERR(cell))
+ return ERR_CAST(cell);
+
+ ret = nvmem_cell_read(cell, &data);
+ nvmem_cell_put(cell);
+
+ return ret;
+}
+
+#endif /* __SOC_ZYNQMP_FW_H__ */
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 38286de779e3..17d32ef64b31 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -144,6 +144,7 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer);
* not using a GPIO line)
* @word_delay: delay to be inserted between consecutive
* words of a transfer
+ * @multi_die: Flash device with multiple dies.
*
* @statistics: statistics for the spi_device
*
@@ -193,6 +194,7 @@ struct spi_device {
int cs_gpio; /* LEGACY: chip select gpio */
struct gpio_desc *cs_gpiod; /* chip select gpio desc */
struct spi_delay word_delay; /* inter-word delay */
+ bool multi_die; /* flash with multiple dies*/
/* the statistics */
struct spi_statistics statistics;
@@ -506,6 +508,21 @@ struct spi_controller {
#define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */
+#define SPI_MASTER_QUAD_MODE BIT(6) /* support quad mode */
+ /*
+ * Controller may support data stripe feature when more than one
+ * chips are present.
+ * Setting data stripe will send data in following manner:
+ * -> even bytes i.e. 0, 2, 4,... are transmitted on lower data bus
+ * -> odd bytes i.e. 1, 3, 5,.. are transmitted on upper data bus
+ */
+#define SPI_MASTER_DATA_STRIPE BIT(7) /* support data stripe */
+ /*
+ * Controller may support asserting more than one chip select at once.
+ * This flag will enable that feature.
+ */
+#define SPI_MASTER_BOTH_CS BIT(8) /* assert both chip selects */
+#define SPI_MASTER_U_PAGE BIT(9) /* select upper flash */
/* flag indicating this is an SPI slave controller */
bool slave;
@@ -798,6 +815,7 @@ extern void spi_res_release(struct spi_controller *ctlr,
* @len: size of rx and tx buffers (in bytes)
* @speed_hz: Select a speed other than the device default for this
* transfer. If 0 the default (from @spi_device) is used.
+ * @dummy: number of dummy cycles.
* @bits_per_word: select a bits_per_word other than the device default
* for this transfer. If 0 the default (from @spi_device) is used.
* @cs_change: affects chipselect after this transfer completes
@@ -817,6 +835,7 @@ extern void spi_res_release(struct spi_controller *ctlr,
* @transfer_list: transfers are sequenced through @spi_message.transfers
* @tx_sg: Scatterlist for transmit, currently not for client use
* @rx_sg: Scatterlist for receive, currently not for client use
+ * @stripe: true-> enable stripe, false-> disable stripe.
* @ptp_sts_word_pre: The word (subject to bits_per_word semantics) offset
* within @tx_buf for which the SPI device is requesting that the time
* snapshot for this transfer begins. Upon completing the SPI transfer,
@@ -928,6 +947,8 @@ struct spi_transfer {
struct spi_delay cs_change_delay;
struct spi_delay word_delay;
u32 speed_hz;
+ u32 dummy;
+ bool stripe;
u32 effective_speed_hz;
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index edd89b7c8f18..ddc9d1dc9e06 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -62,6 +62,7 @@ struct ci_hdrc_platform_data {
#define CI_HDRC_REQUIRES_ALIGNED_DMA BIT(13)
#define CI_HDRC_IMX_IS_HSIC BIT(14)
#define CI_HDRC_PMQOS BIT(15)
+#define CI_HDRC_PHY_VBUS_CONTROL BIT(16)
enum usb_dr_mode dr_mode;
#define CI_HDRC_CONTROLLER_RESET_EVENT 0
#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
diff --git a/include/linux/usb/xhci_pdriver.h b/include/linux/usb/xhci_pdriver.h
new file mode 100644
index 000000000000..a25043b8c8aa
--- /dev/null
+++ b/include/linux/usb/xhci_pdriver.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx Zynq MPSoC Firmware layer
+ *
+ * Copyright (C) 2018-2020 Xilinx, Inc.
+ *
+ * Michal Simek <michal.simek@xilinx.com>
+ * Anurag Kumar Vulisha <anuragku@xilinx.com>
+ */
+#ifndef __USB_CORE_XHCI_PDRIVER_H
+#define __USB_CORE_XHCI_PDRIVER_H
+
+/* Call dwc3_host_wakeup_capable() only for dwc3 DRD mode or HOST only mode */
+#if (IS_REACHABLE(CONFIG_USB_DWC3_HOST) || \
+ (IS_REACHABLE(CONFIG_USB_DWC3_OF_SIMPLE) && \
+ !IS_REACHABLE(CONFIG_USB_DWC3_GADGET)))
+
+/* Let the dwc3 driver know about device wakeup capability */
+void dwc3_host_wakeup_capable(struct device *dev, bool wakeup);
+
+#else
+static inline void dwc3_host_wakeup_capable(struct device *dev, bool wakeup)
+{ ; }
+#endif
+
+#endif /* __USB_CORE_XHCI_PDRIVER_H */
diff --git a/include/linux/xilinx_phy.h b/include/linux/xilinx_phy.h
new file mode 100644
index 000000000000..34a048f7dbe6
--- /dev/null
+++ b/include/linux/xilinx_phy.h
@@ -0,0 +1,20 @@
+#ifndef _XILINX_PHY_H
+#define _XILINX_PHY_H
+
+/* Mask used for ID comparisons */
+#define XILINX_PHY_ID_MASK 0xfffffff0
+
+/* Known PHY IDs */
+#define XILINX_PHY_ID 0x01740c00
+
+/* struct phy_device dev_flags definitions */
+#define XAE_PHY_TYPE_MII 0
+#define XAE_PHY_TYPE_GMII 1
+#define XAE_PHY_TYPE_RGMII_1_3 2
+#define XAE_PHY_TYPE_RGMII_2_0 3
+#define XAE_PHY_TYPE_SGMII 4
+#define XAE_PHY_TYPE_1000BASE_X 5
+#define XAE_PHY_TYPE_2500 6
+#define XXE_PHY_TYPE_USXGMII 7
+
+#endif /* _XILINX_PHY_H */
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index 8cb2c504a05c..25382b111647 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -205,6 +205,9 @@ struct media_pad {
* @link_validate: Return whether a link is valid from the entity point of
* view. The media_pipeline_start() function
* validates all links by calling this operation. Optional.
+ * @has_route: Return whether a route exists inside the entity between
+ * two given pads. Optional. If the operation isn't
+ * implemented all pads will be considered as connected.
*
* .. note::
*
@@ -217,6 +220,8 @@ struct media_entity_operations {
const struct media_pad *local,
const struct media_pad *remote, u32 flags);
int (*link_validate)(struct media_link *link);
+ bool (*has_route)(struct media_entity *entity, unsigned int pad0,
+ unsigned int pad1);
};
/**
@@ -895,6 +900,9 @@ __must_check int media_graph_walk_init(
*/
void media_graph_walk_cleanup(struct media_graph *graph);
+bool media_entity_has_route(struct media_entity *entity, unsigned int sink,
+ unsigned int source);
+
/**
* media_graph_walk_start - Start walking the media graph at a
* given entity
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index a4848de59852..96a0402a5b67 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -710,6 +710,10 @@ struct v4l2_subdev_pad_ops {
struct v4l2_mbus_frame_desc *fd);
int (*set_frame_desc)(struct v4l2_subdev *sd, unsigned int pad,
struct v4l2_mbus_frame_desc *fd);
+ int (*get_routing)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_routing *route);
+ int (*set_routing)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_routing *route);
};
/**
diff --git a/include/soc/xilinx/xlnx_vcu.h b/include/soc/xilinx/xlnx_vcu.h
new file mode 100644
index 000000000000..ff03ede993ed
--- /dev/null
+++ b/include/soc/xilinx/xlnx_vcu.h
@@ -0,0 +1,39 @@
+/*
+ * Xilinx VCU Init
+ *
+ * Copyright (C) 2016 - 2017 Xilinx, Inc.
+ *
+ * Contact: Dhaval Shah <dshah@xilinx.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+#ifndef _XILINX_VCU_H_
+#define _XILINX_VCU_H_
+
+#define VCU_GASKET_INIT 0x74
+#define VCU_GASKET_VALUE 0x03
+
+/**
+ * struct xvcu_device - Xilinx VCU init device structure
+ * @dev: Platform device
+ * @pll_ref: pll ref clock source
+ * @reset_gpio: vcu reset gpio
+ * @aclk: axi clock source
+ * @logicore_reg_ba: logicore reg base address
+ * @vcu_slcr_ba: vcu_slcr Register base address
+ */
+struct xvcu_device {
+ struct device *dev;
+ struct clk *pll_ref;
+ struct clk *aclk;
+ struct gpio_desc *reset_gpio;
+ void __iomem *logicore_reg_ba;
+ void __iomem *vcu_slcr_ba;
+};
+
+u32 xvcu_get_color_depth(struct xvcu_device *xvcu);
+u32 xvcu_get_memory_depth(struct xvcu_device *xvcu);
+u32 xvcu_get_clock_frequency(struct xvcu_device *xvcu);
+u32 xvcu_get_num_cores(struct xvcu_device *xvcu);
+
+#endif /* _XILINX_VCU_H_ */
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 8bc0b31597d8..1a44c4a4692c 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -162,8 +162,11 @@ extern "C" {
#define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
+#define DRM_FORMAT_AVUY fourcc_code('A', 'V', 'U', 'Y') /* [31:0] A:Cr:Cb:Y 8:8:8:8 little endian */
#define DRM_FORMAT_XYUV8888 fourcc_code('X', 'Y', 'U', 'V') /* [31:0] X:Y:Cb:Cr 8:8:8:8 little endian */
#define DRM_FORMAT_VUY888 fourcc_code('V', 'U', '2', '4') /* [23:0] Cr:Cb:Y 8:8:8 little endian */
+#define DRM_FORMAT_XVUY8888 fourcc_code('X', 'V', '2', '4') /* [31:0] x:Cr:Cb:Y 8:8:8:8 little endian */
+#define DRM_FORMAT_XVUY2101010 fourcc_code('X', 'V', '3', '0') /* [31:0] x:Cr:Cb:Y 2:10:10:10 little endian */
#define DRM_FORMAT_VUY101010 fourcc_code('V', 'U', '3', '0') /* Y followed by U then V, 10:10:10. Non-linear modifier only */
/*
@@ -209,6 +212,10 @@ extern "C" {
#define DRM_FORMAT_YUV420_8BIT fourcc_code('Y', 'U', '0', '8')
#define DRM_FORMAT_YUV420_10BIT fourcc_code('Y', 'U', '1', '0')
+/* Grey scale */
+#define DRM_FORMAT_Y8 fourcc_code('G', 'R', 'E', 'Y') /* 8 Greyscale */
+#define DRM_FORMAT_Y10 fourcc_code('Y', '1', '0', ' ') /* 10 Greyscale */
+
/*
* 2 plane RGB + A
* index 0 = RGB plane, same format as the corresponding non _A8 format has
@@ -238,6 +245,14 @@ extern "C" {
#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */
/*
+ * 2 plane 10 bit per component YCbCr
+ * index 0 = Y plane, [31:0] x:Y2:Y1:Y0 2:10:10:10 little endian
+ * index 1 = Cb:Cr plane, [63:0] x:Cb2:Cr2:Cb1:x:Cr1:Cb0:Cr0 2:10:10:10:2:10:10:10 little endian
+ */
+#define DRM_FORMAT_XV15 fourcc_code('X', 'V', '1', '5') /* 2x2 subsampled Cb:Cr plane 2:10:10:10 */
+#define DRM_FORMAT_XV20 fourcc_code('X', 'V', '2', '0') /* 2x1 subsampled Cb:Cr plane 2:10:10:10 */
+
+/*
* 2 plane YCbCr MSB aligned
* index 0 = Y plane, [15:0] Y:x [10:6] little endian
* index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [10:6:10:6] little endian
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 735c8cfdaaa1..5254c393c9c9 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -485,6 +485,8 @@ struct drm_mode_fb_cmd {
#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifer[] */
+#define DRM_MODE_FB_ALTERNATE_TOP (1<<2) /* for alternate top field */
+#define DRM_MODE_FB_ALTERNATE_BOTTOM (1<<3) /* for alternate bottom field */
struct drm_mode_fb_cmd2 {
__u32 fb_id;
diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h
index 84fa53ffb13f..859a26b52d70 100644
--- a/include/uapi/linux/media-bus-format.h
+++ b/include/uapi/linux/media-bus-format.h
@@ -64,7 +64,7 @@
#define MEDIA_BUS_FMT_RGB121212_1X36 0x1019
#define MEDIA_BUS_FMT_RGB161616_1X48 0x101a
-/* YUV (including grey) - next is 0x202e */
+/* YUV (including grey) - next is 0x202d */
#define MEDIA_BUS_FMT_Y8_1X8 0x2001
#define MEDIA_BUS_FMT_UV8_1X8 0x2015
#define MEDIA_BUS_FMT_UYVY8_1_5X8 0x2002
@@ -156,4 +156,20 @@
/* HSV - next is 0x6002 */
#define MEDIA_BUS_FMT_AHSV8888_1X32 0x6001
+/* RGB: Xilinx Specific - next is 0x1103 */
+#define MEDIA_BUS_FMT_RBG101010_1X30 0x1100
+#define MEDIA_BUS_FMT_RBG121212_1X36 0x1101
+#define MEDIA_BUS_FMT_RBG161616_1X48 0x1102
+
+/* YUV: Xilinx Specific - next is 0x2109 */
+#define MEDIA_BUS_FMT_VYYUYY8_1X24 0x2100
+#define MEDIA_BUS_FMT_VYYUYY10_4X20 0x2101
+#define MEDIA_BUS_FMT_VUY10_1X30 0x2102
+#define MEDIA_BUS_FMT_UYYVYY12_4X24 0x2103
+#define MEDIA_BUS_FMT_VUY12_1X36 0x2104
+#define MEDIA_BUS_FMT_Y16_1X16 0x2105
+#define MEDIA_BUS_FMT_UYYVYY16_4X32 0x2106
+#define MEDIA_BUS_FMT_VUY16_1X48 0x2107
+#define MEDIA_BUS_FMT_UYVY16_2X32 0x2108
+
#endif /* __LINUX_MEDIA_BUS_FORMAT_H */
diff --git a/include/uapi/linux/uio/uio.h b/include/uapi/linux/uio/uio.h
new file mode 100644
index 000000000000..db92d311c85f
--- /dev/null
+++ b/include/uapi/linux/uio/uio.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * The header for UIO driver
+ *
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ */
+
+#ifndef _UAPI_UIO_UIO_H_
+#define _UAPI_UIO_UIO_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * enum uio_dmabuf_dir - list of dma directions for mapping management
+ * @UIO_DMABUF_DIR_BIDIR: Bidirectional DMA. To and from device
+ * @UIO_DMABUF_DIR_TO_DEV: DMA to device
+ * @UIO_DMABUF_DIR_FROM_DEV: DMA from device
+ * @UIO_DMABUF_DIR_NONE: Direction not specified
+ */
+enum uio_dmabuf_dir {
+ UIO_DMABUF_DIR_BIDIR = 1,
+ UIO_DMABUF_DIR_TO_DEV = 2,
+ UIO_DMABUF_DIR_FROM_DEV = 3,
+ UIO_DMABUF_DIR_NONE = 4,
+};
+
+/**
+ * struct uio_dmabuf_args - arguments from userspace to map / unmap dmabuf
+ * @dbuf_fd: The fd or dma buf
+ * @dma_addr: The dma address of dmabuf @dbuf_fd
+ * @size: The size of dmabuf @dbuf_fd
+ * @dir: direction of dma transfer of dmabuf @dbuf_fd
+ */
+struct uio_dmabuf_args {
+ __s32 dbuf_fd;
+ __u64 dma_addr;
+ __u64 size;
+ __u8 dir;
+};
+
+#define UIO_IOC_BASE 'U'
+
+/**
+ * DOC: UIO_IOC_MAP_DMABUF - Map the dma buf to userspace uio application
+ *
+ * This takes uio_dmabuf_args, and maps the given dmabuf @dbuf_fd and returns
+ * information to userspace.
+ * FIXME: This is experimental and may change at any time. Don't consider this
+ * as stable ABI.
+ */
+#define UIO_IOC_MAP_DMABUF _IOWR(UIO_IOC_BASE, 0x1, struct uio_dmabuf_args)
+
+/**
+ * DOC: UIO_IOC_UNMAP_DMABUF - Unmap the dma buf
+ *
+ * This takes uio_dmabuf_args, and unmaps the previous mapped dmabuf @dbuf_fd.
+ * FIXME: This is experimental and may change at any time. Don't consider this
+ * as stable ABI.
+ */
+#define UIO_IOC_UNMAP_DMABUF _IOWR(UIO_IOC_BASE, 0x2, struct uio_dmabuf_args)
+
+#endif
diff --git a/include/uapi/linux/v4l2-mediabus.h b/include/uapi/linux/v4l2-mediabus.h
index 123a231001a8..325c985ed06f 100644
--- a/include/uapi/linux/v4l2-mediabus.h
+++ b/include/uapi/linux/v4l2-mediabus.h
@@ -68,6 +68,8 @@ enum v4l2_mbus_pixelcode {
V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB888_2X12_BE),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB888_2X12_LE),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(ARGB8888_1X32),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(RBG888_1X24),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB888_1X32_PADHI),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(Y8_1X8),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(UV8_1X8),
@@ -104,6 +106,7 @@ enum v4l2_mbus_pixelcode {
V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY12_1X24),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV12_1X24),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU12_1X24),
+ V4L2_MBUS_FROM_MEDIA_BUS_FMT(VUY8_1X24),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR8_1X8),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG8_1X8),
diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h
index 03970ce30741..b76f9b4afe05 100644
--- a/include/uapi/linux/v4l2-subdev.h
+++ b/include/uapi/linux/v4l2-subdev.h
@@ -155,6 +155,27 @@ struct v4l2_subdev_selection {
__u32 reserved[8];
};
+
+/**
+ * struct v4l2_subdev_route - A signal route inside a subdev
+ * @sink: the sink pad
+ * @source: the source pad
+ */
+struct v4l2_subdev_route {
+ __u32 sink;
+ __u32 source;
+};
+
+/**
+ * struct v4l2_subdev_routing - Routing information
+ * @num_routes: the total number of routes in the routes array
+ * @routes: the routes array
+ */
+struct v4l2_subdev_routing {
+ __u32 num_routes;
+ struct v4l2_subdev_route *routes;
+};
+
/* Backwards compatibility define --- to be removed */
#define v4l2_subdev_edid v4l2_edid
@@ -181,5 +202,7 @@ struct v4l2_subdev_selection {
#define VIDIOC_SUBDEV_ENUM_DV_TIMINGS _IOWR('V', 98, struct v4l2_enum_dv_timings)
#define VIDIOC_SUBDEV_QUERY_DV_TIMINGS _IOR('V', 99, struct v4l2_dv_timings)
#define VIDIOC_SUBDEV_DV_TIMINGS_CAP _IOWR('V', 100, struct v4l2_dv_timings_cap)
+#define VIDIOC_SUBDEV_G_ROUTING _IOWR('V', 38, struct v4l2_subdev_routing)
+#define VIDIOC_SUBDEV_S_ROUTING _IOWR('V', 39, struct v4l2_subdev_routing)
#endif
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 9817b7e2c968..d15258b63140 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -558,13 +558,18 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_RGBX32 v4l2_fourcc('X', 'B', '2', '4') /* 32 RGBX-8-8-8-8 */
#define V4L2_PIX_FMT_ARGB32 v4l2_fourcc('B', 'A', '2', '4') /* 32 ARGB-8-8-8-8 */
#define V4L2_PIX_FMT_XRGB32 v4l2_fourcc('B', 'X', '2', '4') /* 32 XRGB-8-8-8-8 */
+#define V4L2_PIX_FMT_XBGR30 v4l2_fourcc('R', 'X', '3', '0') /* 32 XBGR-2-10-10-10 */
+#define V4L2_PIX_FMT_XBGR40 v4l2_fourcc('R', 'X', '4', '0') /* 40 XBGR-4-12-12-12 */
+#define V4L2_PIX_FMT_BGR48 v4l2_fourcc('R', 'G', '4', '8') /* 32 BGR-16-16-16 */
/* Grey formats */
#define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */
#define V4L2_PIX_FMT_Y4 v4l2_fourcc('Y', '0', '4', ' ') /* 4 Greyscale */
#define V4L2_PIX_FMT_Y6 v4l2_fourcc('Y', '0', '6', ' ') /* 6 Greyscale */
#define V4L2_PIX_FMT_Y10 v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */
+#define V4L2_PIX_FMT_XY10 v4l2_fourcc('X', 'Y', '1', '0') /* 10 Greyscale 2-10-10-10 */
#define V4L2_PIX_FMT_Y12 v4l2_fourcc('Y', '1', '2', ' ') /* 12 Greyscale */
+#define V4L2_PIX_FMT_XY12 v4l2_fourcc('X', 'Y', '1', '2') /* 12 Greyscale 4-12-12-12 */
#define V4L2_PIX_FMT_Y14 v4l2_fourcc('Y', '1', '4', ' ') /* 14 Greyscale */
#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */
#define V4L2_PIX_FMT_Y16_BE v4l2_fourcc_be('Y', '1', '6', ' ') /* 16 Greyscale BE */
@@ -587,6 +592,9 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_VYUY v4l2_fourcc('V', 'Y', 'U', 'Y') /* 16 YUV 4:2:2 */
#define V4L2_PIX_FMT_Y41P v4l2_fourcc('Y', '4', '1', 'P') /* 12 YUV 4:1:1 */
#define V4L2_PIX_FMT_YUV444 v4l2_fourcc('Y', '4', '4', '4') /* 16 xxxxyyyy uuuuvvvv */
+#define V4L2_PIX_FMT_XVUY32 v4l2_fourcc('X', 'V', '3', '2') /* 32 XVUY 8:8:8:8 */
+#define V4L2_PIX_FMT_AVUY32 v4l2_fourcc('A', 'V', '3', '2') /* 32 AVUY 8:8:8:8 */
+#define V4L2_PIX_FMT_VUY24 v4l2_fourcc('V', 'U', '2', '4') /* 24 VUY 8:8:8 */
#define V4L2_PIX_FMT_YUV555 v4l2_fourcc('Y', 'U', 'V', 'O') /* 16 YUV-5-5-5 */
#define V4L2_PIX_FMT_YUV565 v4l2_fourcc('Y', 'U', 'V', 'P') /* 16 YUV-5-6-5 */
#define V4L2_PIX_FMT_YUV32 v4l2_fourcc('Y', 'U', 'V', '4') /* 32 YUV-8-8-8-8 */
@@ -597,6 +605,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H', 'I', '2', '4') /* 8 8-bit color */
#define V4L2_PIX_FMT_HM12 v4l2_fourcc('H', 'M', '1', '2') /* 8 YUV 4:2:0 16x16 macroblocks */
#define V4L2_PIX_FMT_M420 v4l2_fourcc('M', '4', '2', '0') /* 12 YUV 4:2:0 2 lines y, 1 line uv interleaved */
+#define V4L2_PIX_FMT_XVUY10 v4l2_fourcc('X', '4', '1', '0') /* 32 XVUY 2-10-10-10 */
/* two planes -- one Y, one Cr + Cb interleaved */
#define V4L2_PIX_FMT_NV12 v4l2_fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */
@@ -605,6 +614,14 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_NV61 v4l2_fourcc('N', 'V', '6', '1') /* 16 Y/CrCb 4:2:2 */
#define V4L2_PIX_FMT_NV24 v4l2_fourcc('N', 'V', '2', '4') /* 24 Y/CbCr 4:4:4 */
#define V4L2_PIX_FMT_NV42 v4l2_fourcc('N', 'V', '4', '2') /* 24 Y/CrCb 4:4:4 */
+#define V4L2_PIX_FMT_XV20 v4l2_fourcc('X', 'V', '2', '0') /* 32 XY/UV 4:2:2 10-bit */
+#define V4L2_PIX_FMT_XV15 v4l2_fourcc('X', 'V', '1', '5') /* 32 XY/UV 4:2:0 10-bit */
+#define V4L2_PIX_FMT_X012 v4l2_fourcc('X', '0', '1', '2') /* 40 XY/UV 4:2:0 12-bit 4-12-12-12 */
+#define V4L2_PIX_FMT_X212 v4l2_fourcc('X', '2', '1', '2') /* 40 XY/UV 4:2:2 12-bit 4-12-12-12 */
+#define V4L2_PIX_FMT_X412 v4l2_fourcc('X', '4', '1', '2') /* 40 XY/UV 4:4:4 12-bit 4-12-12-12 */
+#define V4L2_PIX_FMT_X016 v4l2_fourcc('X', '0', '1', '6') /* 32 XY/UV 4:2:0 16-bit */
+#define V4L2_PIX_FMT_X216 v4l2_fourcc('X', '2', '1', '6') /* 32 XY/UV 4:2:2 16-bit */
+#define V4L2_PIX_FMT_X416 v4l2_fourcc('X', '4', '1', '6') /* 32 XY/UV 4:4:4 16-bit */
/* two non contiguous planes - one Y, one Cr + Cb interleaved */
#define V4L2_PIX_FMT_NV12M v4l2_fourcc('N', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 */
@@ -612,6 +629,14 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_NV16M v4l2_fourcc('N', 'M', '1', '6') /* 16 Y/CbCr 4:2:2 */
#define V4L2_PIX_FMT_NV61M v4l2_fourcc('N', 'M', '6', '1') /* 16 Y/CrCb 4:2:2 */
#define V4L2_PIX_FMT_NV12MT v4l2_fourcc('T', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 64x32 macroblocks */
+#define V4L2_PIX_FMT_XV20M v4l2_fourcc('X', 'M', '2', '0') /* 32 XY/UV 4:2:2 10-bit */
+#define V4L2_PIX_FMT_XV15M v4l2_fourcc('X', 'M', '1', '5') /* 32 XY/UV 4:2:0 10-bit */
+#define V4L2_PIX_FMT_X012M v4l2_fourcc('M', '0', '1', '2') /* 40 XY/UV 4:2:0 12-bit 4-12-12-12 */
+#define V4L2_PIX_FMT_X212M v4l2_fourcc('M', '2', '1', '2') /* 40 XY/UV 4:2:2 12-bit 4-12-12-12 */
+#define V4L2_PIX_FMT_X412M v4l2_fourcc('M', '4', '1', '2') /* 40 XY/UV 4:4:4 12-bit 4-12-12-12 */
+#define V4L2_PIX_FMT_X016M v4l2_fourcc('M', '0', '1', '6') /* 32 XY/UV 4:2:0 16-bit */
+#define V4L2_PIX_FMT_X216M v4l2_fourcc('M', '2', '1', '6') /* 32 XY/UV 4:2:2 16-bit */
+#define V4L2_PIX_FMT_X416M v4l2_fourcc('M', '4', '1', '6') /* 32 XY/UV 4:4:4 16-bit */
#define V4L2_PIX_FMT_NV12MT_16X16 v4l2_fourcc('V', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 16x16 macroblocks */
/* three planes - Y Cb, Cr */
diff --git a/include/uapi/linux/xilinx-csi2rxss.h b/include/uapi/linux/xilinx-csi2rxss.h
new file mode 100644
index 000000000000..81e85659a160
--- /dev/null
+++ b/include/uapi/linux/xilinx-csi2rxss.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef __UAPI_XILINX_CSI2RXSS_H__
+#define __UAPI_XILINX_CSI2RXSS_H__
+
+#include <linux/videodev2.h>
+
+/*
+ * Events
+ *
+ * V4L2_EVENT_XLNXCSIRX_SPKT: Short packet received
+ * V4L2_EVENT_XLNXCSIRX_SPKT_OVF: Short packet FIFO overflow
+ * V4L2_EVENT_XLNXCSIRX_SLBF: Stream line buffer full
+ */
+#define V4L2_EVENT_XLNXCSIRX_CLASS (V4L2_EVENT_PRIVATE_START | 0x100)
+#define V4L2_EVENT_XLNXCSIRX_SPKT (V4L2_EVENT_XLNXCSIRX_CLASS | 0x1)
+#define V4L2_EVENT_XLNXCSIRX_SPKT_OVF (V4L2_EVENT_XLNXCSIRX_CLASS | 0x2)
+#define V4L2_EVENT_XLNXCSIRX_SLBF (V4L2_EVENT_XLNXCSIRX_CLASS | 0x3)
+
+#endif /* __UAPI_XILINX_CSI2RXSS_H__ */
diff --git a/include/uapi/linux/xilinx-hls.h b/include/uapi/linux/xilinx-hls.h
new file mode 100644
index 000000000000..992d9c98d8f1
--- /dev/null
+++ b/include/uapi/linux/xilinx-hls.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef __UAPI_XILINX_HLS_H__
+#define __UAPI_XILINX_HLS_H__
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+struct xilinx_axi_hls_register {
+ __u32 offset;
+ __u32 value;
+};
+
+struct xilinx_axi_hls_registers {
+ __u32 num_regs;
+ struct xilinx_axi_hls_register __user *regs;
+};
+
+#define XILINX_AXI_HLS_READ _IOWR('V', BASE_VIDIOC_PRIVATE+0, struct xilinx_axi_hls_registers)
+#define XILINX_AXI_HLS_WRITE _IOW('V', BASE_VIDIOC_PRIVATE+1, struct xilinx_axi_hls_registers)
+
+#endif /* __UAPI_XILINX_HLS_H__ */
diff --git a/include/uapi/linux/xilinx-sdirxss.h b/include/uapi/linux/xilinx-sdirxss.h
new file mode 100644
index 000000000000..ab3a2242f691
--- /dev/null
+++ b/include/uapi/linux/xilinx-sdirxss.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef __UAPI_XILINX_SDIRXSS_H__
+#define __UAPI_XILINX_SDIRXSS_H__
+
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+/*
+ * Events
+ *
+ * V4L2_EVENT_XLNXSDIRX_UNDERFLOW: Video in to AXI4 Stream core underflowed
+ * V4L2_EVENT_XLNXSDIRX_OVERFLOW: Video in to AXI4 Stream core overflowed
+ */
+#define V4L2_EVENT_XLNXSDIRX_CLASS (V4L2_EVENT_PRIVATE_START | 0x200)
+#define V4L2_EVENT_XLNXSDIRX_UNDERFLOW (V4L2_EVENT_XLNXSDIRX_CLASS | 0x1)
+#define V4L2_EVENT_XLNXSDIRX_OVERFLOW (V4L2_EVENT_XLNXSDIRX_CLASS | 0x2)
+
+/*
+ * This enum is used to prepare the bitmask
+ * of modes to be detected
+ */
+enum {
+ XSDIRX_MODE_SD_OFFSET = 0,
+ XSDIRX_MODE_HD_OFFSET,
+ XSDIRX_MODE_3G_OFFSET,
+ XSDIRX_MODE_6G_OFFSET,
+ XSDIRX_MODE_12GI_OFFSET,
+ XSDIRX_MODE_12GF_OFFSET,
+ XSDIRX_MODE_NUM_SUPPORTED,
+};
+
+#define XSDIRX_DETECT_ALL_MODES (BIT(XSDIRX_MODE_SD_OFFSET) | \
+ BIT(XSDIRX_MODE_HD_OFFSET) | \
+ BIT(XSDIRX_MODE_3G_OFFSET) | \
+ BIT(XSDIRX_MODE_6G_OFFSET) | \
+ BIT(XSDIRX_MODE_12GI_OFFSET) | \
+ BIT(XSDIRX_MODE_12GF_OFFSET))
+
+/*
+ * EDH Error Types
+ * ANC - Ancillary Data Packet Errors
+ * FF - Full Field Errors
+ * AP - Active Portion Errors
+ */
+
+#define XSDIRX_EDH_ERRCNT_ANC_EDH_ERR BIT(0)
+#define XSDIRX_EDH_ERRCNT_ANC_EDA_ERR BIT(1)
+#define XSDIRX_EDH_ERRCNT_ANC_IDH_ERR BIT(2)
+#define XSDIRX_EDH_ERRCNT_ANC_IDA_ERR BIT(3)
+#define XSDIRX_EDH_ERRCNT_ANC_UES_ERR BIT(4)
+#define XSDIRX_EDH_ERRCNT_FF_EDH_ERR BIT(5)
+#define XSDIRX_EDH_ERRCNT_FF_EDA_ERR BIT(6)
+#define XSDIRX_EDH_ERRCNT_FF_IDH_ERR BIT(7)
+#define XSDIRX_EDH_ERRCNT_FF_IDA_ERR BIT(8)
+#define XSDIRX_EDH_ERRCNT_FF_UES_ERR BIT(9)
+#define XSDIRX_EDH_ERRCNT_AP_EDH_ERR BIT(10)
+#define XSDIRX_EDH_ERRCNT_AP_EDA_ERR BIT(11)
+#define XSDIRX_EDH_ERRCNT_AP_IDH_ERR BIT(12)
+#define XSDIRX_EDH_ERRCNT_AP_IDA_ERR BIT(13)
+#define XSDIRX_EDH_ERRCNT_AP_UES_ERR BIT(14)
+#define XSDIRX_EDH_ERRCNT_PKT_CHKSUM_ERR BIT(15)
+
+#define XSDIRX_EDH_ALLERR_MASK 0xFFFF
+
+#endif /* __UAPI_XILINX_SDIRXSS_H__ */
diff --git a/include/uapi/linux/xilinx-v4l2-controls.h b/include/uapi/linux/xilinx-v4l2-controls.h
index b6441fe705c5..23d1574c6d55 100644
--- a/include/uapi/linux/xilinx-v4l2-controls.h
+++ b/include/uapi/linux/xilinx-v4l2-controls.h
@@ -70,5 +70,166 @@
#define V4L2_CID_XILINX_TPG_STUCK_PIXEL_THRESH (V4L2_CID_XILINX_TPG + 16)
/* Noise level */
#define V4L2_CID_XILINX_TPG_NOISE_GAIN (V4L2_CID_XILINX_TPG + 17)
+/* Foreground pattern (HLS) */
+#define V4L2_CID_XILINX_TPG_HLS_FG_PATTERN (V4L2_CID_XILINX_TPG + 18)
+/*
+ * Xilinx CRESAMPLE Video IP
+ */
+
+#define V4L2_CID_XILINX_CRESAMPLE (V4L2_CID_USER_BASE + 0xc020)
+
+/* The field parity for interlaced video */
+#define V4L2_CID_XILINX_CRESAMPLE_FIELD_PARITY (V4L2_CID_XILINX_CRESAMPLE + 1)
+/* Specify if the first line of video contains the Chroma information */
+#define V4L2_CID_XILINX_CRESAMPLE_CHROMA_PARITY (V4L2_CID_XILINX_CRESAMPLE + 2)
+
+/*
+ * Xilinx RGB2YUV Video IPs
+ */
+
+#define V4L2_CID_XILINX_RGB2YUV (V4L2_CID_USER_BASE + 0xc040)
+
+/* Maximum Luma(Y) value */
+#define V4L2_CID_XILINX_RGB2YUV_YMAX (V4L2_CID_XILINX_RGB2YUV + 1)
+/* Minimum Luma(Y) value */
+#define V4L2_CID_XILINX_RGB2YUV_YMIN (V4L2_CID_XILINX_RGB2YUV + 2)
+/* Maximum Cb Chroma value */
+#define V4L2_CID_XILINX_RGB2YUV_CBMAX (V4L2_CID_XILINX_RGB2YUV + 3)
+/* Minimum Cb Chroma value */
+#define V4L2_CID_XILINX_RGB2YUV_CBMIN (V4L2_CID_XILINX_RGB2YUV + 4)
+/* Maximum Cr Chroma value */
+#define V4L2_CID_XILINX_RGB2YUV_CRMAX (V4L2_CID_XILINX_RGB2YUV + 5)
+/* Minimum Cr Chroma value */
+#define V4L2_CID_XILINX_RGB2YUV_CRMIN (V4L2_CID_XILINX_RGB2YUV + 6)
+/* The offset compensation value for Luma(Y) */
+#define V4L2_CID_XILINX_RGB2YUV_YOFFSET (V4L2_CID_XILINX_RGB2YUV + 7)
+/* The offset compensation value for Cb Chroma */
+#define V4L2_CID_XILINX_RGB2YUV_CBOFFSET (V4L2_CID_XILINX_RGB2YUV + 8)
+/* The offset compensation value for Cr Chroma */
+#define V4L2_CID_XILINX_RGB2YUV_CROFFSET (V4L2_CID_XILINX_RGB2YUV + 9)
+
+/* Y = CA * R + (1 - CA - CB) * G + CB * B */
+
+/* CA coefficient */
+#define V4L2_CID_XILINX_RGB2YUV_ACOEF (V4L2_CID_XILINX_RGB2YUV + 10)
+/* CB coefficient */
+#define V4L2_CID_XILINX_RGB2YUV_BCOEF (V4L2_CID_XILINX_RGB2YUV + 11)
+/* CC coefficient */
+#define V4L2_CID_XILINX_RGB2YUV_CCOEF (V4L2_CID_XILINX_RGB2YUV + 12)
+/* CD coefficient */
+#define V4L2_CID_XILINX_RGB2YUV_DCOEF (V4L2_CID_XILINX_RGB2YUV + 13)
+
+/*
+ * Xilinx HLS Video IP
+ */
+
+#define V4L2_CID_XILINX_HLS (V4L2_CID_USER_BASE + 0xc060)
+
+/* The IP model */
+#define V4L2_CID_XILINX_HLS_MODEL (V4L2_CID_XILINX_HLS + 1)
+
+/*
+ * Xilinx MIPI CSI2 Rx Subsystem
+ */
+
+/* Base ID */
+#define V4L2_CID_XILINX_MIPICSISS (V4L2_CID_USER_BASE + 0xc080)
+
+/* Active Lanes */
+#define V4L2_CID_XILINX_MIPICSISS_ACT_LANES (V4L2_CID_XILINX_MIPICSISS + 1)
+/* Frames received since streaming is set */
+#define V4L2_CID_XILINX_MIPICSISS_FRAME_COUNTER (V4L2_CID_XILINX_MIPICSISS + 2)
+/* Reset all event counters */
+#define V4L2_CID_XILINX_MIPICSISS_RESET_COUNTERS (V4L2_CID_XILINX_MIPICSISS + 3)
+
+/*
+ * Xilinx Gamma Correction IP
+ */
+
+/* Base ID */
+#define V4L2_CID_XILINX_GAMMA_CORR (V4L2_CID_USER_BASE + 0xc0c0)
+/* Adjust Red Gamma */
+#define V4L2_CID_XILINX_GAMMA_CORR_RED_GAMMA (V4L2_CID_XILINX_GAMMA_CORR + 1)
+/* Adjust Blue Gamma */
+#define V4L2_CID_XILINX_GAMMA_CORR_BLUE_GAMMA (V4L2_CID_XILINX_GAMMA_CORR + 2)
+/* Adjust Green Gamma */
+#define V4L2_CID_XILINX_GAMMA_CORR_GREEN_GAMMA (V4L2_CID_XILINX_GAMMA_CORR + 3)
+
+/*
+ * Xilinx Color Space Converter (CSC) VPSS
+ */
+
+/* Base ID */
+#define V4L2_CID_XILINX_CSC (V4L2_CID_USER_BASE + 0xc0a0)
+/* Adjust Brightness */
+#define V4L2_CID_XILINX_CSC_BRIGHTNESS (V4L2_CID_XILINX_CSC + 1)
+/* Adjust Contrast */
+#define V4L2_CID_XILINX_CSC_CONTRAST (V4L2_CID_XILINX_CSC + 2)
+/* Adjust Red Gain */
+#define V4L2_CID_XILINX_CSC_RED_GAIN (V4L2_CID_XILINX_CSC + 3)
+/* Adjust Green Gain */
+#define V4L2_CID_XILINX_CSC_GREEN_GAIN (V4L2_CID_XILINX_CSC + 4)
+/* Adjust Blue Gain */
+#define V4L2_CID_XILINX_CSC_BLUE_GAIN (V4L2_CID_XILINX_CSC + 5)
+
+/*
+ * Xilinx SDI Rx Subsystem
+ */
+
+/* Base ID */
+#define V4L2_CID_XILINX_SDIRX (V4L2_CID_USER_BASE + 0xc100)
+
+/* Framer Control */
+#define V4L2_CID_XILINX_SDIRX_FRAMER (V4L2_CID_XILINX_SDIRX + 1)
+/* Video Lock Window Control */
+#define V4L2_CID_XILINX_SDIRX_VIDLOCK_WINDOW (V4L2_CID_XILINX_SDIRX + 2)
+/* EDH Error Mask Control */
+#define V4L2_CID_XILINX_SDIRX_EDH_ERRCNT_ENABLE (V4L2_CID_XILINX_SDIRX + 3)
+/* Mode search Control */
+#define V4L2_CID_XILINX_SDIRX_SEARCH_MODES (V4L2_CID_XILINX_SDIRX + 4)
+/* Get Detected Mode control */
+#define V4L2_CID_XILINX_SDIRX_MODE_DETECT (V4L2_CID_XILINX_SDIRX + 5)
+/* Get CRC error status */
+#define V4L2_CID_XILINX_SDIRX_CRC (V4L2_CID_XILINX_SDIRX + 6)
+/* Get EDH error count control */
+#define V4L2_CID_XILINX_SDIRX_EDH_ERRCNT (V4L2_CID_XILINX_SDIRX + 7)
+/* Get EDH status control */
+#define V4L2_CID_XILINX_SDIRX_EDH_STATUS (V4L2_CID_XILINX_SDIRX + 8)
+/* Get Transport Interlaced status */
+#define V4L2_CID_XILINX_SDIRX_TS_IS_INTERLACED (V4L2_CID_XILINX_SDIRX + 9)
+/* Get Active Streams count */
+#define V4L2_CID_XILINX_SDIRX_ACTIVE_STREAMS (V4L2_CID_XILINX_SDIRX + 10)
+/* Is Mode 3GB */
+#define V4L2_CID_XILINX_SDIRX_IS_3GB (V4L2_CID_XILINX_SDIRX + 11)
+
+/*
+ * Xilinx VIP
+ */
+
+/* Base ID */
+#define V4L2_CID_XILINX_VIP (V4L2_CID_USER_BASE + 0xc120)
+
+/* Low latency mode */
+#define V4L2_CID_XILINX_LOW_LATENCY (V4L2_CID_XILINX_VIP + 1)
+
+/* Control values to enable/disable low latency capture mode */
+#define XVIP_LOW_LATENCY_ENABLE BIT(1)
+#define XVIP_LOW_LATENCY_DISABLE BIT(2)
+
+/* Control value to start DMA */
+#define XVIP_START_DMA BIT(3)
+
+/*
+ * Xilinx SCD
+ */
+
+/* Base ID */
+#define V4L2_CID_XILINX_SCD (V4L2_CID_USER_BASE + 0xc140)
+
+/*
+ * SCD Threshold
+ * User can pass percentage as an integer to tune threshold value
+ */
+#define V4L2_CID_XILINX_SCD_THRESHOLD (V4L2_CID_XILINX_SCD + 1)
#endif /* __UAPI_XILINX_V4L2_CONTROLS_H__ */
diff --git a/include/uapi/linux/xilinx-v4l2-events.h b/include/uapi/linux/xilinx-v4l2-events.h
new file mode 100644
index 000000000000..80f99681dfb6
--- /dev/null
+++ b/include/uapi/linux/xilinx-v4l2-events.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+/*
+ * Xilinx V4L2 SCD Driver
+ *
+ * Copyright (C) 2017-2018 Xilinx, Inc.
+ *
+ * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
+ *
+ */
+
+#ifndef __UAPI_XILINX_V4L2_EVENTS_H__
+#define __UAPI_XILINX_V4L2_EVENTS_H__
+
+#include <linux/videodev2.h>
+
+/*
+ * Events
+ *
+ * V4L2_EVENT_XLNXSCD: Scene Change Detection
+ */
+#define V4L2_EVENT_XLNXSCD_CLASS (V4L2_EVENT_PRIVATE_START | 0x300)
+#define V4L2_EVENT_XLNXSCD (V4L2_EVENT_XLNXSCD_CLASS | 0x1)
+
+#endif /* __UAPI_XILINX_V4L2_EVENTS_H__ */
diff --git a/include/uapi/linux/xlnx_mpg2tsmux_interface.h b/include/uapi/linux/xlnx_mpg2tsmux_interface.h
new file mode 100644
index 000000000000..f8e055986949
--- /dev/null
+++ b/include/uapi/linux/xlnx_mpg2tsmux_interface.h
@@ -0,0 +1,252 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+/*
+ * Xilinx mpeg2 transport stream muxer ioctl calls
+ *
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Author: Venkateshwar Rao G <venkateshwar.rao.gannava@xilinx.com>
+ */
+
+#ifndef __XLNX_MPG2TSMUX_INTERFACE_H__
+#define __XLNX_MPG2TSMUX_INTERFACE_H__
+
+#include <linux/ioctl.h>
+
+/**
+ * enum ts_mux_command - command for stream context
+ * @CREATE_TS_MISC: create misc
+ * @CREATE_TS_VIDEO_KEYFRAME: create video key frame
+ * @CREATE_TS_VIDEO_NON_KEYFRAME: create non key frame
+ * @CREATE_TS_AUDIO: create audio
+ * @WRITE_PAT: write pat
+ * @WRITE_PMT: write pmt
+ * @WRITE_SI: write si
+ * @INVALID: invalid
+ */
+enum ts_mux_command {
+ CREATE_TS_MISC = 0,
+ CREATE_TS_VIDEO_KEYFRAME,
+ CREATE_TS_VIDEO_NON_KEYFRAME,
+ CREATE_TS_AUDIO,
+ WRITE_PAT,
+ WRITE_PMT,
+ WRITE_SI,
+ INVALID
+};
+
+/**
+ * struct stream_context_in - struct to enqueue a stream context descriptor
+ * @command: stream context type
+ * @stream_id: stream identification number
+ * @extended_stream_id: extended stream id
+ * @is_pcr_stream: flag for pcr stream
+ * @is_valid_pts: flag for valid pts
+ * @is_valid_dts: flag for valid dts
+ * @is_dmabuf: flag to set if external src buffer is DMA allocated
+ * @pid: packet id number
+ * @size_data_in: size in bytes of input buffer
+ * @pts: presentation time stamp
+ * @dts: decode time stamp
+ * @srcbuf_id: source buffer id after mmap
+ * @insert_pcr: flag for inserting pcr in stream context
+ * @pcr_extension: pcr extension number
+ * @pcr_base: pcr base number
+ */
+struct stream_context_in {
+ enum ts_mux_command command;
+ u8 stream_id;
+ u8 extended_stream_id;
+ bool is_pcr_stream;
+ bool is_valid_pts;
+ bool is_valid_dts;
+ bool is_dmabuf;
+ u16 pid;
+ u64 size_data_in;
+ u64 pts;
+ u64 dts;
+ u32 srcbuf_id;
+ bool insert_pcr;
+ u16 pcr_extension;
+ u64 pcr_base;
+};
+
+/**
+ * struct muxer_context_in - struct to enqueue a mux context descriptor
+ * @is_dmabuf: flag to set if external src buffer is DMA allocated
+ * @dstbuf_id: destination buffer id after mmap
+ * @dmabuf_size: size in bytes of output buffer
+ */
+struct muxer_context_in {
+ bool is_dmabuf;
+ u32 dstbuf_id;
+ u32 dmabuf_size;
+};
+
+/**
+ * enum xlnx_tsmux_status - ip status
+ * @MPG2MUX_BUSY: device busy
+ * @MPG2MUX_READY: device ready
+ * @MPG2MUX_ERROR: error state
+ */
+enum xlnx_tsmux_status {
+ MPG2MUX_BUSY = 0,
+ MPG2MUX_READY,
+ MPG2MUX_ERROR
+};
+
+/**
+ * struct strc_bufs_info - struct to specify bufs requirement
+ * @num_buf: number of buffers
+ * @buf_size: size of each buffer
+ */
+struct strc_bufs_info {
+ u32 num_buf;
+ u32 buf_size;
+};
+
+/**
+ * struct out_buffer - struct to get output buffer info
+ * @buf_id: buf id into which output is written
+ * @buf_write: output bytes written in buf
+ */
+struct out_buffer {
+ u32 buf_id;
+ u32 buf_write;
+};
+
+/**
+ * enum strmtbl_cnxt - streamid table operation
+ * @NO_UPDATE: no table update
+ * @ADD_TO_TBL: add the entry to table
+ * @DEL_FR_TBL: delete the entry from table
+ */
+enum strmtbl_cnxt {
+ NO_UPDATE = 0,
+ ADD_TO_TBL,
+ DEL_FR_TBL,
+};
+
+/**
+ * struct strc_strminfo - struct to enqueue/dequeue streamid in table
+ * @strmtbl_ctxt: enqueue/dequeue stream id
+ * @pid: stream id
+ */
+struct strc_strminfo {
+ enum strmtbl_cnxt strmtbl_ctxt;
+ u16 pid;
+};
+
+/**
+ * enum xlnx_tsmux_dma_dir - dma direction
+ * @DMA_TO_MPG2MUX: memory to device
+ * @DMA_FROM_MPG2MUX: device to memory
+ */
+enum xlnx_tsmux_dma_dir {
+ DMA_TO_MPG2MUX = 1,
+ DMA_FROM_MPG2MUX,
+};
+
+/**
+ * enum xlnx_tsmux_dmabuf_flags - dma buffer handling
+ * @DMABUF_ERROR: buffer error
+ * @DMABUF_CONTIG: contig buffer
+ * @DMABUF_NON_CONTIG: non-contiguous buffer
+ * @DMABUF_ATTACHED: buffer attached
+ */
+enum xlnx_tsmux_dmabuf_flags {
+ DMABUF_ERROR = 1,
+ DMABUF_CONTIG = 2,
+ DMABUF_NON_CONTIG = 4,
+ DMABUF_ATTACHED = 8,
+};
+
+/**
+ * struct xlnx_tsmux_dmabuf_info - struct to verify dma buf before enque
+ * @buf_fd: file descriptor
+ * @dir: direction of the dma buffer
+ * @flags: flags returned by the driver
+ */
+struct xlnx_tsmux_dmabuf_info {
+ int buf_fd;
+ enum xlnx_tsmux_dma_dir dir;
+ enum xlnx_tsmux_dmabuf_flags flags;
+};
+
+/* MPG2MUX IOCTL CALL LIST */
+
+#define MPG2MUX_MAGIC 'M'
+
+/**
+ * MPG2MUX_INBUFALLOC - src buffer allocation
+ */
+#define MPG2MUX_INBUFALLOC _IOWR(MPG2MUX_MAGIC, 1, struct strc_bufs_info *)
+
+/**
+ * MPG2MUX_INBUFDEALLOC - deallocates the all src buffers
+ */
+#define MPG2MUX_INBUFDEALLOC _IO(MPG2MUX_MAGIC, 2)
+
+/**
+ * MPG2MUX_OUTBUFALLOC - allocates DMA able memory for dst
+ */
+#define MPG2MUX_OUTBUFALLOC _IOWR(MPG2MUX_MAGIC, 3, struct strc_bufs_info *)
+
+/**
+ * MPG2MUX_OUTBUFDEALLOC - deallocates the all dst buffers allocated
+ */
+#define MPG2MUX_OUTBUFDEALLOC _IO(MPG2MUX_MAGIC, 4)
+
+/**
+ * MPG2MUX_STBLALLOC - allocates DMA able memory for streamid table
+ */
+#define MPG2MUX_STBLALLOC _IOW(MPG2MUX_MAGIC, 5, unsigned short *)
+
+/**
+ * MPG2MUX_STBLDEALLOC - deallocates streamid table memory
+ */
+#define MPG2MUX_STBLDEALLOC _IO(MPG2MUX_MAGIC, 6)
+
+/**
+ * MPG2MUX_TBLUPDATE - enqueue or dequeue in streamid table
+ */
+#define MPG2MUX_TBLUPDATE _IOW(MPG2MUX_MAGIC, 7, struct strc_strminfo *)
+
+/**
+ * MPG2MUX_SETSTRM - enqueue a stream descriptor in stream context
+ * linked list along with src buf address
+ */
+#define MPG2MUX_SETSTRM _IOW(MPG2MUX_MAGIC, 8, struct stream_context_in *)
+
+/**
+ * MPG2MUX_START - starts muxer IP after configuring stream
+ * and mux context registers
+ */
+#define MPG2MUX_START _IO(MPG2MUX_MAGIC, 9)
+
+/**
+ * MPG2MUX_STOP - stops the muxer IP
+ */
+#define MPG2MUX_STOP _IO(MPG2MUX_MAGIC, 10)
+
+/**
+ * MPG2MUX_STATUS - command to get the status of IP
+ */
+#define MPG2MUX_STATUS _IOR(MPG2MUX_MAGIC, 11, unsigned short *)
+
+/**
+ * MPG2MUX_GETOUTBUF - get the output buffer id with size of output data
+ */
+#define MPG2MUX_GETOUTBUF _IOW(MPG2MUX_MAGIC, 12, struct out_buffer *)
+
+/**
+ * MPG2MUX_SETMUX - enqueue a mux descriptor with dst buf address
+ */
+#define MPG2MUX_SETMUX _IOW(MPG2MUX_MAGIC, 13, struct muxer_context_in *)
+
+/**
+ * MPG2MUX_VDBUF - status of a given dma buffer fd
+ */
+#define MPG2MUX_VDBUF _IOWR(MPG2MUX_MAGIC, 14, struct xlnx_tsmux_dmabuf_info *)
+
+#endif
diff --git a/include/uapi/linux/xlnxsync.h b/include/uapi/linux/xlnxsync.h
new file mode 100644
index 000000000000..12f10a8e5a84
--- /dev/null
+++ b/include/uapi/linux/xlnxsync.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef __XLNXSYNC_H__
+#define __XLNXSYNC_H__
+
+#define XLNXSYNC_IOCTL_HDR_VER 0x10002
+
+/*
+ * This is set in the fb_id of struct xlnxsync_chan_config when
+ * configuring the channel. This makes the driver auto search for
+ * a free framebuffer slot.
+ */
+#define XLNXSYNC_AUTO_SEARCH 0xFF
+
+#define XLNXSYNC_MAX_ENC_CHAN 4
+#define XLNXSYNC_MAX_DEC_CHAN 2
+#define XLNXSYNC_BUF_PER_CHAN 3
+
+#define XLNXSYNC_PROD 0
+#define XLNXSYNC_CONS 1
+#define XLNXSYNC_IO 2
+
+#define XLNXSYNC_MAX_CORES 4
+/**
+ * struct xlnxsync_chan_config - Synchronizer channel configuration struct
+ * @hdr_ver: IOCTL header version
+ * @luma_start_offset: Start offset of Luma buffer
+ * @chroma_start_offset: Start offset of Chroma buffer
+ * @luma_end_offset: End offset of Luma buffer
+ * @chroma_end_offset: End offset of Chroma buffer
+ * @luma_margin: Margin for Luma buffer
+ * @chroma_margin: Margin for Chroma buffer
+ * @luma_core_offset: Array of 4 offsets for luma
+ * @chroma_core_offset: Array of 4 offsets for chroma
+ * @dma_fd: File descriptor of dma
+ * @fb_id: Framebuffer index. Valid values 0/1/2/XLNXSYNC_AUTO_SEARCH
+ * @ismono: Flag to indicate if buffer is Luma only.
+ * Valid 0..3 & XLNXSYNC_AUTO_SEARCH
+ *
+ * This structure contains the configuration for monitoring a particular
+ * framebuffer on a particular channel.
+ */
+struct xlnxsync_chan_config {
+ u64 hdr_ver;
+ u64 luma_start_offset[XLNXSYNC_IO];
+ u64 chroma_start_offset[XLNXSYNC_IO];
+ u64 luma_end_offset[XLNXSYNC_IO];
+ u64 chroma_end_offset[XLNXSYNC_IO];
+ u32 luma_margin;
+ u32 chroma_margin;
+ u32 luma_core_offset[XLNXSYNC_MAX_CORES];
+ u32 chroma_core_offset[XLNXSYNC_MAX_CORES];
+ u32 dma_fd;
+ u8 fb_id[XLNXSYNC_IO];
+ u8 ismono[XLNXSYNC_IO];
+};
+
+/**
+ * struct xlnxsync_clr_err - Clear channel error
+ * @hdr_ver: IOCTL header version
+ * @sync_err: Set this to clear sync error
+ * @wdg_err: Set this to clear watchdog error
+ * @ldiff_err: Set this to clear luma difference error
+ * @cdiff_err: Set this to clear chroma difference error
+ */
+struct xlnxsync_clr_err {
+ u64 hdr_ver;
+ u8 sync_err;
+ u8 wdg_err;
+ u8 ldiff_err;
+ u8 cdiff_err;
+};
+
+/**
+ * struct xlnxsync_fbdone - Framebuffer Done
+ * @hdr_ver: IOCTL header version
+ * @status: Framebuffer Done status
+ */
+struct xlnxsync_fbdone {
+ u64 hdr_ver;
+ u8 status[XLNXSYNC_BUF_PER_CHAN][XLNXSYNC_IO];
+};
+
+/**
+ * struct xlnxsync_config - Synchronizer IP configuration
+ * @hdr_ver: IOCTL header version
+ * @encode: true if encoder type, false for decoder type
+ * @max_channels: Maximum channels this IP supports
+ * @active_channels: Number of active IP channels
+ * @reserved_id: Reserved channel ID for instance
+ */
+struct xlnxsync_config {
+ u64 hdr_ver;
+ u8 encode;
+ u8 max_channels;
+ u8 active_channels;
+ u8 reserved_id;
+ u32 reserved[10];
+};
+
+/**
+ * struct xlnxsync_stat - Sync IP channel status
+ * @hdr_ver: IOCTL header version
+ * @fbdone: for every pair of luma/chroma buffer for every producer/consumer
+ * @enable: channel enable
+ * @sync_err: Synchronization error
+ * @wdg_err: Watchdog error
+ * @ldiff_err: Luma difference > 1 for channel
+ * @cdiff_err: Chroma difference > 1 for channel
+ */
+struct xlnxsync_stat {
+ u64 hdr_ver;
+ u8 fbdone[XLNXSYNC_BUF_PER_CHAN][XLNXSYNC_IO];
+ u8 enable;
+ u8 sync_err;
+ u8 wdg_err;
+ u8 ldiff_err;
+ u8 cdiff_err;
+};
+
+#define XLNXSYNC_MAGIC 'X'
+
+/*
+ * This ioctl is used to get the IP config (i.e. encode / decode)
+ * and max number of channels
+ */
+#define XLNXSYNC_GET_CFG _IOR(XLNXSYNC_MAGIC, 1,\
+ struct xlnxsync_config *)
+/* This ioctl is used to get the channel status */
+#define XLNXSYNC_CHAN_GET_STATUS _IOR(XLNXSYNC_MAGIC, 2, u32 *)
+/* This is used to set the framebuffer address for a channel */
+#define XLNXSYNC_CHAN_SET_CONFIG _IOW(XLNXSYNC_MAGIC, 3,\
+ struct xlnxsync_chan_config *)
+/* Enable a channel. */
+#define XLNXSYNC_CHAN_ENABLE _IO(XLNXSYNC_MAGIC, 4)
+/* Disable a channel. */
+#define XLNXSYNC_CHAN_DISABLE _IO(XLNXSYNC_MAGIC, 5)
+/* This is used to clear the Sync and Watchdog errors for a channel */
+#define XLNXSYNC_CHAN_CLR_ERR _IOW(XLNXSYNC_MAGIC, 6,\
+ struct xlnxsync_clr_err *)
+/* This is used to get the framebuffer done status for a channel */
+#define XLNXSYNC_CHAN_GET_FBDONE_STAT _IOR(XLNXSYNC_MAGIC, 7,\
+ struct xlnxsync_fbdone *)
+/* This is used to clear the framebuffer done status for a channel */
+#define XLNXSYNC_CHAN_CLR_FBDONE_STAT _IOW(XLNXSYNC_MAGIC, 8,\
+ struct xlnxsync_fbdone *)
+#endif
diff --git a/include/uapi/linux/zocl_ioctl.h b/include/uapi/linux/zocl_ioctl.h
new file mode 100644
index 000000000000..ee1f1e289cd8
--- /dev/null
+++ b/include/uapi/linux/zocl_ioctl.h
@@ -0,0 +1,125 @@
+/*
+ * A GEM style CMA backed memory manager for ZynQ based OpenCL accelerators.
+ *
+ * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
+ *
+ * Authors:
+ * Sonal Santan <sonal.santan@xilinx.com>
+ * Umang Parekh <umang.parekh@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _XCL_ZOCL_IOCTL_H_
+#define _XCL_ZOCL_IOCTL_H_
+
+enum {
+ DRM_ZOCL_CREATE_BO = 0,
+ DRM_ZOCL_MAP_BO,
+ DRM_ZOCL_SYNC_BO,
+ DRM_ZOCL_INFO_BO,
+ DRM_ZOCL_PWRITE_BO,
+ DRM_ZOCL_PREAD_BO,
+ DRM_ZOCL_NUM_IOCTLS
+};
+
+enum drm_zocl_sync_bo_dir {
+ DRM_ZOCL_SYNC_BO_TO_DEVICE,
+ DRM_ZOCL_SYNC_BO_FROM_DEVICE
+};
+
+#define DRM_ZOCL_BO_FLAGS_COHERENT 0x00000001
+#define DRM_ZOCL_BO_FLAGS_CMA 0x00000002
+
+struct drm_zocl_create_bo {
+ uint64_t size;
+ uint32_t handle;
+ uint32_t flags;
+};
+
+struct drm_zocl_map_bo {
+ uint32_t handle;
+ uint32_t pad;
+ uint64_t offset;
+};
+
+/**
+ * struct drm_zocl_sync_bo - used for SYNC_BO IOCTL
+ * @handle: GEM object handle
+ * @dir: DRM_ZOCL_SYNC_BO_XXX
+ * @offset: Offset into the object to sync
+ * @size: Length of data to sync
+ */
+struct drm_zocl_sync_bo {
+ uint32_t handle;
+ enum drm_zocl_sync_bo_dir dir;
+ uint64_t offset;
+ uint64_t size;
+};
+
+/**
+ * struct drm_zocl_info_bo - used for INFO_BO IOCTL
+ * @handle: GEM object handle
+ * @size: Size of BO
+ * @paddr: physical address
+ */
+struct drm_zocl_info_bo {
+ uint32_t handle;
+ uint64_t size;
+ uint64_t paddr;
+};
+
+/**
+ * struct drm_zocl_pwrite_bo - used for PWRITE_BO IOCTL
+ * @handle: GEM object handle
+ * @pad: Padding
+ * @offset: Offset into the object to write to
+ * @size: Length of data to write
+ * @data_ptr: Pointer to read the data from (pointers not 32/64 compatible)
+ */
+struct drm_zocl_pwrite_bo {
+ uint32_t handle;
+ uint32_t pad;
+ uint64_t offset;
+ uint64_t size;
+ uint64_t data_ptr;
+};
+
+/**
+ * struct drm_zocl_pread_bo - used for PREAD_BO IOCTL
+ * @handle: GEM object handle
+ * @pad: Padding
+ * @offset: Offset into the object to read from
+ * @size: Length of data to read
+ * @data_ptr: Pointer to write the data into (pointers not 32/64 compatible)
+ */
+struct drm_zocl_pread_bo {
+ uint32_t handle;
+ uint32_t pad;
+ uint64_t offset;
+ uint64_t size;
+ uint64_t data_ptr;
+};
+
+#define DRM_IOCTL_ZOCL_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_ZOCL_CREATE_BO, \
+ struct drm_zocl_create_bo)
+#define DRM_IOCTL_ZOCL_MAP_BO DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_ZOCL_MAP_BO, struct drm_zocl_map_bo)
+#define DRM_IOCTL_ZOCL_SYNC_BO DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_ZOCL_SYNC_BO, struct drm_zocl_sync_bo)
+#define DRM_IOCTL_ZOCL_INFO_BO DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_ZOCL_INFO_BO, struct drm_zocl_info_bo)
+#define DRM_IOCTL_ZOCL_PWRITE_BO DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_ZOCL_PWRITE_BO, \
+ struct drm_zocl_pwrite_bo)
+#define DRM_IOCTL_ZOCL_PREAD_BO DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_ZOCL_PREAD_BO, struct drm_zocl_pread_bo)
+#endif
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 398dd6c90ad0..da713e196a97 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -8579,7 +8579,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
/* Make sure the range is really isolated. */
if (test_pages_isolated(outer_start, end, 0)) {
- pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
+ pr_debug("%s: [%lx, %lx) PFNs busy\n",
__func__, outer_start, end);
ret = -EBUSY;
goto done;
diff --git a/samples/xilinx_apm/Makefile b/samples/xilinx_apm/Makefile
new file mode 100644
index 000000000000..6182750c4479
--- /dev/null
+++ b/samples/xilinx_apm/Makefile
@@ -0,0 +1,71 @@
+#
+# 'make depend' uses makedepend to automatically generate dependencies
+# (dependencies are added to end of Makefile)
+# 'make' builds the executable file 'main'
+# 'make clean' removes all .o and executable files
+#
+
+# define the C compiler to use
+CC = $(CROSS_COMPILE)gcc
+
+# define any compile-time flags
+CFLAGS = -Wall -g
+
+# define any directories containing header files other than /usr/include
+#
+INCLUDES =
+
+# define library paths in addition to /usr/lib
+# if I wanted to include libraries not in /usr/lib I'd specify
+# their path using -Lpath, something like:
+LFLAGS =
+
+# define any libraries to link into executable:
+# if I want to link in libraries (libx.so or libx.a) I use the -llibname
+# option, something like (this will link in libmylib.so and libm.so):
+LIBS = -lm
+
+# define the C source files
+SRCS = main.c xaxipmon.c
+
+# define the C object files
+#
+# This uses Suffix Replacement within a macro:
+# $(name:string1=string2)
+# For each word in 'name' replace 'string1' with 'string2'
+# Below we are replacing the suffix .c of all words in the macro SRCS
+# with the .o suffix
+#
+OBJS = $(SRCS:.c=.o)
+
+# define the executable file
+MAIN = main
+
+#
+# The following part of the makefile is generic; it can be used to
+# build any executable just by changing the definitions above and by
+# deleting dependencies appended to the file from 'make depend'
+#
+
+.PHONY: depend clean
+
+all: $(MAIN)
+ @echo Xilinx AXI Performance Monitor application compiled
+
+$(MAIN): $(OBJS)
+ $(CC) $(CFLAGS) $(INCLUDES) -o $(MAIN) $(OBJS) $(LFLAGS) $(LIBS)
+
+# this is a suffix replacement rule for building .o's from .c's
+# it uses automatic variables $<: the name of the prerequisite of
+# the rule(a .c file) and $@: the name of the target of the rule (a .o file)
+# (see the gnu make manual section about automatic variables)
+.c.o:
+ $(CC) $(CFLAGS) $(INCLUDES) -c $< -o $@
+
+clean:
+ $(RM) *.o *~ $(MAIN)
+
+depend: $(SRCS)
+ makedepend $(INCLUDES) $^
+
+# DO NOT DELETE THIS LINE -- make depend needs it
diff --git a/samples/xilinx_apm/main.c b/samples/xilinx_apm/main.c
new file mode 100644
index 000000000000..2a7eda4ab256
--- /dev/null
+++ b/samples/xilinx_apm/main.c
@@ -0,0 +1,134 @@
+/*
+ * Xilinx AXI Performance Monitor Example
+ *
+ * Copyright (c) 2013 Xilinx Inc.
+ *
+ * The code may be used by anyone for any purpose and can serve as a
+ * starting point for developing applications using Xilinx AXI
+ * Performance Monitor.
+ *
+ * This example, based on the Xilinx AXI Performance Monitor UIO driver,
+ * shows the sequence to read metrics from the Xilinx AXI Performance
+ * Monitor IP. The user needs to provide the UIO device file with option -d:
+ * main -d /dev/uio0, say /dev/uio0 as device file for the AXI Performance
+ * Monitor driver. The user need not clear the Interrupt Status Register
+ * after waiting for an interrupt on read, since the driver clears it.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <sys/msg.h>
+#include <sys/ipc.h>
+#include <stdint.h>
+#include "xaxipmon.h"
+
+#define MAP_SIZE 4096
+
+void usage(void)
+{
+ printf("*argv[0] -d <UIO_DEV_FILE> -i|-o <VALUE>\n");
+ printf(" -d UIO device file. e.g. /dev/uio0\n");
+ return;
+}
+
+static void start(int fd)
+{
+ u8 slot = 2;
+ int tmp;
+ u32 isr;
+
+ setmetrics(slot, XAPM_METRIC_SET_4, XAPM_METRIC_COUNTER_0);
+ setsampleinterval(0x3FFFFFF);
+
+ loadsic();
+
+ intrenable(XAPM_IXR_SIC_OVERFLOW_MASK);
+
+ intrglobalenable();
+
+ enablemetricscounter();
+
+ enablesic();
+
+ isr = intrgetstatus();
+ /* Wait for SIC overflow interrupt */
+ if (read(fd, &tmp, 4) < 0)
+ perror("Read\n");
+ /* Driver clears the interrupt and the occurred interrupt status is
+ stored in params->isr */
+ isr = intrgetstatus();
+ if (isr & XAPM_IXR_SIC_OVERFLOW_MASK)
+ disablesic();
+
+ disablemetricscounter();
+
+ intrdisable(XAPM_IXR_SIC_OVERFLOW_MASK);
+
+ intrglobaldisable();
+
+ printf("Required metrics: %u\n",
+ getsampledmetriccounter(XAPM_METRIC_COUNTER_0) *
+ params->scalefactor);
+}
+
+int main(int argc, char *argv[])
+{
+ int c;
+ char *uiod;
+ int fd;
+
+ while ((c = getopt(argc, argv, "d:h")) != -1) {
+ switch (c) {
+ case 'd':
+ uiod = optarg;
+ break;
+ case 'h':
+ usage();
+ return 0;
+ default:
+ printf("invalid option: %c\n", (char)c);
+ usage();
+ return -1;
+ }
+ }
+
+ /* Open the UIO device file */
+ fd = open(uiod, O_RDWR);
+ if (fd < 1) {
+ perror(argv[0]);
+ printf("Invalid UIO device file:%s.\n", uiod);
+ usage();
+ return -1;
+ }
+
+ baseaddr = (ulong)mmap(0, MAP_SIZE , PROT_READ|PROT_WRITE,
+ MAP_SHARED , fd, 0);
+ if ((u32 *)baseaddr == MAP_FAILED)
+ perror("mmap failed\n");
+
+ /* mmap the UIO device */
+ params = (struct xapm_param *)mmap(0, MAP_SIZE , PROT_READ|PROT_WRITE,
+ MAP_SHARED , fd, getpagesize());
+ if (params == MAP_FAILED)
+ perror("mmap failed\n");
+
+ if (params->mode == 1)
+ printf("AXI PMON is in Advanced Mode\n");
+ else if (params->mode == 2)
+ printf("AXI PMON is in Profile Mode\n");
+ else
+ printf("AXI PMON is in trace Mode\n");
+
+ start(fd);
+
+ close(fd);
+ munmap((u32 *)baseaddr, MAP_SIZE);
+ munmap(params, MAP_SIZE);
+
+ return 0;
+}
diff --git a/samples/xilinx_apm/xaxipmon.c b/samples/xilinx_apm/xaxipmon.c
new file mode 100644
index 000000000000..94a4e7511057
--- /dev/null
+++ b/samples/xilinx_apm/xaxipmon.c
@@ -0,0 +1,1269 @@
+#include "xaxipmon.h"
+/*****************************************************************************/
+/**
+* resetmetriccounter - reset all Metric and Sampled Metric Counters.
+*
+* Pulses XAPM_CR_MCNTR_RESET_MASK in the control register: asserts the
+* reset bit, then restores the original register value to release it.
+*
+* @return	XST_SUCCESS always.
+*
+******************************************************************************/
+int resetmetriccounter(void)
+{
+	u32 ctl = readreg(baseaddr, XAPM_CTL_OFFSET);
+
+	/* Assert counter reset */
+	writereg(baseaddr, XAPM_CTL_OFFSET, ctl | XAPM_CR_MCNTR_RESET_MASK);
+	/* Release counter reset */
+	writereg(baseaddr, XAPM_CTL_OFFSET, ctl & ~(XAPM_CR_MCNTR_RESET_MASK));
+
+	return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+* resetglobalclkcounter - reset the Global Clock Counter.
+*
+* Pulses XAPM_CR_GCC_RESET_MASK in the control register (assert, then
+* release using the previously read value).
+*
+* @return	None.
+*
+******************************************************************************/
+void resetglobalclkcounter(void)
+{
+	u32 ctl = readreg(baseaddr, XAPM_CTL_OFFSET);
+
+	/* Assert GCC reset */
+	writereg(baseaddr, XAPM_CTL_OFFSET, ctl | XAPM_CR_GCC_RESET_MASK);
+	/* Release GCC reset */
+	writereg(baseaddr, XAPM_CTL_OFFSET, ctl & ~(XAPM_CR_GCC_RESET_MASK));
+}
+
+/*****************************************************************************/
+/**
+* resetfifo - reset the event-log streaming FIFO.
+*
+* A no-op (still XST_SUCCESS) when event logging is not built into the
+* hardware (params->eventlog == 0); otherwise pulses
+* XAPM_CR_FIFO_RESET_MASK in the control register.
+*
+* @return	XST_SUCCESS always.
+*
+******************************************************************************/
+int resetfifo(void)
+{
+	u32 ctl;
+
+	/* Nothing to reset if event logging is absent in hardware */
+	if (params->eventlog == 0)
+		return XST_SUCCESS;
+
+	ctl = readreg(baseaddr, XAPM_CTL_OFFSET);
+	/* Assert FIFO reset */
+	writereg(baseaddr, XAPM_CTL_OFFSET, ctl | XAPM_CR_FIFO_RESET_MASK);
+	/* Release FIFO reset */
+	writereg(baseaddr, XAPM_CTL_OFFSET, ctl & ~(XAPM_CR_FIFO_RESET_MASK));
+
+	return XST_SUCCESS;
+}
+
+/****************************************************************************/
+/**
+* setincrementerrange - program the range limits of an Incrementer.
+*
+* Packs rangehigh into bits [31:16] and rangelow into bits [15:0] of the
+* Range register for the given incrementer (registers are 16 bytes apart).
+*
+* @param	incrementer is the Incrementer whose range is set.
+* @param	rangehigh is the upper 16-bit limit.
+* @param	rangelow is the lower 16-bit limit.
+*
+* @return	None.
+*
+*****************************************************************************/
+void setincrementerrange(u8 incrementer, u16 rangehigh, u16 rangelow)
+{
+	u32 range = ((u32)rangehigh << 16) | rangelow;
+
+	writereg(baseaddr, XAPM_RANGE0_OFFSET + (incrementer * 16), range);
+}
+
+/****************************************************************************/
+/**
+* getincrementerrange - read back the range limits of an Incrementer.
+*
+* @param	incrementer is the Incrementer whose range is read.
+* @param	rangehigh returns the upper limit (register bits [31:16]).
+* @param	rangelow returns the lower limit (register bits [15:0]).
+*
+* @return	None.
+*
+*****************************************************************************/
+void getincrementerrange(u8 incrementer, u16 *rangehigh, u16 *rangelow)
+{
+	u32 range = readreg(baseaddr,
+			    (XAPM_RANGE0_OFFSET + (incrementer * 16)));
+
+	*rangehigh = (range >> 16) & 0xFFFF;
+	*rangelow = range & 0xFFFF;
+}
+
+/****************************************************************************/
+/**
+* setsampleinterval - program the Sample Interval register (lower word).
+*
+* @param	sampleinterval is the sample interval in clock cycles.
+*
+* @return	None.
+*
+*****************************************************************************/
+void setsampleinterval(u32 sampleinterval)
+{
+	writereg(baseaddr, XAPM_SI_LOW_OFFSET, sampleinterval);
+}
+
+/****************************************************************************/
+/**
+* getsampleinterval - read the Sample Interval register (lower word).
+*
+* @param	sampleinterval returns the current sample interval.
+*
+* @return	None.
+*
+******************************************************************************/
+void getsampleinterval(u32 *sampleinterval)
+{
+	/* Read (not set) the lower Sample Interval word */
+	*sampleinterval = readreg(baseaddr, XAPM_SI_LOW_OFFSET);
+}
+
+/****************************************************************************/
+/**
+*
+* This function sets metrics for specified Counter in the corresponding
+* Metric Selector Register.
+*
+* Register layout: each of the three Metric Selector Registers packs four
+* counters, one per byte lane; within a lane, bits [4:0] select the metric
+* and bits [7:5] select the slot.
+*
+* @param	slot is the slot ID for which specified counter has to
+*		be connected.
+* @param	metrics is one of the Metric Sets. User has to use
+*		XAPM_METRIC_SET_* macros in xaxipmon.h for this parameter
+* @param	counter is the Counter Number.
+*		The valid values are 0 to 9.
+*
+* @return	XST_SUCCESS if Success
+*		XST_FAILURE if Failure
+*
+* @note		None.
+*
+*****************************************************************************/
+int setmetrics(u8 slot, u8 metrics, u8 counter)
+{
+	u32 regval;
+	u32 mask;
+
+	/* Find mask value to force zero in counternum byte range */
+	if (counter == 0 || counter == 4 || counter == 8)
+		mask = 0xFFFFFF00;
+	else if (counter == 1 || counter == 5 || counter == 9)
+		mask = 0xFFFF00FF;
+	else if (counter == 2 || counter == 6)
+		mask = 0xFF00FFFF;
+	else
+		mask = 0x00FFFFFF;
+
+	/* Counters 0-3 live in MSR0, 4-7 in MSR1, 8-9 in MSR2; the counter
+	 * index is rebased so (counter * 8) addresses the byte lane. */
+	if (counter <= 3) {
+		regval = readreg(baseaddr, XAPM_MSR0_OFFSET);
+		regval = regval & mask;
+		regval = regval | (metrics << (counter * 8));
+		regval = regval | (slot << (counter * 8 + 5));
+		writereg(baseaddr, XAPM_MSR0_OFFSET, regval);
+	} else if ((counter >= 4) && (counter <= 7)) {
+		counter = counter - 4;
+		regval = readreg(baseaddr, XAPM_MSR1_OFFSET);
+		regval = regval & mask;
+		regval = regval | (metrics << (counter * 8));
+		regval = regval | (slot << (counter * 8 + 5));
+		writereg(baseaddr, XAPM_MSR1_OFFSET, regval);
+	} else {
+		counter = counter - 8;
+		regval = readreg(baseaddr, XAPM_MSR2_OFFSET);
+
+		regval = regval & mask;
+		regval = regval | (metrics << (counter * 8));
+		regval = regval | (slot << (counter * 8 + 5));
+		writereg(baseaddr, XAPM_MSR2_OFFSET, regval);
+	}
+	return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+* getmetrics - read back metric and slot selection for a counter.
+*
+* Each Metric Selector Register packs four counters, one per byte lane:
+* bits [4:0] of a lane hold the metric, bits [7:5] hold the slot.
+*
+* @param	counter is the Counter Number (valid values 0 to 9).
+* @param	metrics returns the metric selected for the counter.
+* @param	slot returns the slot ID connected to the counter.
+*
+* @return	XST_SUCCESS if Success
+*		XST_FAILURE if Failure
+*
+*****************************************************************************/
+int getmetrics(u8 counter, u8 *metrics, u8 *slot)
+{
+	u32 msr;
+	u32 offset;
+
+	/* Pick the selector register and rebase counter to its byte lane */
+	if (counter <= 3) {
+		offset = XAPM_MSR0_OFFSET;
+	} else if (counter <= 7) {
+		offset = XAPM_MSR1_OFFSET;
+		counter = counter - 4;
+	} else {
+		offset = XAPM_MSR2_OFFSET;
+		counter = counter - 8;
+	}
+
+	msr = readreg(baseaddr, offset);
+	*metrics = (msr >> (counter * 8)) & 0x1F;
+	*slot = (msr >> (counter * 8 + 5)) & 0x7;
+	return XST_SUCCESS;
+}
+
+/****************************************************************************/
+/**
+* getglobalclkcounter - read the Global Clock Counter.
+*
+* @param	cnthigh returns bits [63:32]; left at zero unless the
+*		hardware counter is built 64 bits wide.
+* @param	cntlow returns bits [31:0].
+*
+* @return	None.
+*
+*****************************************************************************/
+void getglobalclkcounter(u32 *cnthigh, u32 *cntlow)
+{
+	*cnthigh = 0x0;
+	*cntlow = 0x0;
+
+	/* The upper word register only exists for a 64-bit counter */
+	if (params->globalcntwidth == 64)
+		*cnthigh = readreg(baseaddr, XAPM_GCC_HIGH_OFFSET);
+
+	*cntlow = readreg(baseaddr, XAPM_GCC_LOW_OFFSET);
+}
+
+/****************************************************************************/
+/**
+* getmetriccounter - read a Metric Counter register.
+*
+* @param	counter is the Metric Counter number; use the
+*		XAPM_METRIC_COUNTER* defines (valid values 0 to 9).
+*		Counter registers are spaced 16 bytes apart.
+*
+* @return	Contents of the specified Metric Counter.
+*
+*****************************************************************************/
+u32 getmetriccounter(u32 counter)
+{
+	return readreg(baseaddr, XAPM_MC0_OFFSET + (counter * 16));
+}
+
+/****************************************************************************/
+/**
+* getsampledmetriccounter - read a Sampled Metric Counter register.
+*
+* @param	counter is the Sampled Metric Counter number; use the
+*		XAPM_METRIC_COUNTER* defines (valid values 0 to 9).
+*		Registers are spaced 16 bytes apart.
+*
+* @return	Contents of the specified Sampled Metric Counter.
+*
+*****************************************************************************/
+u32 getsampledmetriccounter(u32 counter)
+{
+	return readreg(baseaddr, XAPM_SMC0_OFFSET + (counter * 16));
+}
+
+/****************************************************************************/
+/**
+* getincrementer - read an Incrementer register.
+*
+* @param	incrementer is the Incrementer number; use the
+*		XAPM_INCREMENTER_* defines (valid values 0 to 9).
+*		Registers are spaced 16 bytes apart.
+*
+* @return	Contents of the specified Incrementer register.
+*
+*****************************************************************************/
+u32 getincrementer(u32 incrementer)
+{
+	return readreg(baseaddr, XAPM_INC0_OFFSET + (incrementer * 16));
+}
+
+/****************************************************************************/
+/**
+* getsampledincrementer - read a Sampled Incrementer register.
+*
+* @param	incrementer is the Sampled Incrementer number; use the
+*		XAPM_INCREMENTER_* defines (valid values 0 to 9).
+*		Registers are spaced 16 bytes apart.
+*
+* @return	Contents of the specified Sampled Incrementer register.
+*
+*****************************************************************************/
+u32 getsampledincrementer(u32 incrementer)
+{
+	return readreg(baseaddr, XAPM_SINC0_OFFSET + (incrementer * 16));
+}
+
+/****************************************************************************/
+/**
+* setswdatareg - write the Software-written Data register.
+*
+* @param	swdata is the value to store.
+*
+* @return	None.
+*
+*****************************************************************************/
+void setswdatareg(u32 swdata)
+{
+	writereg(baseaddr, XAPM_SWD_OFFSET, swdata);
+}
+
+/****************************************************************************/
+/**
+* getswdatareg - read the Software-written Data register.
+*
+* @return	Contents of the Software-written Data register.
+*
+*****************************************************************************/
+u32 getswdatareg(void)
+{
+	return (u32)readreg(baseaddr, XAPM_SWD_OFFSET);
+}
+
+/*****************************************************************************/
+/**
+* starteventlog - enable event logging to the streaming FIFO.
+*
+* Writes the flag-enables selection first, then sets
+* XAPM_CR_EVENTLOG_ENABLE_MASK in the control register.
+*
+* @param	flagenables is written to the flag enables register
+*		(XAPM_FEC_OFFSET); build it from XAPM_FEC_*_MASK bits.
+*		0x0 disables all events to the event log FIFO.
+*
+* @return	XST_SUCCESS always.
+*
+******************************************************************************/
+int starteventlog(u32 flagenables)
+{
+	u32 ctl = readreg(baseaddr, XAPM_CTL_OFFSET);
+
+	/* Select which events reach the FIFO, then switch logging on */
+	writereg(baseaddr, XAPM_FEC_OFFSET, flagenables);
+	writereg(baseaddr, XAPM_CTL_OFFSET,
+		 ctl | XAPM_CR_EVENTLOG_ENABLE_MASK);
+	return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+* stopeventlog - disable event logging.
+*
+* Clears XAPM_CR_EVENTLOG_ENABLE_MASK in the control register.
+*
+* @return	XST_SUCCESS always.
+*
+******************************************************************************/
+int stopeventlog(void)
+{
+	u32 ctl = readreg(baseaddr, XAPM_CTL_OFFSET);
+
+	writereg(baseaddr, XAPM_CTL_OFFSET,
+		 ctl & ~XAPM_CR_EVENTLOG_ENABLE_MASK);
+	return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+* startcounters - start metric counting and sampling.
+*
+* Enables the metric counters (and, in Advanced mode only, the global
+* clock counter), then programs, loads and enables the sampled counters.
+*
+* @param	sampleinterval is the sample interval.
+*
+* @return	XST_SUCCESS always.
+*
+******************************************************************************/
+int startcounters(u32 sampleinterval)
+{
+	u32 ctl = readreg(baseaddr, XAPM_CTL_OFFSET);
+
+	/* The Global Clock Counter exists in Advanced mode only */
+	if (params->mode == 1)
+		ctl |= XAPM_CR_GCC_ENABLE_MASK;
+
+	writereg(baseaddr, XAPM_CTL_OFFSET, ctl | XAPM_CR_MCNTR_ENABLE_MASK);
+
+	/* Set, load and enable the sampled counters */
+	setsampleinterval(sampleinterval);
+	loadsic();
+	enablesic();
+
+	return XST_SUCCESS;
+}
+
+/****************************************************************************/
+/**
+* stopcounters - stop metric counting.
+*
+* Disables the metric counters and, in Advanced mode only, the global
+* clock counter.
+*
+* @return	XST_SUCCESS always.
+*
+******************************************************************************/
+int stopcounters(void)
+{
+	u32 ctl = readreg(baseaddr, XAPM_CTL_OFFSET);
+
+	/* The Global Clock Counter exists in Advanced mode only */
+	if (params->mode == 1)
+		ctl &= ~XAPM_CR_GCC_ENABLE_MASK;
+
+	writereg(baseaddr, XAPM_CTL_OFFSET, ctl & ~XAPM_CR_MCNTR_ENABLE_MASK);
+
+	return XST_SUCCESS;
+}
+
+/*****************************************************************************/
+/**
+* enablemetricscounter - set the metric counter enable bit.
+*
+* @return	None.
+*
+******************************************************************************/
+void enablemetricscounter(void)
+{
+	u32 ctl = readreg(baseaddr, XAPM_CTL_OFFSET);
+
+	writereg(baseaddr, XAPM_CTL_OFFSET, ctl | XAPM_CR_MCNTR_ENABLE_MASK);
+}
+/****************************************************************************/
+/**
+* disablemetricscounter - clear the metric counter enable bit.
+*
+* @return	None.
+*
+*****************************************************************************/
+void disablemetricscounter(void)
+{
+	u32 ctl = readreg(baseaddr, XAPM_CTL_OFFSET);
+
+	writereg(baseaddr, XAPM_CTL_OFFSET,
+		 ctl & ~(XAPM_CR_MCNTR_ENABLE_MASK));
+}
+
+/****************************************************************************/
+/**
+* setlogenableranges - program a Metric Counter Log Enable register.
+*
+* Event logging starts when the corresponding Metric Counter value falls
+* between the two limits. rangehigh goes to bits [31:16], rangelow to
+* bits [15:0]; registers are spaced 16 bytes apart.
+*
+* @param	counter is the Metric Counter number; use the
+*		XAPM_METRIC_COUNTER* defines (valid values 0 to 9).
+* @param	rangehigh is the upper 16-bit limit.
+* @param	rangelow is the lower 16-bit limit.
+*
+* @return	None.
+*
+*****************************************************************************/
+void setlogenableranges(u32 counter, u16 rangehigh, u16 rangelow)
+{
+	u32 range = ((u32)rangehigh << 16) | rangelow;
+
+	writereg(baseaddr, XAPM_MC0LOGEN_OFFSET + (counter * 16), range);
+}
+
+/****************************************************************************/
+/**
+* getlogenableranges - read back a Metric Counter Log Enable register.
+*
+* @param	counter is the Metric Counter number; use the
+*		XAPM_METRIC_COUNTER* defines (valid values 0 to 9).
+* @param	rangehigh returns the upper limit (bits [31:16]).
+* @param	rangelow returns the lower limit (bits [15:0]).
+*
+* @return	None.
+*
+*****************************************************************************/
+void getlogenableranges(u32 counter, u16 *rangehigh, u16 *rangelow)
+{
+	u32 range = readreg(baseaddr,
+			    (XAPM_MC0LOGEN_OFFSET + (counter * 16)));
+
+	*rangehigh = (range >> 16) & 0xFFFF;
+	*rangelow = range & 0xFFFF;
+}
+
+/*****************************************************************************/
+/**
+* enableeventlog - set the event logging enable bit.
+*
+* @return	None.
+*
+******************************************************************************/
+void enableeventlog(void)
+{
+	u32 ctl = readreg(baseaddr, XAPM_CTL_OFFSET);
+
+	writereg(baseaddr, XAPM_CTL_OFFSET,
+		 ctl | XAPM_CR_EVENTLOG_ENABLE_MASK);
+}
+
+/*****************************************************************************/
+/**
+* enablemctrigger - allow metric counters to start on an external
+* trigger pulse for a slot.
+*
+* @return	None.
+*
+******************************************************************************/
+void enablemctrigger(void)
+{
+	u32 ctl = readreg(baseaddr, XAPM_CTL_OFFSET);
+
+	writereg(baseaddr, XAPM_CTL_OFFSET,
+		 ctl | XAPM_CR_MCNTR_EXTTRIGGER_MASK);
+}
+
+/****************************************************************************/
+/**
+* disablemctrigger - stop metric counters from starting on an external
+* trigger pulse.
+*
+* @return	None.
+*
+*****************************************************************************/
+void disablemctrigger(void)
+{
+	u32 ctl = readreg(baseaddr, XAPM_CTL_OFFSET);
+
+	writereg(baseaddr, XAPM_CTL_OFFSET,
+		 ctl & ~(XAPM_CR_MCNTR_EXTTRIGGER_MASK));
+}
+
+/*****************************************************************************/
+/**
+* enableeventlogtrigger - allow event logging to start on an external
+* trigger pulse for a slot.
+*
+* @return	None.
+*
+******************************************************************************/
+void enableeventlogtrigger(void)
+{
+	u32 ctl = readreg(baseaddr, XAPM_CTL_OFFSET);
+
+	writereg(baseaddr, XAPM_CTL_OFFSET,
+		 ctl | XAPM_CR_EVTLOG_EXTTRIGGER_MASK);
+}
+
+/****************************************************************************/
+/**
+* disableeventlogtrigger - stop event logging from starting on an
+* external trigger pulse.
+*
+* @return	None.
+*
+*****************************************************************************/
+void disableeventlogtrigger(void)
+{
+	u32 ctl = readreg(baseaddr, XAPM_CTL_OFFSET);
+
+	writereg(baseaddr, XAPM_CTL_OFFSET,
+		 ctl & ~(XAPM_CR_EVTLOG_EXTTRIGGER_MASK));
+}
+
+/****************************************************************************/
+/**
+* getmetricname - map a metric set to a human-readable name.
+*
+* @param	metrics is one of the XAPM_METRIC_SET_* values from
+*		xaxipmon.h.
+*
+* @return	Pointer to a static name string, or "Unsupported" for an
+*		unknown metric set.
+*
+*****************************************************************************/
+const char *getmetricname(u8 metrics)
+{
+	switch (metrics) {
+	case XAPM_METRIC_SET_0:
+		return "Write Transaction Count";
+	case XAPM_METRIC_SET_1:
+		return "Read Transaction Count";
+	case XAPM_METRIC_SET_2:
+		return "Write Byte Count";
+	case XAPM_METRIC_SET_3:
+		return "Read Byte Count";
+	case XAPM_METRIC_SET_4:
+		return "Write Beat Count";
+	case XAPM_METRIC_SET_5:
+		return "Total Read Latency";
+	case XAPM_METRIC_SET_6:
+		return "Total Write Latency";
+	case XAPM_METRIC_SET_7:
+		return "Slv_Wr_Idle_Cnt";
+	case XAPM_METRIC_SET_8:
+		return "Mst_Rd_Idle_Cnt";
+	case XAPM_METRIC_SET_9:
+		return "Num_BValids";
+	case XAPM_METRIC_SET_10:
+		return "Num_WLasts";
+	case XAPM_METRIC_SET_11:
+		return "Num_RLasts";
+	case XAPM_METRIC_SET_12:
+		return "Minimum Write Latency";
+	case XAPM_METRIC_SET_13:
+		return "Maximum Write Latency";
+	case XAPM_METRIC_SET_14:
+		return "Minimum Read Latency";
+	case XAPM_METRIC_SET_15:
+		return "Maximum Read Latency";
+	case XAPM_METRIC_SET_16:
+		return "Transfer Cycle Count";
+	case XAPM_METRIC_SET_17:
+		return "Packet Count";
+	case XAPM_METRIC_SET_18:
+		return "Data Byte Count";
+	case XAPM_METRIC_SET_19:
+		return "Position Byte Count";
+	case XAPM_METRIC_SET_20:
+		return "Null Byte Count";
+	case XAPM_METRIC_SET_21:
+		return "Slv_Idle_Cnt";
+	case XAPM_METRIC_SET_22:
+		return "Mst_Idle_Cnt";
+	case XAPM_METRIC_SET_30:
+		return "External event count";
+	default:
+		return "Unsupported";
+	}
+}
+
+/****************************************************************************/
+/**
+* setwriteid - program the Write ID used for write latency capture.
+*
+* With 16-bit filtering the ID shares the Latency ID register with the
+* Read ID, so only the write-ID field is updated; with 32-bit filtering
+* the whole register holds the Write ID.
+*
+* @param	writeid is the Write ID to program.
+*
+* @return	None.
+*
+*****************************************************************************/
+void setwriteid(u32 writeid)
+{
+	u32 regval;
+
+	if (params->is_32bit_filter) {
+		writereg(baseaddr, XAPM_ID_OFFSET, writeid);
+	} else {
+		regval = readreg(baseaddr, XAPM_ID_OFFSET);
+		regval = (regval & ~(XAPM_ID_WID_MASK)) | writeid;
+		writereg(baseaddr, XAPM_ID_OFFSET, regval);
+	}
+}
+
+/****************************************************************************/
+/**
+* setreadid - program the Read ID used for read latency capture.
+*
+* With 16-bit filtering the Read ID occupies the upper half of the
+* shared Latency ID register; with 32-bit filtering it has a dedicated
+* register (XAPM_RID_OFFSET).
+*
+* @param	readid is the Read ID to program.
+*
+* @return	None.
+*
+*****************************************************************************/
+void setreadid(u32 readid)
+{
+	u32 regval;
+
+	if (params->is_32bit_filter) {
+		writereg(baseaddr, XAPM_RID_OFFSET, readid);
+	} else {
+		regval = readreg(baseaddr, XAPM_ID_OFFSET);
+		regval = (regval & ~(XAPM_ID_RID_MASK)) | (readid << 16);
+		writereg(baseaddr, XAPM_ID_OFFSET, regval);
+	}
+}
+
+/****************************************************************************/
+/**
+* getwriteid - read back the Write ID from the Latency ID register.
+*
+* Bug fix: the 32-bit-filter branch previously returned the register
+* offset constant (XAPM_IDMASK_OFFSET) instead of reading hardware; it
+* now reads the register setwriteid() writes in that mode.
+*
+* @return	The Write ID currently programmed.
+*
+*****************************************************************************/
+u32 getwriteid(void)
+{
+	u32 writeid;
+	u32 regval;
+
+	if (params->is_32bit_filter == 0) {
+		regval = readreg(baseaddr, XAPM_ID_OFFSET);
+		writeid = regval & XAPM_ID_WID_MASK;
+	} else {
+		/* 32-bit filtering: the whole ID register is the Write ID */
+		writeid = readreg(baseaddr, XAPM_ID_OFFSET);
+	}
+
+	return writeid;
+}
+
+/****************************************************************************/
+/**
+* getreadid - read back the Read ID from the Latency ID register.
+*
+* Bug fix: the 32-bit-filter branch previously returned the register
+* offset constant (XAPM_RID_OFFSET) instead of reading hardware; it now
+* reads the register setreadid() writes in that mode.
+*
+* @return	The Read ID currently programmed.
+*
+*****************************************************************************/
+u32 getreadid(void)
+{
+	u32 readid;
+	u32 regval;
+
+	if (params->is_32bit_filter == 0) {
+		regval = readreg(baseaddr, XAPM_ID_OFFSET);
+		readid = (regval & XAPM_ID_RID_MASK) >> 16;
+	} else {
+		/* 32-bit filtering: Read ID has a dedicated register */
+		readid = readreg(baseaddr, XAPM_RID_OFFSET);
+	}
+
+	return readid;
+}
+
+/*****************************************************************************/
+/**
+* setwrlatencystart - select the write latency start point.
+*
+* Improvement: read-modify-write the control register once instead of
+* re-reading it in the clear path.
+*
+* @param	param is 0 - XAPM_LATENCY_ADDR_ISSUE
+*		or 1 - XAPM_LATENCY_ADDR_ACCEPT.
+*
+* @return	None.
+*
+******************************************************************************/
+void setwrlatencystart(u8 param)
+{
+	u32 regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+
+	if (param == XAPM_LATENCY_ADDR_ACCEPT)
+		regval |= XAPM_CR_WRLATENCY_START_MASK;
+	else
+		regval &= ~(XAPM_CR_WRLATENCY_START_MASK);
+	writereg(baseaddr, XAPM_CTL_OFFSET, regval);
+}
+
+/*****************************************************************************/
+/**
+* setwrlatencyend - select the write latency end point.
+*
+* Improvement: read-modify-write the control register once instead of
+* re-reading it in the clear path.
+*
+* @param	param is 0 - XAPM_LATENCY_LASTWR
+*		or 1 - XAPM_LATENCY_FIRSTWR.
+*
+* @return	None.
+*
+******************************************************************************/
+void setwrlatencyend(u8 param)
+{
+	u32 regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+
+	if (param == XAPM_LATENCY_FIRSTWR)
+		regval |= XAPM_CR_WRLATENCY_END_MASK;
+	else
+		regval &= ~(XAPM_CR_WRLATENCY_END_MASK);
+	writereg(baseaddr, XAPM_CTL_OFFSET, regval);
+}
+
+/*****************************************************************************/
+/**
+* setrdlatencystart - select the read latency start point.
+*
+* Improvement: read-modify-write the control register once instead of
+* re-reading it in the clear path.
+*
+* @param	param is 0 - XAPM_LATENCY_ADDR_ISSUE
+*		or 1 - XAPM_LATENCY_ADDR_ACCEPT.
+*
+* @return	None.
+*
+******************************************************************************/
+void setrdlatencystart(u8 param)
+{
+	u32 regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+
+	if (param == XAPM_LATENCY_ADDR_ACCEPT)
+		regval |= XAPM_CR_RDLATENCY_START_MASK;
+	else
+		regval &= ~(XAPM_CR_RDLATENCY_START_MASK);
+	writereg(baseaddr, XAPM_CTL_OFFSET, regval);
+}
+
+/*****************************************************************************/
+/**
+* setrdlatencyend - select the read latency end point.
+*
+* Improvement: read-modify-write the control register once instead of
+* re-reading it in the clear path.
+*
+* @param	param is 0 - XAPM_LATENCY_LASTRD
+*		or 1 - XAPM_LATENCY_FIRSTRD.
+*
+* @return	None.
+*
+******************************************************************************/
+void setrdlatencyend(u8 param)
+{
+	u32 regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+
+	if (param == XAPM_LATENCY_FIRSTRD)
+		regval |= XAPM_CR_RDLATENCY_END_MASK;
+	else
+		regval &= ~(XAPM_CR_RDLATENCY_END_MASK);
+	writereg(baseaddr, XAPM_CTL_OFFSET, regval);
+}
+
+/*****************************************************************************/
+/**
+* getwrlatencystart - report the write latency start point.
+*
+* Fix: hold the register value in a u32; the previous u8 local truncated
+* the 32-bit control register before masking, which would always read the
+* bit as clear if XAPM_CR_WRLATENCY_START_MASK sits above bit 7.
+*
+* @return	0 - XAPM_LATENCY_ADDR_ISSUE or
+*		1 - XAPM_LATENCY_ADDR_ACCEPT.
+*
+******************************************************************************/
+u8 getwrlatencystart(void)
+{
+	u32 regval;
+
+	regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+	regval &= XAPM_CR_WRLATENCY_START_MASK;
+	if (regval != XAPM_LATENCY_ADDR_ISSUE)
+		return XAPM_LATENCY_ADDR_ACCEPT;
+	return XAPM_LATENCY_ADDR_ISSUE;
+}
+
+/*****************************************************************************/
+/**
+* getwrlatencyend - report the write latency end point.
+*
+* Fix: hold the register value in a u32; the previous u8 local truncated
+* the 32-bit control register before masking, losing the mask bit if it
+* sits above bit 7.
+*
+* @return	0 - XAPM_LATENCY_LASTWR or
+*		1 - XAPM_LATENCY_FIRSTWR.
+*
+******************************************************************************/
+u8 getwrlatencyend(void)
+{
+	u32 regval;
+
+	regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+	regval &= XAPM_CR_WRLATENCY_END_MASK;
+	if (regval != XAPM_LATENCY_LASTWR)
+		return XAPM_LATENCY_FIRSTWR;
+	return XAPM_LATENCY_LASTWR;
+}
+
+/*****************************************************************************/
+/**
+* getrdlatencystart - report the read latency start point.
+*
+* Fix: hold the register value in a u32; the previous u8 local truncated
+* the 32-bit control register before masking, losing the mask bit if it
+* sits above bit 7.
+*
+* @return	0 - XAPM_LATENCY_ADDR_ISSUE or
+*		1 - XAPM_LATENCY_ADDR_ACCEPT.
+*
+******************************************************************************/
+u8 getrdlatencystart(void)
+{
+	u32 regval;
+
+	regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+	regval &= XAPM_CR_RDLATENCY_START_MASK;
+	if (regval != XAPM_LATENCY_ADDR_ISSUE)
+		return XAPM_LATENCY_ADDR_ACCEPT;
+	return XAPM_LATENCY_ADDR_ISSUE;
+}
+
+/*****************************************************************************/
+/**
+* getrdlatencyend - report the read latency end point.
+*
+* Fix: hold the register value in a u32; the previous u8 local truncated
+* the 32-bit control register before masking, losing the mask bit if it
+* sits above bit 7.
+*
+* @return	0 - XAPM_LATENCY_LASTRD or
+*		1 - XAPM_LATENCY_FIRSTRD.
+*
+******************************************************************************/
+u8 getrdlatencyend(void)
+{
+	u32 regval;
+
+	regval = readreg(baseaddr, XAPM_CTL_OFFSET);
+	regval &= XAPM_CR_RDLATENCY_END_MASK;
+	if (regval != XAPM_LATENCY_LASTRD)
+		return XAPM_LATENCY_FIRSTRD;
+	return XAPM_LATENCY_LASTRD;
+}
+
+/****************************************************************************/
+/**
+* setwriteidmask - program the Write ID mask in the ID Mask register.
+*
+* With 16-bit filtering only the write-ID field of the shared ID Mask
+* register is updated; with 32-bit filtering the whole register holds
+* the Write ID mask.
+*
+* @param	wrmask is the Write ID mask to program.
+*
+* @return	None.
+*
+*****************************************************************************/
+void setwriteidmask(u32 wrmask)
+{
+	u32 regval;
+
+	if (params->is_32bit_filter) {
+		writereg(baseaddr, XAPM_IDMASK_OFFSET, wrmask);
+	} else {
+		regval = readreg(baseaddr, XAPM_IDMASK_OFFSET);
+		regval = (regval & ~(XAPM_MASKID_WID_MASK)) | wrmask;
+		writereg(baseaddr, XAPM_IDMASK_OFFSET, regval);
+	}
+}
+
+/****************************************************************************/
+/**
+* setreadidmask - program the Read ID mask in the ID Mask register.
+*
+* With 16-bit filtering the Read ID mask occupies the upper half of the
+* shared ID Mask register; with 32-bit filtering it has a dedicated
+* register (XAPM_RIDMASK_OFFSET).
+*
+* @param	rdmask is the Read ID mask to program.
+*
+* @return	None.
+*
+*****************************************************************************/
+void setreadidmask(u32 rdmask)
+{
+	u32 regval;
+
+	if (params->is_32bit_filter) {
+		writereg(baseaddr, XAPM_RIDMASK_OFFSET, rdmask);
+	} else {
+		regval = readreg(baseaddr, XAPM_IDMASK_OFFSET);
+		regval = (regval & ~(XAPM_MASKID_RID_MASK)) | (rdmask << 16);
+		writereg(baseaddr, XAPM_IDMASK_OFFSET, regval);
+	}
+}
+
+/****************************************************************************/
+/**
+* getwriteidmask - read back the Write ID mask from the ID Mask register.
+*
+* Bug fix: the 32-bit-filter branch previously returned the register
+* offset constant (XAPM_IDMASK_OFFSET) instead of reading hardware; it
+* now reads the register setwriteidmask() writes in that mode.
+*
+* @return	The Write ID mask currently programmed.
+*
+*****************************************************************************/
+u32 getwriteidmask(void)
+{
+	u32 wrmask;
+	u32 regval;
+
+	if (params->is_32bit_filter == 0) {
+		regval = readreg(baseaddr, XAPM_IDMASK_OFFSET);
+		wrmask = regval & XAPM_MASKID_WID_MASK;
+	} else {
+		/* 32-bit filtering: whole register is the Write ID mask */
+		wrmask = readreg(baseaddr, XAPM_IDMASK_OFFSET);
+	}
+	return wrmask;
+}
+
+/****************************************************************************/
+/**
+* getreadidmask - read back the Read ID mask from the ID Mask register.
+*
+* Bug fix: the 32-bit-filter branch previously returned the register
+* offset constant (XAPM_RIDMASK_OFFSET) instead of reading hardware; it
+* now reads the register setreadidmask() writes in that mode.
+*
+* @return	The Read ID mask currently programmed.
+*
+*****************************************************************************/
+u32 getreadidmask(void)
+{
+	u32 rdmask;
+	u32 regval;
+
+	if (params->is_32bit_filter == 0) {
+		regval = readreg(baseaddr, XAPM_IDMASK_OFFSET);
+		rdmask = (regval & XAPM_MASKID_RID_MASK) >> 16;
+	} else {
+		/* 32-bit filtering: Read ID mask has a dedicated register */
+		rdmask = readreg(baseaddr, XAPM_RIDMASK_OFFSET);
+	}
+	return rdmask;
+}
diff --git a/samples/xilinx_apm/xaxipmon.h b/samples/xilinx_apm/xaxipmon.h
new file mode 100644
index 000000000000..85e0e902a1c5
--- /dev/null
+++ b/samples/xilinx_apm/xaxipmon.h
@@ -0,0 +1,943 @@
+#ifndef XAXIPMON_H /* Prevent circular inclusions */
+#define XAXIPMON_H /* by using protection macros */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <stdbool.h>
+
+
+#define XST_SUCCESS 0
+#define XST_FAILURE 1
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#define XAPM_GCC_HIGH_OFFSET 0x0000 /* Global Clock Counter
+ 32 to 63 bits */
+#define XAPM_GCC_LOW_OFFSET 0x0004 /* Global Clock Counter Lower
+ 0-31 bits */
+#define XAPM_SI_HIGH_OFFSET 0x0020 /* Sample Interval MSB */
+#define XAPM_SI_LOW_OFFSET 0x0024 /* Sample Interval LSB */
+#define XAPM_SICR_OFFSET 0x0028 /* Sample Interval Control
+ Register */
+#define XAPM_SR_OFFSET 0x002C /* Sample Register */
+#define XAPM_GIE_OFFSET 0x0030 /* Global Interrupt Enable
+ Register */
+#define XAPM_IE_OFFSET 0x0034 /* Interrupt Enable Register */
+#define XAPM_IS_OFFSET 0x0038 /* Interrupt Status Register */
+
+#define XAPM_MSR0_OFFSET 0x0044 /* Metric Selector 0 Register*/
+#define XAPM_MSR1_OFFSET 0x0048 /* Metric Selector 1 Register*/
+#define XAPM_MSR2_OFFSET 0x004C /* Metric Selector 2 Register*/
+
+#define XAPM_MC0_OFFSET 0x0100 /* Metric Counter 0 Register */
+#define XAPM_INC0_OFFSET 0x0104 /* Incrementer 0 Register */
+#define XAPM_RANGE0_OFFSET 0x0108 /* Range 0 Register */
+#define XAPM_MC0LOGEN_OFFSET 0x010C /* Metric Counter 0
+ Log Enable Register */
+#define XAPM_MC1_OFFSET 0x0110 /* Metric Counter 1 Register */
+#define XAPM_INC1_OFFSET 0x0114 /* Incrementer 1 Register */
+#define XAPM_RANGE1_OFFSET 0x0118 /* Range 1 Register */
+#define XAPM_MC1LOGEN_OFFSET 0x011C /* Metric Counter 1
+ Log Enable Register */
+#define XAPM_MC2_OFFSET 0x0120 /* Metric Counter 2 Register */
+#define XAPM_INC2_OFFSET 0x0124 /* Incrementer 2 Register */
+#define XAPM_RANGE2_OFFSET 0x0128 /* Range 2 Register */
+#define XAPM_MC2LOGEN_OFFSET 0x012C /* Metric Counter 2
+ Log Enable Register */
+#define XAPM_MC3_OFFSET 0x0130 /* Metric Counter 3 Register */
+#define XAPM_INC3_OFFSET 0x0134 /* Incrementer 3 Register */
+#define XAPM_RANGE3_OFFSET 0x0138 /* Range 3 Register */
+#define XAPM_MC3LOGEN_OFFSET 0x013C /* Metric Counter 3
+ Log Enable Register */
+#define XAPM_MC4_OFFSET 0x0140 /* Metric Counter 4 Register */
+#define XAPM_INC4_OFFSET 0x0144 /* Incrementer 4 Register */
+#define XAPM_RANGE4_OFFSET 0x0148 /* Range 4 Register */
+#define XAPM_MC4LOGEN_OFFSET 0x014C /* Metric Counter 4
+ Log Enable Register */
+#define XAPM_MC5_OFFSET 0x0150 /* Metric Counter 5
+ Register */
+#define XAPM_INC5_OFFSET 0x0154 /* Incrementer 5 Register */
+#define XAPM_RANGE5_OFFSET 0x0158 /* Range 5 Register */
+#define XAPM_MC5LOGEN_OFFSET 0x015C /* Metric Counter 5
+ Log Enable Register */
+#define XAPM_MC6_OFFSET 0x0160 /* Metric Counter 6
+ Register */
+#define XAPM_INC6_OFFSET 0x0164 /* Incrementer 6 Register */
+#define XAPM_RANGE6_OFFSET 0x0168 /* Range 6 Register */
+#define XAPM_MC6LOGEN_OFFSET 0x016C /* Metric Counter 6
+ Log Enable Register */
+#define XAPM_MC7_OFFSET 0x0170 /* Metric Counter 7
+ Register */
+#define XAPM_INC7_OFFSET 0x0174 /* Incrementer 7 Register */
+#define XAPM_RANGE7_OFFSET 0x0178 /* Range 7 Register */
+#define XAPM_MC7LOGEN_OFFSET 0x017C /* Metric Counter 7
+ Log Enable Register */
+#define XAPM_MC8_OFFSET 0x0180 /* Metric Counter 8
+ Register */
+#define XAPM_INC8_OFFSET 0x0184 /* Incrementer 8 Register */
+#define XAPM_RANGE8_OFFSET 0x0188 /* Range 8 Register */
+#define XAPM_MC8LOGEN_OFFSET 0x018C /* Metric Counter 8
+ Log Enable Register */
+#define XAPM_MC9_OFFSET 0x0190 /* Metric Counter 9
+ Register */
+#define XAPM_INC9_OFFSET 0x0194 /* Incrementer 9 Register */
+#define XAPM_RANGE9_OFFSET 0x0198 /* Range 9 Register */
+#define XAPM_MC9LOGEN_OFFSET 0x019C /* Metric Counter 9
+ Log Enable Register */
+
+#define XAPM_MC10_OFFSET 0x01A0 /* Metric Counter 10
+ Register */
+#define XAPM_MC11_OFFSET 0x01B0 /* Metric Counter 11
+ Register */
+#define XAPM_MC12_OFFSET 0x0500 /* Metric Counter 12
+ Register */
+#define XAPM_MC13_OFFSET 0x0510 /* Metric Counter 13
+ Register */
+#define XAPM_MC14_OFFSET 0x0520 /* Metric Counter 14
+ Register */
+#define XAPM_MC15_OFFSET 0x0530 /* Metric Counter 15
+ Register */
+#define XAPM_MC16_OFFSET 0x0540 /* Metric Counter 16
+ Register */
+#define XAPM_MC17_OFFSET 0x0550 /* Metric Counter 17
+ Register */
+#define XAPM_MC18_OFFSET 0x0560 /* Metric Counter 18
+ Register */
+#define XAPM_MC19_OFFSET 0x0570 /* Metric Counter 19
+ Register */
+#define XAPM_MC20_OFFSET 0x0580 /* Metric Counter 20
+ Register */
+#define XAPM_MC21_OFFSET 0x0590 /* Metric Counter 21
+ Register */
+#define XAPM_MC22_OFFSET 0x05A0 /* Metric Counter 22
+ Register */
+#define XAPM_MC23_OFFSET 0x05B0 /* Metric Counter 23
+ Register */
+#define XAPM_MC24_OFFSET 0x0700 /* Metric Counter 24
+ Register */
+#define XAPM_MC25_OFFSET 0x0710 /* Metric Counter 25
+ Register */
+#define XAPM_MC26_OFFSET 0x0720 /* Metric Counter 26
+ Register */
+#define XAPM_MC27_OFFSET 0x0730 /* Metric Counter 27
+ Register */
+#define XAPM_MC28_OFFSET 0x0740 /* Metric Counter 28
+ Register */
+#define XAPM_MC29_OFFSET 0x0750 /* Metric Counter 29
+ Register */
+#define XAPM_MC30_OFFSET 0x0760 /* Metric Counter 30
+ Register */
+#define XAPM_MC31_OFFSET 0x0770 /* Metric Counter 31
+ Register */
+#define XAPM_MC32_OFFSET 0x0780 /* Metric Counter 32
+ Register */
+#define XAPM_MC33_OFFSET 0x0790 /* Metric Counter 33
+ Register */
+#define XAPM_MC34_OFFSET 0x07A0 /* Metric Counter 34
+ Register */
+#define XAPM_MC35_OFFSET 0x07B0 /* Metric Counter 35
+ Register */
+#define XAPM_MC36_OFFSET 0x0900 /* Metric Counter 36
+ Register */
+#define XAPM_MC37_OFFSET 0x0910 /* Metric Counter 37
+ Register */
+#define XAPM_MC38_OFFSET 0x0920 /* Metric Counter 38
+ Register */
+#define XAPM_MC39_OFFSET 0x0930 /* Metric Counter 39
+ Register */
+#define XAPM_MC40_OFFSET 0x0940 /* Metric Counter 40
+ Register */
+#define XAPM_MC41_OFFSET 0x0950 /* Metric Counter 41
+ Register */
+#define XAPM_MC42_OFFSET 0x0960 /* Metric Counter 42
+ Register */
+#define XAPM_MC43_OFFSET 0x0970 /* Metric Counter 43
+ Register */
+#define XAPM_MC44_OFFSET 0x0980 /* Metric Counter 44
+ Register */
+#define XAPM_MC45_OFFSET 0x0990 /* Metric Counter 45
+ Register */
+#define XAPM_MC46_OFFSET 0x09A0 /* Metric Counter 46
+ Register */
+#define XAPM_MC47_OFFSET 0x09B0 /* Metric Counter 47
+ Register */
+
+#define XAPM_SMC0_OFFSET 0x0200 /* Sampled Metric Counter
+ 0 Register */
+#define XAPM_SINC0_OFFSET 0x0204 /* Sampled Incrementer
+ 0 Register */
+#define XAPM_SMC1_OFFSET 0x0210 /* Sampled Metric Counter
+ 1 Register */
+#define XAPM_SINC1_OFFSET 0x0214 /* Sampled Incrementer
+ 1 Register */
+#define XAPM_SMC2_OFFSET 0x0220 /* Sampled Metric Counter
+ 2 Register */
+#define XAPM_SINC2_OFFSET 0x0224 /* Sampled Incrementer
+ 2 Register */
+#define XAPM_SMC3_OFFSET 0x0230 /* Sampled Metric Counter
+ 3 Register */
+#define XAPM_SINC3_OFFSET 0x0234 /* Sampled Incrementer
+ 3 Register */
+#define XAPM_SMC4_OFFSET 0x0240 /* Sampled Metric Counter
+ 4 Register */
+#define XAPM_SINC4_OFFSET 0x0244 /* Sampled Incrementer
+ 4 Register */
+#define XAPM_SMC5_OFFSET 0x0250 /* Sampled Metric Counter
+ 5 Register */
+#define XAPM_SINC5_OFFSET 0x0254 /* Sampled Incrementer
+ 5 Register */
+#define XAPM_SMC6_OFFSET 0x0260 /* Sampled Metric Counter
+ 6 Register */
+#define XAPM_SINC6_OFFSET 0x0264 /* Sampled Incrementer
+ 6 Register */
+#define XAPM_SMC7_OFFSET 0x0270 /* Sampled Metric Counter
+ 7 Register */
+#define XAPM_SINC7_OFFSET 0x0274 /* Sampled Incrementer
+ 7 Register */
+#define XAPM_SMC8_OFFSET 0x0280 /* Sampled Metric Counter
+ 8 Register */
+#define XAPM_SINC8_OFFSET 0x0284 /* Sampled Incrementer
+ 8 Register */
+#define XAPM_SMC9_OFFSET 0x0290 /* Sampled Metric Counter
+ 9 Register */
+#define XAPM_SINC9_OFFSET 0x0294 /* Sampled Incrementer
+ 9 Register */
+#define XAPM_SMC10_OFFSET 0x02A0 /* Sampled Metric Counter
+ 10 Register */
+#define XAPM_SMC11_OFFSET 0x02B0 /* Sampled Metric Counter
+ 11 Register */
+#define XAPM_SMC12_OFFSET 0x0600 /* Sampled Metric Counter
+ 12 Register */
+#define XAPM_SMC13_OFFSET 0x0610 /* Sampled Metric Counter
+ 13 Register */
+#define XAPM_SMC14_OFFSET 0x0620 /* Sampled Metric Counter
+ 14 Register */
+#define XAPM_SMC15_OFFSET 0x0630 /* Sampled Metric Counter
+ 15 Register */
+#define XAPM_SMC16_OFFSET 0x0640 /* Sampled Metric Counter
+ 16 Register */
+#define XAPM_SMC17_OFFSET 0x0650 /* Sampled Metric Counter
+ 17 Register */
+#define XAPM_SMC18_OFFSET 0x0660 /* Sampled Metric Counter
+ 18 Register */
+#define XAPM_SMC19_OFFSET 0x0670 /* Sampled Metric Counter
+ 19 Register */
+#define XAPM_SMC20_OFFSET 0x0680 /* Sampled Metric Counter
+ 20 Register */
+#define XAPM_SMC21_OFFSET 0x0690 /* Sampled Metric Counter
+ 21 Register */
+#define XAPM_SMC22_OFFSET 0x06A0 /* Sampled Metric Counter
+ 22 Register */
+#define XAPM_SMC23_OFFSET 0x06B0 /* Sampled Metric Counter
+ 23 Register */
+#define XAPM_SMC24_OFFSET 0x0800 /* Sampled Metric Counter
+ 24 Register */
+#define XAPM_SMC25_OFFSET 0x0810 /* Sampled Metric Counter
+ 25 Register */
+#define XAPM_SMC26_OFFSET 0x0820 /* Sampled Metric Counter
+ 26 Register */
+#define XAPM_SMC27_OFFSET 0x0830 /* Sampled Metric Counter
+ 27 Register */
+#define XAPM_SMC28_OFFSET 0x0840 /* Sampled Metric Counter
+ 28 Register */
+#define XAPM_SMC29_OFFSET 0x0850 /* Sampled Metric Counter
+ 29 Register */
+#define XAPM_SMC30_OFFSET 0x0860 /* Sampled Metric Counter
+ 30 Register */
+#define XAPM_SMC31_OFFSET 0x0870 /* Sampled Metric Counter
+ 31 Register */
+#define XAPM_SMC32_OFFSET 0x0880 /* Sampled Metric Counter
+ 32 Register */
+#define XAPM_SMC33_OFFSET 0x0890 /* Sampled Metric Counter
+ 33 Register */
+#define XAPM_SMC34_OFFSET 0x08A0 /* Sampled Metric Counter
+ 34 Register */
+#define XAPM_SMC35_OFFSET 0x08B0 /* Sampled Metric Counter
+ 35 Register */
+#define XAPM_SMC36_OFFSET 0x0A00 /* Sampled Metric Counter
+ 36 Register */
+#define XAPM_SMC37_OFFSET 0x0A10 /* Sampled Metric Counter
+ 37 Register */
+#define XAPM_SMC38_OFFSET 0x0A20 /* Sampled Metric Counter
+ 38 Register */
+#define XAPM_SMC39_OFFSET 0x0A30 /* Sampled Metric Counter
+ 39 Register */
+#define XAPM_SMC40_OFFSET 0x0A40 /* Sampled Metric Counter
+ 40 Register */
+#define XAPM_SMC41_OFFSET 0x0A50 /* Sampled Metric Counter
+ 41 Register */
+#define XAPM_SMC42_OFFSET 0x0A60 /* Sampled Metric Counter
+ 42 Register */
+#define XAPM_SMC43_OFFSET 0x0A70 /* Sampled Metric Counter
+ 43 Register */
+#define XAPM_SMC44_OFFSET 0x0A80 /* Sampled Metric Counter
+ 44 Register */
+#define XAPM_SMC45_OFFSET 0x0A90 /* Sampled Metric Counter
+ 45 Register */
+#define XAPM_SMC46_OFFSET 0x0AA0 /* Sampled Metric Counter
+ 46 Register */
+#define XAPM_SMC47_OFFSET 0x0AB0 /* Sampled Metric Counter
+ 47 Register */
+
+#define XAPM_CTL_OFFSET 0x0300 /* Control Register */
+
+#define XAPM_ID_OFFSET 0x0304 /* Latency ID Register */
+
+#define XAPM_IDMASK_OFFSET 0x0308 /* ID Mask Register */
+
+#define XAPM_RID_OFFSET 0x030C /* Latency Read ID Register */
+
+#define XAPM_RIDMASK_OFFSET 0x0310 /* Read ID mask register */
+
+#define XAPM_FEC_OFFSET 0x0400 /* flag Enable
+ Control Register */
+
+#define XAPM_SWD_OFFSET 0x0404 /* Software-written
+ Data Register */
+
+#define XAPM_SICR_MCNTR_RST_MASK 0x00000100 /* Enable the Metric
+ Counter Reset */
+#define XAPM_SICR_LOAD_MASK 0x00000002 /* Load the Sample Interval
+ Register Value into
+ the counter */
+#define XAPM_SICR_ENABLE_MASK 0x00000001 /* Enable the downcounter */
+
+#define XAPM_IXR_MC9_OVERFLOW_MASK 0x00001000 /**< Metric Counter 9
+ * Overflow> */
+#define XAPM_IXR_MC8_OVERFLOW_MASK 0x00000800 /**< Metric Counter 8
+ * Overflow> */
+#define XAPM_IXR_MC7_OVERFLOW_MASK 0x00000400 /**< Metric Counter 7
+ * Overflow> */
+#define XAPM_IXR_MC6_OVERFLOW_MASK 0x00000200 /**< Metric Counter 6
+ * Overflow> */
+#define XAPM_IXR_MC5_OVERFLOW_MASK 0x00000100 /**< Metric Counter 5
+ * Overflow> */
+#define XAPM_IXR_MC4_OVERFLOW_MASK 0x00000080 /**< Metric Counter 4
+ * Overflow> */
+#define XAPM_IXR_MC3_OVERFLOW_MASK 0x00000040 /**< Metric Counter 3
+ * Overflow> */
+#define XAPM_IXR_MC2_OVERFLOW_MASK 0x00000020 /**< Metric Counter 2
+ * Overflow> */
+#define XAPM_IXR_MC1_OVERFLOW_MASK 0x00000010 /**< Metric Counter 1
+ * Overflow> */
+#define XAPM_IXR_MC0_OVERFLOW_MASK 0x00000008 /**< Metric Counter 0
+ * Overflow> */
+#define XAPM_IXR_FIFO_FULL_MASK 0x00000004 /**< Event Log FIFO
+ * full> */
+#define XAPM_IXR_SIC_OVERFLOW_MASK 0x00000002 /**< Sample Interval
+ * Counter Overflow */
+#define XAPM_IXR_GCC_OVERFLOW_MASK 0x00000001 /**< Global Clock
+ Counter Overflow */
+#define XAPM_IXR_ALL_MASK (XAPM_IXR_SIC_OVERFLOW_MASK | \
+ XAPM_IXR_GCC_OVERFLOW_MASK | \
+ XAPM_IXR_FIFO_FULL_MASK | \
+ XAPM_IXR_MC0_OVERFLOW_MASK | \
+ XAPM_IXR_MC1_OVERFLOW_MASK | \
+ XAPM_IXR_MC2_OVERFLOW_MASK | \
+ XAPM_IXR_MC3_OVERFLOW_MASK | \
+ XAPM_IXR_MC4_OVERFLOW_MASK | \
+ XAPM_IXR_MC5_OVERFLOW_MASK | \
+ XAPM_IXR_MC6_OVERFLOW_MASK | \
+ XAPM_IXR_MC7_OVERFLOW_MASK | \
+ XAPM_IXR_MC8_OVERFLOW_MASK | \
+ XAPM_IXR_MC9_OVERFLOW_MASK)
+
+#define XAPM_CR_FIFO_RESET_MASK 0x02000000
+ /**< FIFO Reset */
+#define XAPM_CR_MUXSEL_MASK 0x01000000
+ /**< Mux Selector mask */
+#define XAPM_CR_GCC_RESET_MASK 0x00020000
+ /**< Global Clk
+ Counter Reset */
+#define XAPM_CR_GCC_ENABLE_MASK 0x00010000
+ /**< Global Clk
+ Counter Enable */
+#define XAPM_CR_EVTLOG_EXTTRIGGER_MASK 0x00000200
+ /**< Enable External trigger
+ to start event Log */
+#define XAPM_CR_EVENTLOG_ENABLE_MASK 0x00000100
+ /**< Event Log Enable */
+#define XAPM_CR_RDLATENCY_END_MASK 0x00000080
+ /**< Read Latency
+ End point */
+#define XAPM_CR_RDLATENCY_START_MASK 0x00000040
+ /**< Read Latency
+ Start point */
+#define XAPM_CR_WRLATENCY_END_MASK 0x00000020
+ /**< Write Latency
+ End point */
+#define XAPM_CR_WRLATENCY_START_MASK 0x00000010
+ /**< Write Latency
+ Start point */
+#define XAPM_CR_IDFILTER_ENABLE_MASK 0x00000008
+ /**< ID Filter Enable */
+#define XAPM_CR_MCNTR_EXTTRIGGER_MASK 0x00000004
+ /**< Enable External
+ trigger to start
+ Metric Counters */
+#define XAPM_CR_MCNTR_RESET_MASK 0x00000002
+ /**< Metrics Counter
+ Reset */
+#define XAPM_CR_MCNTR_ENABLE_MASK 0x00000001
+ /**< Metrics Counter
+ Enable */
+
+#define XAPM_ID_RID_MASK 0xFFFF0000 /**< Read ID */
+
+#define XAPM_ID_WID_MASK 0x0000FFFF /**< Write ID */
+
+#define XAPM_MASKID_RID_MASK 0xFFFF0000 /**< Read ID Mask */
+
+#define XAPM_MASKID_WID_MASK 0x0000FFFF /**< Write ID Mask*/
+
+
+#define XAPM_MAX_COUNTERS 10 /**< Maximum number of Counters */
+#define XAPM_MAX_COUNTERS_PROFILE 48 /**< Maximum number of Counters in
+ profile mode */
+
+#define XAPM_METRIC_COUNTER_0 0 /**< Metric Counter 0 Register Index */
+#define XAPM_METRIC_COUNTER_1 1 /**< Metric Counter 1 Register Index */
+#define XAPM_METRIC_COUNTER_2 2 /**< Metric Counter 2 Register Index */
+#define XAPM_METRIC_COUNTER_3 3 /**< Metric Counter 3 Register Index */
+#define XAPM_METRIC_COUNTER_4 4 /**< Metric Counter 4 Register Index */
+#define XAPM_METRIC_COUNTER_5 5 /**< Metric Counter 5 Register Index */
+#define XAPM_METRIC_COUNTER_6 6 /**< Metric Counter 6 Register Index */
+#define XAPM_METRIC_COUNTER_7 7 /**< Metric Counter 7 Register Index */
+#define XAPM_METRIC_COUNTER_8 8 /**< Metric Counter 8 Register Index */
+#define XAPM_METRIC_COUNTER_9 9 /**< Metric Counter 9 Register Index */
+
+#define XAPM_INCREMENTER_0 0 /**< Metric Counter 0 Register Index */
+#define XAPM_INCREMENTER_1 1 /**< Metric Counter 0 Register Index */
+#define XAPM_INCREMENTER_2 2 /**< Metric Counter 0 Register Index */
+#define XAPM_INCREMENTER_3 3 /**< Metric Counter 0 Register Index */
+#define XAPM_INCREMENTER_4 4 /**< Metric Counter 0 Register Index */
+#define XAPM_INCREMENTER_5 5 /**< Metric Counter 0 Register Index */
+#define XAPM_INCREMENTER_6 6 /**< Metric Counter 0 Register Index */
+#define XAPM_INCREMENTER_7 7 /**< Metric Counter 0 Register Index */
+#define XAPM_INCREMENTER_8 8 /**< Metric Counter 0 Register Index */
+#define XAPM_INCREMENTER_9 9 /**< Metric Counter 0 Register Index */
+
+#define XAPM_METRIC_SET_0 0 /**< Write Transaction Count */
+#define XAPM_METRIC_SET_1 1 /**< Read Transaction Count */
+#define XAPM_METRIC_SET_2 2 /**< Write Byte Count */
+#define XAPM_METRIC_SET_3 3 /**< Read Byte Count */
+#define XAPM_METRIC_SET_4 4 /**< Write Beat Count */
+#define XAPM_METRIC_SET_5 5 /**< Total Read Latency */
+#define XAPM_METRIC_SET_6 6 /**< Total Write Latency */
+#define XAPM_METRIC_SET_7 7 /**< Slv_Wr_Idle_Cnt */
+#define XAPM_METRIC_SET_8 8 /**< Mst_Rd_Idle_Cnt */
+#define XAPM_METRIC_SET_9 9 /**< Num_BValids */
+#define XAPM_METRIC_SET_10 10 /**< Num_WLasts */
+#define XAPM_METRIC_SET_11 11 /**< Num_RLasts */
+#define XAPM_METRIC_SET_12 12 /**< Minimum Write Latency */
+#define XAPM_METRIC_SET_13 13 /**< Maximum Write Latency */
+#define XAPM_METRIC_SET_14 14 /**< Minimum Read Latency */
+#define XAPM_METRIC_SET_15 15 /**< Maximum Read Latency */
+#define XAPM_METRIC_SET_16 16 /**< Transfer Cycle Count */
+#define XAPM_METRIC_SET_17 17 /**< Packet Count */
+#define XAPM_METRIC_SET_18 18 /**< Data Byte Count */
+#define XAPM_METRIC_SET_19 19 /**< Position Byte Count */
+#define XAPM_METRIC_SET_20 20 /**< Null Byte Count */
+#define XAPM_METRIC_SET_21 21 /**< Slv_Idle_Cnt */
+#define XAPM_METRIC_SET_22 22 /**< Mst_Idle_Cnt */
+#define XAPM_METRIC_SET_30 30 /**< External event count */
+
+#define XAPM_MAX_AGENTS 8 /**< Maximum number of Agents */
+
+#define XAPM_FLAG_WRADDR 0x00000001 /**< Write Address flag */
+#define XAPM_FLAG_FIRSTWR 0x00000002 /**< First Write flag */
+#define XAPM_FLAG_LASTWR 0x00000004 /**< Last Write flag */
+#define XAPM_FLAG_RESPONSE 0x00000008 /**< Response flag */
+#define XAPM_FLAG_RDADDR 0x00000010 /**< Read Address flag */
+#define XAPM_FLAG_FIRSTRD 0x00000020 /**< First Read flag */
+#define XAPM_FLAG_LASTRD 0x00000040 /**< Last Read flag */
+#define XAPM_FLAG_SWDATA 0x00010000 /**< Software-written Data flag */
+#define XAPM_FLAG_EVENT 0x00020000 /**< Last Read flag */
+#define XAPM_FLAG_EVNTSTOP 0x00040000 /**< Last Read flag */
+#define XAPM_FLAG_EVNTSTART 0x00080000 /**< Last Read flag */
+#define XAPM_FLAG_GCCOVF 0x00100000 /**< Global Clock Counter Overflow
+ * flag */
+#define XAPM_FLAG_SCLAPSE 0x00200000 /**< Sample Counter Lapse flag */
+#define XAPM_FLAG_MC0 0x00400000 /**< Metric Counter 0 flag */
+#define XAPM_FLAG_MC1 0x00800000 /**< Metric Counter 1 flag */
+#define XAPM_FLAG_MC2 0x01000000 /**< Metric Counter 2 flag */
+#define XAPM_FLAG_MC3 0x02000000 /**< Metric Counter 3 flag */
+#define XAPM_FLAG_MC4 0x04000000 /**< Metric Counter 4 flag */
+#define XAPM_FLAG_MC5 0x08000000 /**< Metric Counter 5 flag */
+#define XAPM_FLAG_MC6 0x10000000 /**< Metric Counter 6 flag */
+#define XAPM_FLAG_MC7 0x20000000 /**< Metric Counter 7 flag */
+#define XAPM_FLAG_MC8 0x40000000 /**< Metric Counter 8 flag */
+#define XAPM_FLAG_MC9 0x80000000 /**< Metric Counter 9 flag */
+
+#define XAPM_LATENCY_ADDR_ISSUE 0 /**< Address Issue as start
+ point for Latency calculation*/
+#define XAPM_LATENCY_ADDR_ACCEPT 1 /**< Address Acceptance as start
+ point for Latency calculation*/
+#define XAPM_LATENCY_LASTRD 0 /**< Last Read as end point for
+ Latency calculation */
+#define XAPM_LATENCY_LASTWR 0 /**< Last Write as end point for
+ Latency calculation */
+#define XAPM_LATENCY_FIRSTRD 1 /**< First Read as end point for
+ Latency calculation */
+#define XAPM_LATENCY_FIRSTWR 1 /**< First Write as end point for
+ Latency calculation */
+
+#define XAPM_MODE_TRACE 2 /**< APM in Trace mode */
+
+#define XAPM_MODE_PROFILE 1 /**< APM in Profile mode */
+
+#define XAPM_MODE_ADVANCED 0 /**< APM in Advanced mode */
+
+typedef unsigned char u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef unsigned long ulong;
+
+ulong baseaddr;
+
+struct xapm_param {
+ u32 mode;
+ u32 maxslots;
+ u32 eventcnt;
+ u32 eventlog;
+ u32 sampledcnt;
+ u32 numcounters;
+ u32 metricwidth;
+ u32 sampledwidth;
+ u32 globalcntwidth;
+ u32 scalefactor;
+ u32 isr;
+ bool is_32bit_filter;
+};
+
+static struct xapm_param *params;
+
+/*****************************************************************************/
+/**
+*
+* Read a register of the AXI Performance Monitor device. This macro provides
+* register access to all registers using the register offsets defined above.
+*
+* @param baseaddr contains the base address of the device.
+* @param regoffset is the offset of the register to read.
+*
+* @return The contents of the register.
+*
+* @note C-style Signature:
+* u32 readreg(u32 baseaddr, u32 regoffset);
+*
+******************************************************************************/
+#define readreg(baseaddr, regoffset) \
+ (*(u32 *)(baseaddr + regoffset))
+
+/*****************************************************************************/
+/**
+*
+* Write a register of the AXI Performance Monitor device. This macro provides
+* register access to all registers using the register offsets defined above.
+*
+* @param baseaddr contains the base address of the device.
+* @param regoffset is the offset of the register to write.
+* @param data is the value to write to the register.
+*
+* @return None.
+*
+* @note C-style Signature:
+* void writereg(u32 baseaddr,
+* u32 regoffset,u32 Data)
+*
+******************************************************************************/
+#define writereg(baseaddr, regoffset, data) \
+ (*(u32 *)(baseaddr + regoffset) = data)
+
+/****************************************************************************/
+/**
+*
+* This routine enables the Global Interrupt.
+*
+* @note C-Style signature:
+* void intrglobalenable()
+*
+*****************************************************************************/
+#define intrglobalenable() \
+ writereg(baseaddr, XAPM_GIE_OFFSET, 1)
+
+
+/****************************************************************************/
+/**
+*
+* This routine disables the Global Interrupt.
+*
+* @note C-Style signature:
+* void intrglobaldisable(void)
+*
+*****************************************************************************/
+#define intrglobaldisable() \
+ writereg(baseaddr, XAPM_GIE_OFFSET, 0)
+
+/****************************************************************************/
+/**
+*
+* This routine enables interrupt(s). Use the XAPM_IXR_* constants defined in
+* xaxipmon_hw.h to create the bit-mask to enable interrupts.
+*
+* @param mask is the mask to enable. Bit positions of 1 will be enabled.
+* Bit positions of 0 will keep the previous setting. This mask is
+* formed by OR'ing XAPM_IXR__* bits defined in xaxipmon_hw.h.
+*
+* @return None.
+*
+* @note C-Style signature:
+* void intrenable(u32 mask)
+*
+*****************************************************************************/
+#define intrenable(mask) \
+ writereg(baseaddr, XAPM_IE_OFFSET, readreg(baseaddr, \
+ XAPM_IE_OFFSET) | mask);
+
+
+/****************************************************************************/
+/**
+*
+* This routine disables interrupt(s). Use the XAPM_IXR_* constants defined in
+* xaxipmon_hw.h to create the bit-mask to disable interrupts.
+*
+* @param mask is the mask to disable. Bit positions of 1 will be
+* disabled. Bit positions of 0 will keep the previous setting.
+* This mask is formed by OR'ing XAPM_IXR_* bits defined in
+* xaxipmon_hw.h.
+*
+* @return None.
+*
+* @note C-Style signature:
+* void intrdisable(u32 mask)
+*
+*****************************************************************************/
+#define intrdisable(mask) \
+ writereg(baseaddr, XAPM_IE_OFFSET, readreg(baseaddr, \
+ XAPM_IE_OFFSET) & ~(mask));
+
+/****************************************************************************/
+/**
+*
+* This routine clears the specified interrupt(s).
+*
+* @param mask is the mask to clear. Bit positions of 1 will be cleared.
+* This mask is formed by OR'ing XAPM_IXR_* bits defined in
+* xaxipmon_hw.h.
+*
+* @return None.
+*
+* @note C-Style signature:
+* void intrclear(u32 mask)
+*
+*****************************************************************************/
+#define intrclear(mask) \
+ writereg(baseaddr, XAPM_IS_OFFSET, readreg(baseaddr, \
+ XAPM_IS_OFFSET) | mask);
+
+/****************************************************************************/
+/**
+*
+* This routine returns the Interrupt Status Register.
+*
+* @return isr value updated by kernel driver
+*
+* @note This macro returns isr value updated by kernel driver.
+* C-Style signature:
+* void intrgetstatus(void)
+*
+*****************************************************************************/
+#define intrgetstatus() (params->isr)
+
+/****************************************************************************/
+/**
+*
+* This routine returns the Interrupt Status Register.
+*
+* @return Interrupt Status Register contents
+*
+* @note C-Style signature:
+* void intrhwgetstatus(void)
+*
+*****************************************************************************/
+#define intrhwgetstatus() (params->isr)
+
+/****************************************************************************/
+/**
+*
+* This function enables the Global Clock Counter.
+*
+* @note C-Style signature:
+* void enablegcc(void);
+*
+*****************************************************************************/
+#define enablegcc() \
+ writereg(baseaddr, XAPM_CTL_OFFSET, readreg(baseaddr, \
+ XAPM_CTL_OFFSET) | XAPM_CR_GCC_ENABLE_MASK);
+
+/****************************************************************************/
+/**
+*
+* This function disbles the Global Clock Counter.
+*
+* @note C-Style signature:
+* void disablegcc(void);
+*
+*****************************************************************************/
+#define disablegcc() \
+ writereg(baseaddr, XAPM_CTL_OFFSET, readreg(baseaddr, \
+ XAPM_CTL_OFFSET) & ~(XAPM_CR_GCC_ENABLE_MASK));
+
+/****************************************************************************/
+/**
+*
+* This function enables the specified flag in flag Control Register.
+*
+* @param flag is one of the XAPM_FLAG_* masks defined in xaxipmon.h
+*
+* @return None
+*
+* @note C-Style signature:
+* void enableflag(void);
+*
+*****************************************************************************/
+#define enableflag(flag) \
+ writereg(baseaddr, XAPM_FEC_OFFSET, \
+ readreg(baseaddr, XAPM_FEC_OFFSET) | flag);
+
+/****************************************************************************/
+/**
+*
+* This function disables the specified flag in flag Control Register.
+*
+* @param flag is one of the XAPM_FLAG_* masks defined in xaxipmon.h*
+* @return None
+*
+* @note C-Style signature:
+* void disableflag(void);
+*
+*****************************************************************************/
+#define disableflag(flag) \
+ writereg(baseaddr, XAPM_FEC_OFFSET, \
+ readreg(baseaddr, XAPM_FEC_OFFSET) & ~(flag));
+
+/****************************************************************************/
+/**
+*
+* This function loads the sample interval register value into the sample
+* interval counter.
+*
+* @note C-Style signature:
+* void loadsic(void);
+*
+*****************************************************************************/
+#define loadsic() \
+ writereg(baseaddr, XAPM_SICR_OFFSET, XAPM_SICR_LOAD_MASK)
+
+
+/****************************************************************************/
+/**
+*
+* This enables the down count of the sample interval counter.
+*
+* @note C-Style signature:
+* void enablesic(void);
+*
+*****************************************************************************/
+#define enablesic() \
+ writereg(baseaddr, XAPM_SICR_OFFSET, XAPM_SICR_ENABLE_MASK)
+
+/****************************************************************************/
+/**
+*
+* This disables the down count of the sample interval counter.
+*
+* @note C-Style signature:
+* void disablesic(void);
+*
+*****************************************************************************/
+#define disablesic() \
+ writereg(baseaddr, XAPM_SICR_OFFSET, \
+ readreg(baseaddr, XAPM_SICR_OFFSET) & ~(XAPM_SICR_ENABLE_MASK));
+
+/****************************************************************************/
+/**
+*
+* This enables Reset of Metric Counters when Sample Interval Counter lapses.
+*
+* @note C-Style signature:
+* void enablemcreset(void);
+*
+*****************************************************************************/
+#define enablemcreset() \
+ writereg(baseaddr, XAPM_SICR_OFFSET, XAPM_SICR_MCNTR_RST_MASK);
+
+/****************************************************************************/
+/**
+*
+* This disables the down count of the sample interval counter.
+*
+* @note C-Style signature:
+* void disablemcreset(void);
+*
+*****************************************************************************/
+#define disablemcreset() \
+ writereg(baseaddr, XAPM_SICR_OFFSET, \
+ readreg(baseaddr, XAPM_SICR_OFFSET) & ~(XAPM_SICR_MCNTR_RST_MASK));
+
+/****************************************************************************/
+/**
+*
+* This function enables the ID Filter Masking.
+*
+* @note C-Style signature:
+* void enableidfilter(void);
+*
+*****************************************************************************/
+#define enableidfilter() \
+ writereg(baseaddr, XAPM_CTL_OFFSET, readreg(baseaddr, \
+ XAPM_CTL_OFFSET) | XAPM_CR_IDFILTER_ENABLE_MASK);
+
+/****************************************************************************/
+/**
+*
+* This function disbles the ID Filter masking.
+*
+* @note C-Style signature:
+* void disableidfilter(void);
+*
+*****************************************************************************/
+#define disableidfilter() \
+ writereg(baseaddr, XAPM_CTL_OFFSET, readreg(baseaddr, \
+ XAPM_CTL_OFFSET) & ~(XAPM_CR_IDFILTER_ENABLE_MASK));
+
+/****************************************************************************/
+/**
+*
+* This function samples Metric Counters to Sampled Metric Counters by
+* reading Sample Register and also returns interval. i.e. the number of
+* clocks in between previous read to the current read of sample register.
+*
+* @return Interval. i.e. the number of clocks in between previous
+* read to the current read of sample register.
+*
+* @note C-Style signature:
+* u32 samplemetrics(void);
+*
+*****************************************************************************/
+#define samplemetrics() readreg(baseaddr, XAPM_SR_OFFSET);
+
+
+/************************** Function Prototypes *****************************/
+
+int resetmetriccounter(void);
+
+void resetglobalclkcounter(void);
+
+int resetfifo(void);
+
+void setincrementerrange(u8 incrementer, u16 rangehigh, u16 rangelow);
+
+void getincrementerrange(u8 incrementer, u16 *rangehigh, u16 *rangelow);
+
+void setsampleinterval(u32 sampleinterval);
+
+void getsampleinterval(u32 *sampleinterval);
+
+int setmetrics(u8 slot, u8 metrics, u8 counter);
+
+int getmetrics(u8 counter, u8 *metrics, u8 *slot);
+void getglobalclkcounter(u32 *cnthigh, u32 *cntlow);
+
+u32 getmetriccounter(u32 counter);
+
+u32 getsampledmetriccounter(u32 counter);
+
+u32 getincrementer(u32 incrementer);
+
+u32 getsampledincrementer(u32 incrementer);
+
+void setswdatareg(u32 swdata);
+
+u32 getswdatareg(void);
+
+int starteventlog(u32 flagenables);
+
+int stopeventlog(void);
+
+int startcounters(u32 sampleinterval);
+
+int stopcounters(void);
+
+void enablemetricscounter(void);
+
+void disablemetricscounter(void);
+
+void setlogenableranges(u32 counter, u16 rangehigh, u16 rangelow);
+
+void getlogenableranges(u32 counter, u16 *rangehigh, u16 *rangelow);
+
+void enableeventlog(void);
+
+void enablemctrigger(void);
+
+void disablemctrigger(void);
+
+void enableeventlogtrigger(void);
+
+void disableeventlogtrigger(void);
+
+const char *getmetricname(u8 metrics);
+
+void setwriteid(u32 writeid);
+
+void setreadid(u32 readid);
+
+u32 getwriteid(void);
+
+u32 getreadid(void);
+
+void setwrlatencystart(u8 param);
+
+void setwrlatencyend(u8 param);
+
+void setrdlatencystart(u8 param);
+
+void setrdlatencyend(u8 param);
+
+u8 getwrlatencystart(void);
+
+u8 getwrlatencyend(void);
+
+u8 getrdlatencystart(void);
+
+u8 getrdlatencyend(void);
+
+void setwriteidmask(u32 wrmask);
+
+void setreadidmask(u32 rdmask);
+
+u32 getwriteidmask(void);
+
+u32 getreadidmask(void);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* End of protection macro. */
diff --git a/sound/soc/xilinx/Kconfig b/sound/soc/xilinx/Kconfig
index 1d3586b68db7..11170d6e7c17 100644
--- a/sound/soc/xilinx/Kconfig
+++ b/sound/soc/xilinx/Kconfig
@@ -1,4 +1,26 @@
# SPDX-License-Identifier: GPL-2.0-only
+config SND_SOC_XILINX_DP
+	tristate "Audio support for the Xilinx DisplayPort"
+ select SND_DMAENGINE_PCM
+ select SND_SOC_GENERIC_DMAENGINE_PCM
+ help
+	  Audio support for the Xilinx DisplayPort.
+
+config SND_SOC_XILINX_AUDIO_FORMATTER
+	tristate "Audio support for the Xilinx audio formatter"
+ help
+ Select this option to enable Xilinx audio formatter
+ support. This provides PCM platform device support for
+ audio functionality.
+
+config SND_SOC_XILINX_SDI
+	tristate "Audio support for the Xilinx SDI"
+ depends on DRM_XLNX_SDI
+ depends on VIDEO_XILINX_SDIRXSS
+ help
+	  Select this option to enable Xilinx SDI Audio. This enables
+ SDI audio playback and capture using xilinx soft IP
+
config SND_SOC_XILINX_I2S
tristate "Audio support for the Xilinx I2S"
help
@@ -7,6 +29,7 @@ config SND_SOC_XILINX_I2S
mode, IP receives audio in AES format, extracts PCM and sends
PCM data. In receiver mode, IP receives PCM audio and
encapsulates PCM in AES format and sends AES data.
+ I2S playback and capture using xilinx soft IP
config SND_SOC_XILINX_AUDIO_FORMATTER
tristate "Audio support for the the Xilinx audio formatter"
@@ -19,5 +42,17 @@ config SND_SOC_XILINX_SPDIF
tristate "Audio support for the the Xilinx SPDIF"
help
Select this option to enable Xilinx SPDIF Audio.
- This provides playback and capture of SPDIF audio in
- AES format.
+ Enabling this provides one of the component required in ASoC
+ audio pipeline.
+ This supports playback and capture usecases.
+
+config SND_SOC_XILINX_PL_SND_CARD
+	tristate "Audio support for the Xilinx PL sound card"
+ depends on SND_SOC_XILINX_AUDIO_FORMATTER
+ depends on SND_SOC_XILINX_I2S
+ depends on SND_SOC_XILINX_SDI
+ select SND_SOC_HDMI_CODEC
+ help
+ Select this option to enable Xilinx PL sound card
+ support. This enables sound card using xilinx soft IPs
+ in audio pipeline.
diff --git a/sound/soc/xilinx/Makefile b/sound/soc/xilinx/Makefile
index be7652ce7c13..7b0a5461067e 100644
--- a/sound/soc/xilinx/Makefile
+++ b/sound/soc/xilinx/Makefile
@@ -1,7 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_SND_SOC_XILINX_DP) += xilinx-dp-pcm.o
+obj-$(CONFIG_SND_SOC_XILINX_DP) += xilinx-dp-codec.o
+obj-$(CONFIG_SND_SOC_XILINX_DP) += xilinx-dp-card.o
+obj-$(CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER) += xlnx_formatter_pcm.o
+obj-$(CONFIG_SND_SOC_XILINX_SDI) += xlnx_sdi_audio.o
snd-soc-xlnx-i2s-objs := xlnx_i2s.o
obj-$(CONFIG_SND_SOC_XILINX_I2S) += snd-soc-xlnx-i2s.o
snd-soc-xlnx-formatter-pcm-objs := xlnx_formatter_pcm.o
obj-$(CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER) += snd-soc-xlnx-formatter-pcm.o
-snd-soc-xlnx-spdif-objs := xlnx_spdif.o
-obj-$(CONFIG_SND_SOC_XILINX_SPDIF) += snd-soc-xlnx-spdif.o
+obj-$(CONFIG_SND_SOC_XILINX_SPDIF) += xlnx_spdif.o
+obj-$(CONFIG_SND_SOC_XILINX_PL_SND_CARD) += xlnx_pl_snd_card.o
diff --git a/sound/soc/xilinx/xilinx-dp-card.c b/sound/soc/xilinx/xilinx-dp-card.c
new file mode 100644
index 000000000000..396a87d56394
--- /dev/null
+++ b/sound/soc/xilinx/xilinx-dp-card.c
@@ -0,0 +1,123 @@
+/*
+ * Xilinx DisplayPort SoC Sound Card support
+ *
+ * Copyright (C) 2015 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <sound/soc.h>
+
+static int xilinx_dp_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 256);
+ return 0;
+}
+
+static const struct snd_soc_ops xilinx_dp_ops = {
+ .startup = xilinx_dp_startup,
+};
+
+SND_SOC_DAILINK_DEFS(xilinx_dp0,
+ DAILINK_COMP_ARRAY(COMP_CPU("xilinx-dp-snd-codec-dai")),
+ DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "xilinx-dp-snd-codec-dai")),
+ DAILINK_COMP_ARRAY(COMP_PLATFORM(NULL)));
+
+SND_SOC_DAILINK_DEFS(xilinx_dp1,
+ DAILINK_COMP_ARRAY(COMP_CPU("xilinx-dp-snd-codec-dai")),
+ DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "xilinx-dp-snd-codec-dai")),
+ DAILINK_COMP_ARRAY(COMP_PLATFORM(NULL)));
+
+static struct snd_soc_dai_link xilinx_dp_dai_links[] = {
+ {
+ .name = "xilinx-dp0",
+ .stream_name = "xilinx-dp0",
+ SND_SOC_DAILINK_REG(xilinx_dp0),
+ .ops = &xilinx_dp_ops,
+ },
+ {
+ .name = "xilinx-dp1",
+ .stream_name = "xilinx-dp1",
+ SND_SOC_DAILINK_REG(xilinx_dp1),
+ .ops = &xilinx_dp_ops,
+ },
+
+};
+
+static struct snd_soc_card xilinx_dp_card = {
+ .name = "DisplayPort monitor",
+ .owner = THIS_MODULE,
+ .dai_link = xilinx_dp_dai_links,
+ .num_links = 2,
+};
+
+static int xilinx_dp_probe(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = &xilinx_dp_card;
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *codec, *pcm;
+ int ret;
+
+ card->dev = &pdev->dev;
+
+ codec = of_parse_phandle(node, "xlnx,dp-snd-codec", 0);
+ if (!codec)
+ return -ENODEV;
+
+ pcm = of_parse_phandle(node, "xlnx,dp-snd-pcm", 0);
+ if (!pcm)
+ return -ENODEV;
+ xilinx_dp_dai_links[0].platforms->of_node = pcm;
+ xilinx_dp_dai_links[0].cpus->of_node = codec;
+ xilinx_dp_dai_links[0].codecs->of_node = codec;
+
+ pcm = of_parse_phandle(node, "xlnx,dp-snd-pcm", 1);
+ if (!pcm)
+ return -ENODEV;
+ xilinx_dp_dai_links[1].platforms->of_node = pcm;
+ xilinx_dp_dai_links[1].cpus->of_node = codec;
+ xilinx_dp_dai_links[1].codecs->of_node = codec;
+
+ ret = devm_snd_soc_register_card(&pdev->dev, card);
+ if (ret)
+ return ret;
+
+ dev_info(&pdev->dev, "Xilinx DisplayPort Sound Card probed\n");
+
+ return 0;
+}
+
+static const struct of_device_id xilinx_dp_of_match[] = {
+ { .compatible = "xlnx,dp-snd-card", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xilinx_dp_of_match);
+
+static struct platform_driver xilinx_dp_aud_driver = {
+ .driver = {
+ .name = "xilinx-dp-snd-card",
+ .of_match_table = xilinx_dp_of_match,
+ .pm = &snd_soc_pm_ops,
+ },
+ .probe = xilinx_dp_probe,
+};
+module_platform_driver(xilinx_dp_aud_driver);
+
+MODULE_DESCRIPTION("Xilinx DisplayPort Sound Card module");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/xilinx/xilinx-dp-codec.c b/sound/soc/xilinx/xilinx-dp-codec.c
new file mode 100644
index 000000000000..af6e6b08c415
--- /dev/null
+++ b/sound/soc/xilinx/xilinx-dp-codec.c
@@ -0,0 +1,178 @@
+/*
+ * Xilinx DisplayPort Sound Codec support
+ *
+ * Copyright (C) 2015 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <sound/soc.h>
+
+/**
+ * struct xilinx_dp_codec - DisplayPort codec
+ * @aud_clk: audio clock
+ */
+struct xilinx_dp_codec {
+ struct clk *aud_clk;
+};
+
+struct xilinx_dp_codec_fmt {
+ unsigned long rate;
+ unsigned int snd_rate;
+};
+
+static struct snd_soc_dai_driver xilinx_dp_codec_dai = {
+ .name = "xilinx-dp-snd-codec-dai",
+ .playback = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_44100,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+};
+
+static const struct xilinx_dp_codec_fmt rates[] = {
+ {
+ .rate = 48000 * 512,
+ .snd_rate = SNDRV_PCM_RATE_48000
+ },
+ {
+ .rate = 44100 * 512,
+ .snd_rate = SNDRV_PCM_RATE_44100
+ }
+};
+
+static const struct snd_soc_component_driver xilinx_dp_component_driver = {
+ .idle_bias_on = 1,
+ .use_pmdown_time = 1,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
+static int xilinx_dp_codec_probe(struct platform_device *pdev)
+{
+ struct xilinx_dp_codec *codec;
+ unsigned int i;
+ unsigned long rate;
+ int ret;
+
+ codec = devm_kzalloc(&pdev->dev, sizeof(*codec), GFP_KERNEL);
+ if (!codec)
+ return -ENOMEM;
+
+ codec->aud_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(codec->aud_clk))
+ return PTR_ERR(codec->aud_clk);
+
+ ret = clk_prepare_enable(codec->aud_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable the aud_clk\n");
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rates); i++) {
+ clk_disable_unprepare(codec->aud_clk);
+ ret = clk_set_rate(codec->aud_clk, rates[i].rate);
+ clk_prepare_enable(codec->aud_clk);
+ if (ret)
+ continue;
+
+ rate = clk_get_rate(codec->aud_clk);
+ /* Ignore some offset +- 10 */
+ if (abs(rates[i].rate - rate) < 10) {
+ xilinx_dp_codec_dai.playback.rates = rates[i].snd_rate;
+ break;
+ }
+ ret = -EINVAL;
+ }
+
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to get required clock freq\n");
+ goto error_clk;
+ }
+
+ ret = devm_snd_soc_register_component(&pdev->dev,
+ &xilinx_dp_component_driver,
+ &xilinx_dp_codec_dai, 1);
+ if (ret)
+ goto error_clk;
+
+ platform_set_drvdata(pdev, codec);
+
+ dev_info(&pdev->dev, "Xilinx DisplayPort Sound Codec probed\n");
+
+ return 0;
+
+error_clk:
+ clk_disable_unprepare(codec->aud_clk);
+ return ret;
+}
+
+static int xilinx_dp_codec_dev_remove(struct platform_device *pdev)
+{
+ struct xilinx_dp_codec *codec = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(codec->aud_clk);
+
+ return 0;
+}
+
+static int __maybe_unused xilinx_dp_codec_pm_suspend(struct device *dev)
+{
+ struct xilinx_dp_codec *codec = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(codec->aud_clk);
+
+ return 0;
+}
+
+static int __maybe_unused xilinx_dp_codec_pm_resume(struct device *dev)
+{
+ struct xilinx_dp_codec *codec = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(codec->aud_clk);
+ if (ret)
+ dev_err(dev, "failed to enable the aud_clk\n");
+
+ return ret;
+}
+
+static const struct dev_pm_ops xilinx_dp_codec_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xilinx_dp_codec_pm_suspend,
+ xilinx_dp_codec_pm_resume)
+};
+
+static const struct of_device_id xilinx_dp_codec_of_match[] = {
+ { .compatible = "xlnx,dp-snd-codec", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, xilinx_dp_codec_of_match);
+
+static struct platform_driver xilinx_dp_codec_driver = {
+ .driver = {
+ .name = "xilinx-dp-snd-codec",
+ .of_match_table = xilinx_dp_codec_of_match,
+ .pm = &xilinx_dp_codec_pm_ops,
+ },
+ .probe = xilinx_dp_codec_probe,
+ .remove = xilinx_dp_codec_dev_remove,
+};
+module_platform_driver(xilinx_dp_codec_driver);
+
+MODULE_DESCRIPTION("Xilinx DisplayPort Sound Codec module");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/xilinx/xilinx-dp-pcm.c b/sound/soc/xilinx/xilinx-dp-pcm.c
new file mode 100644
index 000000000000..fa8abe788cf7
--- /dev/null
+++ b/sound/soc/xilinx/xilinx-dp-pcm.c
@@ -0,0 +1,76 @@
+/*
+ * Xilinx DisplayPort Sound PCM support
+ *
+ * Copyright (C) 2015 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyunk@xilinx.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <sound/dmaengine_pcm.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+
+static const struct snd_pcm_hardware xilinx_pcm_hw = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_RESUME |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
+ .buffer_bytes_max = 128 * 1024,
+ .period_bytes_min = 256,
+ .period_bytes_max = 1024 * 1024,
+ .periods_min = 2,
+ .periods_max = 256,
+};
+
+static const struct snd_dmaengine_pcm_config xilinx_dmaengine_pcm_config = {
+ .pcm_hardware = &xilinx_pcm_hw,
+ .prealloc_buffer_size = 64 * 1024,
+};
+
+static int xilinx_dp_pcm_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ dev_set_name(&pdev->dev, pdev->dev.of_node->name);
+ ret = devm_snd_dmaengine_pcm_register(&pdev->dev,
+ &xilinx_dmaengine_pcm_config, 0);
+ if (ret)
+ return ret;
+
+ dev_info(&pdev->dev, "Xilinx DisplayPort Sound PCM probed\n");
+
+ return 0;
+}
+
+static const struct of_device_id xilinx_dp_pcm_of_match[] = {
+ { .compatible = "xlnx,dp-snd-pcm", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, xilinx_dp_pcm_of_match);
+
+static struct platform_driver xilinx_dp_pcm_driver = {
+ .driver = {
+ .name = "xilinx-dp-snd-pcm",
+ .of_match_table = xilinx_dp_pcm_of_match,
+ },
+ .probe = xilinx_dp_pcm_probe,
+};
+module_platform_driver(xilinx_dp_pcm_driver);
+
+MODULE_DESCRIPTION("Xilinx DisplayPort Sound PCM module");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/xilinx/xlnx_formatter_pcm.c b/sound/soc/xilinx/xlnx_formatter_pcm.c
index 1d59fb668c77..37b29c56d00f 100644
--- a/sound/soc/xilinx/xlnx_formatter_pcm.c
+++ b/sound/soc/xilinx/xlnx_formatter_pcm.c
@@ -1,22 +1,25 @@
// SPDX-License-Identifier: GPL-2.0
-//
-// Xilinx ASoC audio formatter support
-//
-// Copyright (C) 2018 Xilinx, Inc.
-//
-// Author: Maruthi Srinivas Bayyavarapu <maruthis@xilinx.com>
+/*
+ * Xilinx ASoC audio formatter support
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ */
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <sound/asoundef.h>
#include <sound/soc.h>
#include <sound/pcm_params.h>
+#include "xlnx_snd_common.h"
+
#define DRV_NAME "xlnx_formatter_pcm"
#define XLNX_S2MM_OFFSET 0
@@ -56,7 +59,9 @@
#define CFG_S2MM_XFER_SHIFT 29
#define CFG_S2MM_PKG_MASK BIT(28)
+#define AUD_CTRL_DATA_WIDTH_MASK GENMASK(18, 16)
#define AUD_CTRL_DATA_WIDTH_SHIFT 16
+#define AUD_CTRL_ACTIVE_CH_MASK GENMASK(22, 19)
#define AUD_CTRL_ACTIVE_CH_SHIFT 19
#define PERIOD_CFG_PERIODS_SHIFT 16
@@ -66,12 +71,22 @@
#define PERIOD_BYTES_MAX (50 * 1024)
#define XLNX_PARAM_UNKNOWN 0
-enum bit_depth {
- BIT_DEPTH_8,
- BIT_DEPTH_16,
- BIT_DEPTH_20,
- BIT_DEPTH_24,
- BIT_DEPTH_32,
+static const struct snd_pcm_hardware xlnx_pcm_hardware = {
+ .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_BATCH | SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_RESUME,
+ .formats = SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ .buffer_bytes_max = PERIODS_MAX * PERIOD_BYTES_MAX,
+ .period_bytes_min = PERIOD_BYTES_MIN,
+ .period_bytes_max = PERIOD_BYTES_MAX,
+ .periods_min = PERIODS_MIN,
+ .periods_max = PERIODS_MAX,
};
struct xlnx_pcm_drv_data {
@@ -82,7 +97,12 @@ struct xlnx_pcm_drv_data {
int mm2s_irq;
struct snd_pcm_substream *play_stream;
struct snd_pcm_substream *capture_stream;
+ struct platform_device *pdev;
+ struct device_node *nodes[XLNX_MAX_PATHS];
struct clk *axi_clk;
+ struct clk *mm2s_axis_clk;
+ struct clk *s2mm_axis_clk;
+ struct clk *aud_mclk;
};
/*
@@ -101,22 +121,12 @@ struct xlnx_pcm_stream_param {
u64 buffer_size;
};
-static const struct snd_pcm_hardware xlnx_pcm_hardware = {
- .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
- SNDRV_PCM_INFO_BATCH | SNDRV_PCM_INFO_PAUSE |
- SNDRV_PCM_INFO_RESUME,
- .formats = SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
- .channels_min = 2,
- .channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000_192000,
- .rate_min = 8000,
- .rate_max = 192000,
- .buffer_bytes_max = PERIODS_MAX * PERIOD_BYTES_MAX,
- .period_bytes_min = PERIOD_BYTES_MIN,
- .period_bytes_max = PERIOD_BYTES_MAX,
- .periods_min = PERIODS_MIN,
- .periods_max = PERIODS_MAX,
+enum bit_depth {
+ BIT_DEPTH_8,
+ BIT_DEPTH_16,
+ BIT_DEPTH_20,
+ BIT_DEPTH_24,
+ BIT_DEPTH_32,
};
enum {
@@ -314,7 +324,7 @@ static irqreturn_t xlnx_s2mm_irq_handler(int irq, void *arg)
}
static int xlnx_formatter_pcm_open(struct snd_soc_component *component,
- struct snd_pcm_substream *substream)
+ struct snd_pcm_substream *substream)
{
int err;
u32 val, data_format_mode;
@@ -386,7 +396,7 @@ static int xlnx_formatter_pcm_open(struct snd_soc_component *component,
}
static int xlnx_formatter_pcm_close(struct snd_soc_component *component,
- struct snd_pcm_substream *substream)
+ struct snd_pcm_substream *substream)
{
int ret;
struct xlnx_pcm_stream_param *stream_data =
@@ -406,7 +416,7 @@ err_reset:
static snd_pcm_uframes_t
xlnx_formatter_pcm_pointer(struct snd_soc_component *component,
- struct snd_pcm_substream *substream)
+ struct snd_pcm_substream *substream)
{
u32 pos;
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -424,24 +434,36 @@ static int xlnx_formatter_pcm_hw_params(struct snd_soc_component *component,
struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
- u32 low, high, active_ch, val, bytes_per_ch, bits_per_sample;
- u32 aes_reg1_val, aes_reg2_val;
+ u32 low, high, active_ch, val, bits_per_sample, bytes_per_ch;
+ u32 aes_reg1_val, aes_reg2_val, sample_rate;
+ int status;
u64 size;
+ struct pl_card_data *prv;
struct snd_pcm_runtime *runtime = substream->runtime;
struct xlnx_pcm_stream_param *stream_data = runtime->private_data;
+ struct xlnx_pcm_drv_data *adata = dev_get_drvdata(component->dev);
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ bits_per_sample = params_width(params);
+ sample_rate = params_rate(params);
active_ch = params_channels(params);
if (active_ch > stream_data->ch_limit)
return -EINVAL;
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE &&
- stream_data->xfer_mode == AES_TO_PCM) {
+ stream_data->xfer_mode == AES_TO_PCM &&
+ ((strstr(adata->nodes[XLNX_CAPTURE]->name, "hdmi")) ||
+ (strstr(adata->nodes[XLNX_CAPTURE]->name, "sdi")))) {
+ /*
+ * If formatter is in AES_PCM mode for HDMI/SDI capture path,
+ * parse AES header
+ */
val = readl(stream_data->mmio + XLNX_AUD_STS);
if (val & AUD_STS_CH_STS_MASK) {
aes_reg1_val = readl(stream_data->mmio +
- XLNX_AUD_CH_STS_START);
+ XLNX_AUD_CH_STS_START);
aes_reg2_val = readl(stream_data->mmio +
- XLNX_AUD_CH_STS_START + 0x4);
+ XLNX_AUD_CH_STS_START + 0x4);
xlnx_parse_aes_params(aes_reg1_val, aes_reg2_val,
component->dev);
@@ -449,6 +471,9 @@ static int xlnx_formatter_pcm_hw_params(struct snd_soc_component *component,
}
size = params_buffer_bytes(params);
+ status = snd_pcm_lib_malloc_pages(substream, size);
+ if (status < 0)
+ return status;
stream_data->buffer_size = size;
@@ -458,6 +483,7 @@ static int xlnx_formatter_pcm_hw_params(struct snd_soc_component *component,
writel(high, stream_data->mmio + XLNX_AUD_BUFF_ADDR_MSB);
val = readl(stream_data->mmio + XLNX_AUD_CTRL);
+ val &= ~AUD_CTRL_DATA_WIDTH_MASK;
bits_per_sample = params_width(params);
switch (bits_per_sample) {
case 8:
@@ -475,10 +501,9 @@ static int xlnx_formatter_pcm_hw_params(struct snd_soc_component *component,
case 32:
val |= (BIT_DEPTH_32 << AUD_CTRL_DATA_WIDTH_SHIFT);
break;
- default:
- return -EINVAL;
}
+ val &= ~AUD_CTRL_ACTIVE_CH_MASK;
val |= active_ch << AUD_CTRL_ACTIVE_CH_SHIFT;
writel(val, stream_data->mmio + XLNX_AUD_CTRL);
@@ -488,12 +513,23 @@ static int xlnx_formatter_pcm_hw_params(struct snd_soc_component *component,
bytes_per_ch = DIV_ROUND_UP(params_period_bytes(params), active_ch);
writel(bytes_per_ch, stream_data->mmio + XLNX_BYTES_PER_CH);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ prv = snd_soc_card_get_drvdata(rtd->card);
+ writel(prv->mclk_ratio,
+ stream_data->mmio + XLNX_AUD_FS_MULTIPLIER);
+ }
+
return 0;
}
+static int xlnx_formatter_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ return snd_pcm_lib_free_pages(substream);
+}
+
static int xlnx_formatter_pcm_trigger(struct snd_soc_component *component,
- struct snd_pcm_substream *substream,
- int cmd)
+ struct snd_pcm_substream *substream, int cmd)
{
u32 val;
struct xlnx_pcm_stream_param *stream_data =
@@ -520,29 +556,164 @@ static int xlnx_formatter_pcm_trigger(struct snd_soc_component *component,
}
static int xlnx_formatter_pcm_new(struct snd_soc_component *component,
- struct snd_soc_pcm_runtime *rtd)
+ struct snd_soc_pcm_runtime *rtd)
{
- snd_pcm_set_managed_buffer_all(rtd->pcm,
+ snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
SNDRV_DMA_TYPE_DEV, component->dev,
xlnx_pcm_hardware.buffer_bytes_max,
xlnx_pcm_hardware.buffer_bytes_max);
return 0;
}
-static const struct snd_soc_component_driver xlnx_asoc_component = {
- .name = DRV_NAME,
- .open = xlnx_formatter_pcm_open,
- .close = xlnx_formatter_pcm_close,
- .hw_params = xlnx_formatter_pcm_hw_params,
- .trigger = xlnx_formatter_pcm_trigger,
- .pointer = xlnx_formatter_pcm_pointer,
- .pcm_construct = xlnx_formatter_pcm_new,
+static struct snd_soc_component_driver xlnx_asoc_component = {
+ .name = DRV_NAME,
+ .open = xlnx_formatter_pcm_open,
+ .close = xlnx_formatter_pcm_close,
+ .hw_params = xlnx_formatter_pcm_hw_params,
+ .hw_free = xlnx_formatter_pcm_hw_free,
+ .trigger = xlnx_formatter_pcm_trigger,
+ .pointer = xlnx_formatter_pcm_pointer,
+ .pcm_construct = xlnx_formatter_pcm_new,
};
+static int configure_mm2s(struct xlnx_pcm_drv_data *aud_drv_data,
+ struct platform_device *pdev)
+{
+ int ret;
+ struct device *dev = &pdev->dev;
+
+ aud_drv_data->mm2s_axis_clk = devm_clk_get(dev, "m_axis_mm2s_aclk");
+ if (IS_ERR(aud_drv_data->mm2s_axis_clk)) {
+ ret = PTR_ERR(aud_drv_data->mm2s_axis_clk);
+ dev_err(dev, "failed to get m_axis_mm2s_aclk(%d)\n", ret);
+ return ret;
+ }
+ ret = clk_prepare_enable(aud_drv_data->mm2s_axis_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable m_axis_mm2s_aclk(%d)\n", ret);
+ return ret;
+ }
+
+ aud_drv_data->aud_mclk = devm_clk_get(dev, "aud_mclk");
+ if (IS_ERR(aud_drv_data->aud_mclk)) {
+ ret = PTR_ERR(aud_drv_data->aud_mclk);
+ dev_err(dev, "failed to get aud_mclk(%d)\n", ret);
+ goto axis_clk_err;
+ }
+ ret = clk_prepare_enable(aud_drv_data->aud_mclk);
+ if (ret) {
+ dev_err(dev, "failed to enable aud_mclk(%d)\n", ret);
+ goto axis_clk_err;
+ }
+
+ aud_drv_data->mm2s_irq = platform_get_irq_byname(pdev,
+ "irq_mm2s");
+ if (aud_drv_data->mm2s_irq < 0) {
+ ret = aud_drv_data->mm2s_irq;
+ goto mm2s_err;
+ }
+ ret = devm_request_irq(dev, aud_drv_data->mm2s_irq,
+ xlnx_mm2s_irq_handler, 0,
+ "xlnx_formatter_pcm_mm2s_irq",
+ dev);
+ if (ret) {
+ dev_err(dev, "xlnx audio mm2s irq request failed\n");
+ goto mm2s_err;
+ }
+ ret = xlnx_formatter_pcm_reset(aud_drv_data->mmio +
+ XLNX_MM2S_OFFSET);
+ if (ret) {
+ dev_err(dev, "audio formatter reset failed\n");
+ goto mm2s_err;
+ }
+ xlnx_formatter_disable_irqs(aud_drv_data->mmio +
+ XLNX_MM2S_OFFSET,
+ SNDRV_PCM_STREAM_PLAYBACK);
+
+ aud_drv_data->nodes[XLNX_PLAYBACK] =
+ of_parse_phandle(dev->of_node, "xlnx,tx", 0);
+ if (!aud_drv_data->nodes[XLNX_PLAYBACK])
+ dev_err(dev, "tx node not found\n");
+ else
+ dev_info(dev,
+ "sound card device will use DAI link: %s\n",
+ (aud_drv_data->nodes[XLNX_PLAYBACK])->name);
+ of_node_put(aud_drv_data->nodes[XLNX_PLAYBACK]);
+
+ aud_drv_data->mm2s_presence = true;
+ return 0;
+
+mm2s_err:
+ clk_disable_unprepare(aud_drv_data->aud_mclk);
+axis_clk_err:
+ clk_disable_unprepare(aud_drv_data->mm2s_axis_clk);
+
+ return ret;
+}
+
+static int configure_s2mm(struct xlnx_pcm_drv_data *aud_drv_data,
+ struct platform_device *pdev)
+{
+ int ret;
+ struct device *dev = &pdev->dev;
+
+ aud_drv_data->s2mm_axis_clk = devm_clk_get(dev, "s_axis_s2mm_aclk");
+ if (IS_ERR(aud_drv_data->s2mm_axis_clk)) {
+ ret = PTR_ERR(aud_drv_data->s2mm_axis_clk);
+ dev_err(dev, "failed to get s_axis_s2mm_aclk(%d)\n", ret);
+ return ret;
+ }
+ ret = clk_prepare_enable(aud_drv_data->s2mm_axis_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable s_axis_s2mm_aclk(%d)\n", ret);
+ return ret;
+ }
+
+ aud_drv_data->s2mm_irq = platform_get_irq_byname(pdev, "irq_s2mm");
+ if (aud_drv_data->s2mm_irq < 0) {
+ ret = aud_drv_data->s2mm_irq;
+ goto s2mm_err;
+ }
+ ret = devm_request_irq(dev, aud_drv_data->s2mm_irq,
+ xlnx_s2mm_irq_handler, 0,
+ "xlnx_formatter_pcm_s2mm_irq",
+ dev);
+ if (ret) {
+ dev_err(dev, "xlnx audio s2mm irq request failed\n");
+ goto s2mm_err;
+ }
+ ret = xlnx_formatter_pcm_reset(aud_drv_data->mmio +
+ XLNX_S2MM_OFFSET);
+ if (ret) {
+ dev_err(dev, "audio formatter reset failed\n");
+ goto s2mm_err;
+ }
+ xlnx_formatter_disable_irqs(aud_drv_data->mmio +
+ XLNX_S2MM_OFFSET,
+ SNDRV_PCM_STREAM_CAPTURE);
+
+ aud_drv_data->nodes[XLNX_CAPTURE] =
+ of_parse_phandle(dev->of_node, "xlnx,rx", 0);
+ if (!aud_drv_data->nodes[XLNX_CAPTURE])
+ dev_err(dev, "rx node not found\n");
+ else
+ dev_info(dev, "sound card device will use DAI link: %s\n",
+ (aud_drv_data->nodes[XLNX_CAPTURE])->name);
+ of_node_put(aud_drv_data->nodes[XLNX_CAPTURE]);
+
+ aud_drv_data->s2mm_presence = true;
+ return 0;
+
+s2mm_err:
+ clk_disable_unprepare(aud_drv_data->s2mm_axis_clk);
+ return ret;
+}
+
static int xlnx_formatter_pcm_probe(struct platform_device *pdev)
{
int ret;
u32 val;
+ size_t pdata_size;
struct xlnx_pcm_drv_data *aud_drv_data;
struct device *dev = &pdev->dev;
@@ -558,8 +729,7 @@ static int xlnx_formatter_pcm_probe(struct platform_device *pdev)
}
ret = clk_prepare_enable(aud_drv_data->axi_clk);
if (ret) {
- dev_err(dev,
- "failed to enable s_axi_lite_aclk(%d)\n", ret);
+ dev_err(dev, "failed to enable s_axi_lite_aclk(%d)\n", ret);
return ret;
}
@@ -572,57 +742,15 @@ static int xlnx_formatter_pcm_probe(struct platform_device *pdev)
val = readl(aud_drv_data->mmio + XLNX_AUD_CORE_CONFIG);
if (val & AUD_CFG_MM2S_MASK) {
- aud_drv_data->mm2s_presence = true;
- ret = xlnx_formatter_pcm_reset(aud_drv_data->mmio +
- XLNX_MM2S_OFFSET);
- if (ret) {
- dev_err(dev, "audio formatter reset failed\n");
- goto clk_err;
- }
- xlnx_formatter_disable_irqs(aud_drv_data->mmio +
- XLNX_MM2S_OFFSET,
- SNDRV_PCM_STREAM_PLAYBACK);
-
- aud_drv_data->mm2s_irq = platform_get_irq_byname(pdev,
- "irq_mm2s");
- if (aud_drv_data->mm2s_irq < 0) {
- ret = aud_drv_data->mm2s_irq;
+ ret = configure_mm2s(aud_drv_data, pdev);
+ if (ret)
goto clk_err;
- }
- ret = devm_request_irq(dev, aud_drv_data->mm2s_irq,
- xlnx_mm2s_irq_handler, 0,
- "xlnx_formatter_pcm_mm2s_irq", dev);
- if (ret) {
- dev_err(dev, "xlnx audio mm2s irq request failed\n");
- goto clk_err;
- }
}
+
if (val & AUD_CFG_S2MM_MASK) {
- aud_drv_data->s2mm_presence = true;
- ret = xlnx_formatter_pcm_reset(aud_drv_data->mmio +
- XLNX_S2MM_OFFSET);
- if (ret) {
- dev_err(dev, "audio formatter reset failed\n");
- goto clk_err;
- }
- xlnx_formatter_disable_irqs(aud_drv_data->mmio +
- XLNX_S2MM_OFFSET,
- SNDRV_PCM_STREAM_CAPTURE);
-
- aud_drv_data->s2mm_irq = platform_get_irq_byname(pdev,
- "irq_s2mm");
- if (aud_drv_data->s2mm_irq < 0) {
- ret = aud_drv_data->s2mm_irq;
+ ret = configure_s2mm(aud_drv_data, pdev);
+ if (ret)
goto clk_err;
- }
- ret = devm_request_irq(dev, aud_drv_data->s2mm_irq,
- xlnx_s2mm_irq_handler, 0,
- "xlnx_formatter_pcm_s2mm_irq",
- dev);
- if (ret) {
- dev_err(dev, "xlnx audio s2mm irq request failed\n");
- goto clk_err;
- }
}
dev_set_drvdata(dev, aud_drv_data);
@@ -634,6 +762,19 @@ static int xlnx_formatter_pcm_probe(struct platform_device *pdev)
goto clk_err;
}
+ pdata_size = sizeof(aud_drv_data->nodes);
+ if (aud_drv_data->nodes[XLNX_PLAYBACK] ||
+ aud_drv_data->nodes[XLNX_CAPTURE])
+ aud_drv_data->pdev =
+ platform_device_register_resndata(dev, "xlnx_snd_card",
+ PLATFORM_DEVID_AUTO,
+ NULL, 0,
+ &aud_drv_data->nodes,
+ pdata_size);
+ if (!aud_drv_data->pdev)
+ dev_err(dev, "sound card device creation failed\n");
+
+ dev_info(dev, "pcm platform device registered\n");
return 0;
clk_err:
@@ -646,6 +787,8 @@ static int xlnx_formatter_pcm_remove(struct platform_device *pdev)
int ret = 0;
struct xlnx_pcm_drv_data *adata = dev_get_drvdata(&pdev->dev);
+ platform_device_unregister(adata->pdev);
+
if (adata->s2mm_presence)
ret = xlnx_formatter_pcm_reset(adata->mmio + XLNX_S2MM_OFFSET);
@@ -676,5 +819,5 @@ static struct platform_driver xlnx_formatter_pcm_driver = {
};
module_platform_driver(xlnx_formatter_pcm_driver);
-MODULE_AUTHOR("Maruthi Srinivas Bayyavarapu <maruthis@xilinx.com>");
+MODULE_AUTHOR("Maruthi Srinivas Bayyavarapu");
MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/xilinx/xlnx_i2s.c b/sound/soc/xilinx/xlnx_i2s.c
index cc641e582c82..1ae53d1cc2ec 100644
--- a/sound/soc/xilinx/xlnx_i2s.c
+++ b/sound/soc/xilinx/xlnx_i2s.c
@@ -7,6 +7,7 @@
// Author: Praveen Vuppala <praveenv@xilinx.com>
// Author: Maruthi Srinivas Bayyavarapu <maruthis@xilinx.com>
+#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -22,15 +23,22 @@
#define I2S_CH0_OFFSET 0x30
#define I2S_I2STIM_VALID_MASK GENMASK(7, 0)
+struct xlnx_i2s_dev_data {
+ void __iomem *base;
+ struct clk *axi_clk;
+ struct clk *axis_clk;
+ struct clk *aud_mclk;
+};
+
static int xlnx_i2s_set_sclkout_div(struct snd_soc_dai *cpu_dai,
int div_id, int div)
{
- void __iomem *base = snd_soc_dai_get_drvdata(cpu_dai);
+ struct xlnx_i2s_dev_data *dev_data = snd_soc_dai_get_drvdata(cpu_dai);
if (!div || (div & ~I2S_I2STIM_VALID_MASK))
return -EINVAL;
- writel(div, base + I2S_I2STIM_OFFSET);
+ writel(div, dev_data->base + I2S_I2STIM_OFFSET);
return 0;
}
@@ -40,13 +48,13 @@ static int xlnx_i2s_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *i2s_dai)
{
u32 reg_off, chan_id;
- void __iomem *base = snd_soc_dai_get_drvdata(i2s_dai);
+ struct xlnx_i2s_dev_data *dev_data = snd_soc_dai_get_drvdata(i2s_dai);
chan_id = params_channels(params) / 2;
while (chan_id > 0) {
reg_off = I2S_CH0_OFFSET + ((chan_id - 1) * 4);
- writel(chan_id, base + reg_off);
+ writel(chan_id, dev_data->base + reg_off);
chan_id--;
}
@@ -56,18 +64,18 @@ static int xlnx_i2s_hw_params(struct snd_pcm_substream *substream,
static int xlnx_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *i2s_dai)
{
- void __iomem *base = snd_soc_dai_get_drvdata(i2s_dai);
+ struct xlnx_i2s_dev_data *dev_data = snd_soc_dai_get_drvdata(i2s_dai);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- writel(1, base + I2S_CORE_CTRL_OFFSET);
+ writel(1, dev_data->base + I2S_CORE_CTRL_OFFSET);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- writel(0, base + I2S_CORE_CTRL_OFFSET);
+ writel(0, dev_data->base + I2S_CORE_CTRL_OFFSET);
break;
default:
return -EINVAL;
@@ -95,8 +103,8 @@ MODULE_DEVICE_TABLE(of, xlnx_i2s_of_match);
static int xlnx_i2s_probe(struct platform_device *pdev)
{
- void __iomem *base;
struct snd_soc_dai_driver *dai_drv;
+ struct xlnx_i2s_dev_data *dev_data;
int ret;
u32 ch, format, data_width;
struct device *dev = &pdev->dev;
@@ -106,9 +114,16 @@ static int xlnx_i2s_probe(struct platform_device *pdev)
if (!dai_drv)
return -ENOMEM;
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ dev_data = devm_kzalloc(&pdev->dev, sizeof(*dev_data), GFP_KERNEL);
+ if (!dev_data)
+ return -ENOMEM;
+
+ dev_data->axi_clk = devm_clk_get(&pdev->dev, "s_axi_ctrl_aclk");
+ if (IS_ERR(dev_data->axi_clk)) {
+ ret = PTR_ERR(dev_data->axi_clk);
+ dev_err(&pdev->dev, "failed to get s_axi_ctrl_aclk(%d)\n", ret);
+ return ret;
+ }
ret = of_property_read_u32(node, "xlnx,num-channels", &ch);
if (ret < 0) {
@@ -141,6 +156,15 @@ static int xlnx_i2s_probe(struct platform_device *pdev)
dai_drv->playback.channels_max = ch;
dai_drv->playback.rates = SNDRV_PCM_RATE_8000_192000;
dai_drv->ops = &xlnx_i2s_dai_ops;
+
+ dev_data->axis_clk = devm_clk_get(&pdev->dev,
+ "s_axis_aud_aclk");
+ if (IS_ERR(dev_data->axis_clk)) {
+ ret = PTR_ERR(dev_data->axis_clk);
+ dev_err(&pdev->dev,
+ "failed to get s_axis_aud_aclk(%d)\n", ret);
+ return ret;
+ }
} else if (of_device_is_compatible(node, "xlnx,i2s-receiver-1.0")) {
dai_drv->name = "xlnx_i2s_capture";
dai_drv->capture.stream_name = "Capture";
@@ -149,30 +173,93 @@ static int xlnx_i2s_probe(struct platform_device *pdev)
dai_drv->capture.channels_max = ch;
dai_drv->capture.rates = SNDRV_PCM_RATE_8000_192000;
dai_drv->ops = &xlnx_i2s_dai_ops;
+
+ dev_data->axis_clk = devm_clk_get(&pdev->dev,
+ "m_axis_aud_aclk");
+ if (IS_ERR(dev_data->axis_clk)) {
+ ret = PTR_ERR(dev_data->axis_clk);
+ dev_err(&pdev->dev,
+ "failed to get m_axis_aud_aclk(%d)\n", ret);
+ return ret;
+ }
} else {
return -ENODEV;
}
- dev_set_drvdata(&pdev->dev, base);
+ dev_data->aud_mclk = devm_clk_get(&pdev->dev, "aud_mclk");
+ if (IS_ERR(dev_data->aud_mclk)) {
+ ret = PTR_ERR(dev_data->aud_mclk);
+ dev_err(&pdev->dev, "failed to get aud_mclk(%d)\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dev_data->axi_clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to enable s_axi_ctrl_aclk(%d)\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dev_data->axis_clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to enable axis_aud_aclk(%d)\n", ret);
+ goto err_axis_clk;
+ }
+
+ ret = clk_prepare_enable(dev_data->aud_mclk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to enable aud_mclk(%d)\n", ret);
+ goto err_aud_mclk;
+ }
+
+ dev_data->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dev_data->base)) {
+ ret = PTR_ERR(dev_data->base);
+ goto clk_err;
+ }
+
+ dev_set_drvdata(&pdev->dev, dev_data);
ret = devm_snd_soc_register_component(&pdev->dev, &xlnx_i2s_component,
dai_drv, 1);
if (ret) {
dev_err(&pdev->dev, "i2s component registration failed\n");
- return ret;
+ goto clk_err;
}
dev_info(&pdev->dev, "%s DAI registered\n", dai_drv->name);
+ return 0;
+clk_err:
+ clk_disable_unprepare(dev_data->aud_mclk);
+err_aud_mclk:
+ clk_disable_unprepare(dev_data->axis_clk);
+err_axis_clk:
+ clk_disable_unprepare(dev_data->axi_clk);
+
return ret;
}
+static int xlnx_i2s_remove(struct platform_device *pdev)
+{
+ struct xlnx_i2s_dev_data *dev_data = dev_get_drvdata(&pdev->dev);
+
+ clk_disable_unprepare(dev_data->aud_mclk);
+ clk_disable_unprepare(dev_data->axis_clk);
+ clk_disable_unprepare(dev_data->axi_clk);
+
+ return 0;
+}
+
static struct platform_driver xlnx_i2s_aud_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = xlnx_i2s_of_match,
},
.probe = xlnx_i2s_probe,
+ .remove = xlnx_i2s_remove,
};
module_platform_driver(xlnx_i2s_aud_driver);
diff --git a/sound/soc/xilinx/xlnx_pl_snd_card.c b/sound/soc/xilinx/xlnx_pl_snd_card.c
new file mode 100644
index 000000000000..ddab8b3e733b
--- /dev/null
+++ b/sound/soc/xilinx/xlnx_pl_snd_card.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx ASoC sound card support
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/idr.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "xlnx_snd_common.h"
+
+#define I2S_CLOCK_RATIO 384
+#define XLNX_MAX_PL_SND_DEV 5
+
+static DEFINE_IDA(xlnx_snd_card_dev);
+
+enum {
+ I2S_AUDIO = 0,
+ HDMI_AUDIO,
+ SDI_AUDIO,
+ SPDIF_AUDIO,
+ XLNX_MAX_IFACE,
+};
+
+static const char *xlnx_snd_card_name[XLNX_MAX_IFACE] = {
+ [I2S_AUDIO] = "xlnx-i2s-snd-card",
+ [HDMI_AUDIO] = "xlnx-hdmi-snd-card",
+ [SDI_AUDIO] = "xlnx-sdi-snd-card",
+ [SPDIF_AUDIO] = "xlnx-spdif-snd-card",
+};
+
+static const char *dev_compat[][XLNX_MAX_IFACE] = {
+ [XLNX_PLAYBACK] = {
+ "xlnx,i2s-transmitter-1.0",
+ "xlnx,v-hdmi-tx-ss-3.1",
+ "xlnx,v-uhdsdi-audio-2.0",
+ "xlnx,spdif-2.0",
+ },
+
+ [XLNX_CAPTURE] = {
+ "xlnx,i2s-receiver-1.0",
+ "xlnx,v-hdmi-rx-ss-3.1",
+ "xlnx,v-uhdsdi-audio-2.0",
+ "xlnx,spdif-2.0",
+ },
+};
+
+static int xlnx_spdif_card_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct pl_card_data *prv = snd_soc_card_get_drvdata(rtd->card);
+ u32 sample_rate = params_rate(params);
+
+	/* mclk must be >= 1024 * sampling rate */
+ prv->mclk_val = 1024 * sample_rate;
+ prv->mclk_ratio = 1024;
+ return clk_set_rate(prv->mclk, prv->mclk_val);
+}
+
+static int xlnx_sdi_card_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct pl_card_data *prv = snd_soc_card_get_drvdata(rtd->card);
+ u32 sample_rate = params_rate(params);
+
+ prv->mclk_val = prv->mclk_ratio * sample_rate;
+ return clk_set_rate(prv->mclk, prv->mclk_val);
+}
+
+static int xlnx_hdmi_card_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct pl_card_data *prv = snd_soc_card_get_drvdata(rtd->card);
+ u32 sample_rate = params_rate(params);
+
+ switch (sample_rate) {
+ case 32000:
+ case 44100:
+ case 48000:
+ case 88200:
+ case 96000:
+ case 176400:
+ case 192000:
+ prv->mclk_ratio = 512;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ prv->mclk_val = prv->mclk_ratio * sample_rate;
+ return clk_set_rate(prv->mclk, prv->mclk_val);
+}
+
+static int xlnx_i2s_card_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ int ret, clk_div;
+ u32 ch, data_width, sample_rate;
+ struct pl_card_data *prv;
+
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+
+ ch = params_channels(params);
+ data_width = params_width(params);
+ sample_rate = params_rate(params);
+
+ /* only 2 channels supported */
+ if (ch != 2)
+ return -EINVAL;
+
+ prv = snd_soc_card_get_drvdata(rtd->card);
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ switch (sample_rate) {
+ case 5512:
+ case 8000:
+ case 11025:
+ case 16000:
+ case 22050:
+ case 32000:
+ case 44100:
+ case 48000:
+ case 64000:
+ case 88200:
+ case 96000:
+ prv->mclk_ratio = 384;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ switch (sample_rate) {
+ case 32000:
+ case 44100:
+ case 48000:
+ case 88200:
+ case 96000:
+ prv->mclk_ratio = 384;
+ break;
+ case 64000:
+ case 176400:
+ case 192000:
+ prv->mclk_ratio = 192;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ prv->mclk_val = prv->mclk_ratio * sample_rate;
+ clk_div = DIV_ROUND_UP(prv->mclk_ratio, 2 * ch * data_width);
+ ret = snd_soc_dai_set_clkdiv(cpu_dai, 0, clk_div);
+ if (ret)
+ return ret;
+
+ return clk_set_rate(prv->mclk, prv->mclk_val);
+}
+
+static const struct snd_soc_ops xlnx_sdi_card_ops = {
+ .hw_params = xlnx_sdi_card_hw_params,
+};
+
+static const struct snd_soc_ops xlnx_i2s_card_ops = {
+ .hw_params = xlnx_i2s_card_hw_params,
+};
+
+static const struct snd_soc_ops xlnx_hdmi_card_ops = {
+ .hw_params = xlnx_hdmi_card_hw_params,
+};
+
+static const struct snd_soc_ops xlnx_spdif_card_ops = {
+ .hw_params = xlnx_spdif_card_hw_params,
+};
+
+SND_SOC_DAILINK_DEFS(xlnx_i2s_capture,
+ DAILINK_COMP_ARRAY(COMP_CPU("xlnx_i2s_capture")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_PLATFORM(NULL)));
+
+SND_SOC_DAILINK_DEFS(xlnx_i2s_playback,
+ DAILINK_COMP_ARRAY(COMP_CPU("xlnx_i2s_playback")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_PLATFORM(NULL)));
+
+SND_SOC_DAILINK_DEFS(xlnx_hdmi_tx,
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_CODEC("hdmi-audio-codec.0", "i2s-hifi")),
+ DAILINK_COMP_ARRAY(COMP_PLATFORM(NULL)));
+
+SND_SOC_DAILINK_DEFS(xlnx_hdmi_rx,
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "xlnx_hdmi_rx")),
+ DAILINK_COMP_ARRAY(COMP_PLATFORM(NULL)));
+
+SND_SOC_DAILINK_DEFS(xlnx_sdi_tx,
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "xlnx_sdi_tx")),
+ DAILINK_COMP_ARRAY(COMP_PLATFORM(NULL)));
+
+SND_SOC_DAILINK_DEFS(xlnx_sdi_rx,
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "xlnx_sdi_rx")),
+ DAILINK_COMP_ARRAY(COMP_PLATFORM(NULL)));
+
+SND_SOC_DAILINK_DEFS(xlnx_spdif,
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_PLATFORM(NULL)));
+
+static struct snd_soc_dai_link xlnx_snd_dai[][XLNX_MAX_PATHS] = {
+ [I2S_AUDIO] = {
+ {
+ .name = "xilinx-i2s_playback",
+ SND_SOC_DAILINK_REG(xlnx_i2s_playback),
+ .ops = &xlnx_i2s_card_ops,
+ },
+ {
+ .name = "xilinx-i2s_capture",
+ SND_SOC_DAILINK_REG(xlnx_i2s_capture),
+ .ops = &xlnx_i2s_card_ops,
+ },
+ },
+ [HDMI_AUDIO] = {
+ {
+ .name = "xilinx-hdmi-playback",
+ SND_SOC_DAILINK_REG(xlnx_hdmi_tx),
+ .ops = &xlnx_hdmi_card_ops,
+ },
+ {
+ .name = "xilinx-hdmi-capture",
+ SND_SOC_DAILINK_REG(xlnx_hdmi_rx),
+ },
+ },
+ [SDI_AUDIO] = {
+ {
+ .name = "xlnx-sdi-playback",
+ SND_SOC_DAILINK_REG(xlnx_sdi_tx),
+ .ops = &xlnx_sdi_card_ops,
+ },
+ {
+ .name = "xlnx-sdi-capture",
+ SND_SOC_DAILINK_REG(xlnx_sdi_rx),
+ },
+ },
+ [SPDIF_AUDIO] = {
+ {
+ .name = "xilinx-spdif_playback",
+ SND_SOC_DAILINK_REG(xlnx_spdif),
+ .ops = &xlnx_spdif_card_ops,
+ },
+ {
+ .name = "xilinx-spdif_capture",
+ SND_SOC_DAILINK_REG(xlnx_spdif),
+ .ops = &xlnx_spdif_card_ops,
+ },
+ },
+
+};
+
+static int find_link(struct device_node *node, int direction)
+{
+ int ret;
+ u32 i, size;
+ const char **link_names = dev_compat[direction];
+
+ size = ARRAY_SIZE(dev_compat[direction]);
+
+ for (i = 0; i < size; i++) {
+ ret = of_device_is_compatible(node, link_names[i]);
+ if (ret)
+ return i;
+ }
+ return -ENODEV;
+}
+
+static int xlnx_snd_probe(struct platform_device *pdev)
+{
+ u32 i;
+ size_t sz;
+ char *buf;
+ int ret, audio_interface;
+ struct snd_soc_dai_link *dai;
+ struct pl_card_data *prv;
+ struct platform_device *iface_pdev;
+
+ struct snd_soc_card *card;
+ struct device_node **node = pdev->dev.platform_data;
+
+ if (!node)
+ return -ENODEV;
+
+ card = devm_kzalloc(&pdev->dev, sizeof(struct snd_soc_card),
+ GFP_KERNEL);
+ if (!card)
+ return -ENOMEM;
+
+ card->dev = &pdev->dev;
+
+ card->dai_link = devm_kzalloc(card->dev,
+ sizeof(*dai) * XLNX_MAX_PATHS,
+ GFP_KERNEL);
+ if (!card->dai_link)
+ return -ENOMEM;
+
+ prv = devm_kzalloc(card->dev,
+ sizeof(struct pl_card_data),
+ GFP_KERNEL);
+ if (!prv)
+ return -ENOMEM;
+
+ card->num_links = 0;
+ for (i = XLNX_PLAYBACK; i < XLNX_MAX_PATHS; i++) {
+ struct device_node *pnode = of_parse_phandle(node[i],
+ "xlnx,snd-pcm", 0);
+ if (!pnode) {
+ dev_err(card->dev, "platform node not found\n");
+ of_node_put(pnode);
+ return -ENODEV;
+ }
+
+		/*
+		 * Checking either playback or capture is enough, as the
+		 * same clock is used for both.
+		 */
+ if (i == XLNX_PLAYBACK) {
+ iface_pdev = of_find_device_by_node(pnode);
+ if (!iface_pdev) {
+ of_node_put(pnode);
+ return -ENODEV;
+ }
+
+ prv->mclk = devm_clk_get(&iface_pdev->dev, "aud_mclk");
+ if (IS_ERR(prv->mclk))
+ return PTR_ERR(prv->mclk);
+
+ }
+ of_node_put(pnode);
+
+ dai = &card->dai_link[i];
+ audio_interface = find_link(node[i], i);
+ switch (audio_interface) {
+ case I2S_AUDIO:
+ *dai = xlnx_snd_dai[I2S_AUDIO][i];
+ dai->platforms->of_node = pnode;
+ dai->cpus->of_node = node[i];
+ card->num_links++;
+ snd_soc_card_set_drvdata(card, prv);
+ dev_dbg(card->dev, "%s registered\n",
+ card->dai_link[i].name);
+ break;
+ case HDMI_AUDIO:
+ *dai = xlnx_snd_dai[HDMI_AUDIO][i];
+ dai->platforms->of_node = pnode;
+ if (i == XLNX_CAPTURE)
+ dai->codecs->of_node = node[i];
+ card->num_links++;
+ /* TODO: support multiple sampling rates */
+ prv->mclk_ratio = 384;
+ snd_soc_card_set_drvdata(card, prv);
+ dev_dbg(card->dev, "%s registered\n",
+ card->dai_link[i].name);
+ break;
+ case SDI_AUDIO:
+ *dai = xlnx_snd_dai[SDI_AUDIO][i];
+ dai->platforms->of_node = pnode;
+ dai->codecs->of_node = node[i];
+ card->num_links++;
+ /* TODO: support multiple sampling rates */
+ prv->mclk_ratio = 384;
+ snd_soc_card_set_drvdata(card, prv);
+ dev_dbg(card->dev, "%s registered\n",
+ card->dai_link[i].name);
+ break;
+ case SPDIF_AUDIO:
+ *dai = xlnx_snd_dai[SPDIF_AUDIO][i];
+ dai->platforms->of_node = pnode;
+ dai->codecs->of_node = node[i];
+ card->num_links++;
+ prv->mclk_ratio = 384;
+ snd_soc_card_set_drvdata(card, prv);
+ dev_dbg(card->dev, "%s registered\n",
+ card->dai_link[i].name);
+ break;
+ default:
+ dev_err(card->dev, "Invalid audio interface\n");
+ return -ENODEV;
+ }
+ }
+
+ if (card->num_links) {
+ /*
+ * Example : i2s card name = xlnx-i2s-snd-card-0
+ * length = number of chars in "xlnx-i2s-snd-card"
+ * + 1 ('-'), + 1 (card instance num)
+ * + 1 ('\0')
+ */
+ sz = strlen(xlnx_snd_card_name[audio_interface]) + 3;
+ buf = devm_kzalloc(card->dev, sz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ prv->xlnx_snd_dev_id = ida_simple_get(&xlnx_snd_card_dev, 0,
+ XLNX_MAX_PL_SND_DEV,
+ GFP_KERNEL);
+ if (prv->xlnx_snd_dev_id < 0)
+ return prv->xlnx_snd_dev_id;
+
+ snprintf(buf, sz, "%s-%d", xlnx_snd_card_name[audio_interface],
+ prv->xlnx_snd_dev_id);
+ card->name = buf;
+
+ ret = devm_snd_soc_register_card(card->dev, card);
+ if (ret) {
+ dev_err(card->dev, "%s registration failed\n",
+ card->name);
+ ida_simple_remove(&xlnx_snd_card_dev,
+ prv->xlnx_snd_dev_id);
+ return ret;
+ }
+
+ dev_set_drvdata(card->dev, prv);
+ dev_info(card->dev, "%s registered\n", card->name);
+ }
+
+ return 0;
+}
+
+static int xlnx_snd_remove(struct platform_device *pdev)
+{
+ struct pl_card_data *pdata = dev_get_drvdata(&pdev->dev);
+
+ ida_simple_remove(&xlnx_snd_card_dev, pdata->xlnx_snd_dev_id);
+ return 0;
+}
+
+static struct platform_driver xlnx_snd_driver = {
+ .driver = {
+ .name = "xlnx_snd_card",
+ },
+ .probe = xlnx_snd_probe,
+ .remove = xlnx_snd_remove,
+};
+
+module_platform_driver(xlnx_snd_driver);
+
+MODULE_DESCRIPTION("Xilinx FPGA sound card driver");
+MODULE_AUTHOR("Maruthi Srinivas Bayyavarapu");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/xilinx/xlnx_sdi_audio.c b/sound/soc/xilinx/xlnx_sdi_audio.c
new file mode 100644
index 000000000000..75b0b3150b6f
--- /dev/null
+++ b/sound/soc/xilinx/xlnx_sdi_audio.c
@@ -0,0 +1,610 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx SDI embed and extract audio support
+ *
+ * Copyright (c) 2018 Xilinx Pvt., Ltd
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+#include <drm/drm_modes.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#define DRIVER_NAME "xlnx-sdi-audio"
+
+#define XSDIAUD_CNTRL_REG_OFFSET 0x00
+#define XSDIAUD_SOFT_RST_REG_OFFSET 0x04
+#define XSDIAUD_VER_REG_OFFSET 0x08
+#define XSDIAUD_INT_EN_REG_OFFSET 0x0C
+#define XSDIAUD_INT_STS_REG_OFFSET 0x10
+#define XSDIAUD_EMB_VID_CNTRL_REG_OFFSET 0X14
+#define XSDIAUD_AUD_CNTRL_REG_OFFSET 0x18
+#define XSDIAUD_CH_VALID_REG_OFFSET 0x20
+#define XSDIAUD_CH_MUTE_REG_OFFSET 0x30
+#define XSDIAUD_ACTIVE_GRP_REG_OFFSET 0X40
+#define XSDIAUD_EXT_CH_STAT0_REG_OFFSET 0X48
+#define XSDIAUD_EXT_SRATE_STS_REG_OFFSET 0X70
+#define XSDIAUD_GUI_PARAM_REG_OFFSET 0XFC
+
+#define XSDIAUD_CNTRL_EN_MASK BIT(0)
+#define XSDIAUD_SOFT_RST_CONFIG_MASK BIT(0)
+#define XSDIAUD_SOFT_RST_CORE_MASK BIT(1)
+#define XSDIAUD_VER_MAJOR_MASK GENMASK(31, 24)
+#define XSDIAUD_VER_MINOR_MASK GENMASK(23, 16)
+
+#define XSDIAUD_EXT_GROUP_1_STS_MASK BIT(0)
+#define XSDIAUD_EXT_AUDSTS_UPDATE_MASK BIT(8)
+#define XSDIAUD_EMB_VID_CNT_ELE_SHIFT (16)
+#define XSDIAUD_EMB_VID_CNT_ELE_MASK BIT(16)
+#define XSDIAUD_EMB_VID_CNT_TSCAN_MASK BIT(8)
+#define XSDIAUD_EMB_VID_CNT_TSCAN_SHIFT (8)
+#define XSDIAUD_EMB_VID_CNT_TRATE_SHIFT (4)
+#define XSDIAUD_EMB_AUD_CNT_SS_MASK BIT(3)
+#define XSDIAUD_EMB_AUD_CNT_ASYNC_AUDIO BIT(4)
+
+#define CH_STATUS_UPDATE_TIMEOUT 40
+
+enum IP_MODE {
+ EMBED,
+ EXTRACT,
+};
+
+enum channel_id {
+ CHAN_ID_0 = 1,
+ CHAN_ID_1,
+};
+
+enum sdi_transport_family {
+ SDI_TRANSPORT_FAMILY_1920,
+ SDI_TRANSPORT_FAMILY_1280,
+ SDI_TRANSPORT_FAMILY_2048,
+ SDI_TRANSPORT_FAMILY_NTSC = 8,
+ SDI_TRANSPORT_FAMILY_PAL = 9,
+};
+
+/**
+ * enum sdi_audio_samplerate - audio sampling rate
+ * @XSDIAUD_SAMPRATE0: 48 KHz
+ * @XSDIAUD_SAMPRATE1: 44.1 KHz
+ * @XSDIAUD_SAMPRATE2: 32 KHz
+ */
+enum sdi_audio_samplerate {
+ XSDIAUD_SAMPRATE0,
+ XSDIAUD_SAMPRATE1,
+ XSDIAUD_SAMPRATE2
+};
+
+/**
+ * enum sdi_audio_samplesize - bits per sample
+ * @XSDIAUD_SAMPSIZE0: 20 Bit Audio Sample
+ * @XSDIAUD_SAMPSIZE1: 24 Bit Audio Sample
+ */
+enum sdi_audio_samplesize {
+ XSDIAUD_SAMPSIZE0,
+ XSDIAUD_SAMPSIZE1
+};
+
+struct dev_ctx {
+ enum IP_MODE mode;
+ void __iomem *base;
+ struct device *dev;
+ struct drm_display_mode *video_mode;
+ struct snd_pcm_substream *stream;
+ struct clk *axi_clk;
+ struct clk *axis_clk;
+ struct clk *aud_clk;
+ bool rx_srate_updated;
+ wait_queue_head_t srate_q;
+};
+
+static irqreturn_t xtract_irq_handler(int irq, void *dev_id)
+{
+ u32 irq_sts, irq_en, active_grps;
+ struct dev_ctx *ctx = dev_id;
+
+ irq_sts = readl(ctx->base + XSDIAUD_INT_STS_REG_OFFSET);
+ active_grps = readl(ctx->base + XSDIAUD_ACTIVE_GRP_REG_OFFSET);
+ if ((irq_sts & XSDIAUD_EXT_AUDSTS_UPDATE_MASK) &&
+ (active_grps & XSDIAUD_EXT_GROUP_1_STS_MASK)) {
+ writel(XSDIAUD_EXT_AUDSTS_UPDATE_MASK,
+ ctx->base + XSDIAUD_INT_STS_REG_OFFSET);
+ irq_en = readl(ctx->base + XSDIAUD_INT_EN_REG_OFFSET);
+		/* Disable further interrupts; sample rate status got updated */
+ writel(irq_en & ~XSDIAUD_EXT_AUDSTS_UPDATE_MASK,
+ ctx->base + XSDIAUD_INT_EN_REG_OFFSET);
+
+ ctx->rx_srate_updated = true;
+ wake_up_interruptible(&ctx->srate_q);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static void audio_enable(void __iomem *aud_base)
+{
+ u32 val;
+
+ val = readl(aud_base + XSDIAUD_CNTRL_REG_OFFSET);
+ val |= XSDIAUD_CNTRL_EN_MASK;
+ writel(val, aud_base + XSDIAUD_CNTRL_REG_OFFSET);
+}
+
+static void audio_disable(void __iomem *aud_base)
+{
+ u32 val;
+
+ val = readl(aud_base + XSDIAUD_CNTRL_REG_OFFSET);
+ val &= ~XSDIAUD_CNTRL_EN_MASK;
+ writel(val, aud_base + XSDIAUD_CNTRL_REG_OFFSET);
+}
+
+static void audio_reset_core(void __iomem *aud_base, bool reset)
+{
+ u32 val;
+
+ if (reset) {
+ /* reset the core */
+ val = readl(aud_base + XSDIAUD_SOFT_RST_REG_OFFSET);
+ val |= XSDIAUD_SOFT_RST_CORE_MASK;
+ writel(val, aud_base + XSDIAUD_SOFT_RST_REG_OFFSET);
+ } else {
+ /* bring the core out of reset */
+ val = readl(aud_base + XSDIAUD_SOFT_RST_REG_OFFSET);
+ val &= ~XSDIAUD_SOFT_RST_CORE_MASK;
+ writel(val, aud_base + XSDIAUD_SOFT_RST_REG_OFFSET);
+ }
+}
+
+static int xlnx_sdi_rx_pcm_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ int err;
+ u32 val, sample_rate;
+
+ struct dev_ctx *ctx = dev_get_drvdata(dai->dev);
+ void __iomem *base = ctx->base;
+ unsigned long jiffies = msecs_to_jiffies(CH_STATUS_UPDATE_TIMEOUT);
+
+ audio_enable(base);
+ writel(XSDIAUD_EXT_AUDSTS_UPDATE_MASK,
+ ctx->base + XSDIAUD_INT_EN_REG_OFFSET);
+ err = wait_event_interruptible_timeout(ctx->srate_q,
+ ctx->rx_srate_updated,
+ jiffies);
+
+ if (!err) {
+ dev_err(ctx->dev, "Didn't get valid audio property update\n");
+ return -EINVAL;
+ }
+ ctx->rx_srate_updated = false;
+
+ val = readl(base + XSDIAUD_EXT_SRATE_STS_REG_OFFSET);
+	/* As both channels carry the same sample rate, read either of them */
+ switch (val & CHAN_ID_0) {
+ case 0:
+ sample_rate = 48000;
+ break;
+ case 1:
+ sample_rate = 44100;
+ break;
+ case 2:
+ sample_rate = 32000;
+ break;
+ }
+
+ dev_dbg(ctx->dev,
+ "sdi rx audio enabled : sample rate = %d\n", sample_rate);
+ return 0;
+}
+
+static void xlnx_sdi_rx_pcm_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct dev_ctx *ctx = dev_get_drvdata(dai->dev);
+
+ audio_disable(ctx->base);
+
+ dev_info(dai->dev, " sdi rx audio disabled\n");
+}
+
+static int xlnx_sdi_tx_pcm_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct dev_ctx *ctx = dev_get_drvdata(dai->dev);
+
+ audio_enable(ctx->base);
+ ctx->stream = substream;
+
+ dev_info(ctx->dev, " sdi tx audio enabled\n");
+ return 0;
+}
+
+static int xlnx_sdi_tx_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ u32 val = 0;
+ u32 num_channels, sample_rate, sig_bits;
+
+ struct dev_ctx *ctx = dev_get_drvdata(dai->dev);
+ void __iomem *base = ctx->base;
+
+	/*
+	 * Video mode properties needed by the audio driver are shared with
+	 * it through a pointer in platform data, which is used here. This
+	 * solution may need to be modified/extended to avoid probable
+	 * error scenarios.
+	 */
+ if (!ctx->video_mode || !ctx->video_mode->vdisplay ||
+ !ctx->video_mode->vrefresh) {
+ dev_err(ctx->dev, "couldn't find video display properties\n");
+ return -EINVAL;
+ }
+
+ /*
+ * map video properties.
+ * Note: 1920x1080 and 2048x1080 are the resolutions of sub images for
+ * 3840x2160 and 4096x2160 resolutions respectively.
+ */
+ switch (ctx->video_mode->hdisplay) {
+ case 1920:
+ case 3840:
+ val = SDI_TRANSPORT_FAMILY_1920;
+ break;
+ case 1280:
+ val |= SDI_TRANSPORT_FAMILY_1280;
+ break;
+ case 2048:
+ case 4096:
+ val |= SDI_TRANSPORT_FAMILY_2048;
+ break;
+ case 720:
+ if (ctx->video_mode->vdisplay == 486)
+ val |= SDI_TRANSPORT_FAMILY_NTSC;
+ else if (ctx->video_mode->vdisplay == 576)
+ val |= SDI_TRANSPORT_FAMILY_PAL;
+ else
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (ctx->video_mode->vrefresh) {
+ case 24:
+ val |= (3 << XSDIAUD_EMB_VID_CNT_TRATE_SHIFT);
+ break;
+ case 25:
+ val |= (5 << XSDIAUD_EMB_VID_CNT_TRATE_SHIFT);
+ break;
+ case 30:
+ val |= (7 << XSDIAUD_EMB_VID_CNT_TRATE_SHIFT);
+ break;
+ case 48:
+ val |= (8 << XSDIAUD_EMB_VID_CNT_TRATE_SHIFT);
+ break;
+ case 50:
+ val |= (9 << XSDIAUD_EMB_VID_CNT_TRATE_SHIFT);
+ break;
+ case 60:
+ val |= (11 << XSDIAUD_EMB_VID_CNT_TRATE_SHIFT);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!(ctx->video_mode->flags & DRM_MODE_FLAG_INTERLACE))
+ val |= XSDIAUD_EMB_VID_CNT_TSCAN_MASK;
+
+ val |= XSDIAUD_EMB_VID_CNT_ELE_MASK;
+
+ writel(val, base + XSDIAUD_EMB_VID_CNTRL_REG_OFFSET);
+
+ /* map audio properties */
+ num_channels = params_channels(params);
+ sample_rate = params_rate(params);
+ sig_bits = snd_pcm_format_width(params_format(params));
+
+ dev_info(ctx->dev,
+ "stream params: channels = %d sample_rate = %d bits = %d\n",
+ num_channels, sample_rate, sig_bits);
+
+ val = 0;
+ val |= XSDIAUD_EMB_AUD_CNT_ASYNC_AUDIO;
+
+ switch (sample_rate) {
+ case 48000:
+ val |= XSDIAUD_SAMPRATE0;
+ break;
+ case 44100:
+ val |= XSDIAUD_SAMPRATE1;
+ break;
+ case 32000:
+ val |= XSDIAUD_SAMPRATE2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (sig_bits == 24)
+ val |= XSDIAUD_EMB_AUD_CNT_SS_MASK;
+
+ writel(val, base + XSDIAUD_AUD_CNTRL_REG_OFFSET);
+
+ /* TODO: support more channels, currently only 2. */
+ writel(CHAN_ID_1 | CHAN_ID_0, base + XSDIAUD_CH_VALID_REG_OFFSET);
+
+ return 0;
+}
+
+static void xlnx_sdi_tx_pcm_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct dev_ctx *ctx = dev_get_drvdata(dai->dev);
+ void __iomem *base = ctx->base;
+
+ audio_disable(base);
+ ctx->stream = NULL;
+
+ dev_info(ctx->dev, " sdi tx audio disabled\n");
+}
+
+static const struct snd_soc_component_driver xlnx_sdi_component = {
+ .name = "xlnx-sdi-dai-component",
+};
+
+static const struct snd_soc_dai_ops xlnx_sdi_rx_dai_ops = {
+ .startup = xlnx_sdi_rx_pcm_startup,
+ .shutdown = xlnx_sdi_rx_pcm_shutdown,
+};
+
+static struct snd_soc_dai_driver xlnx_sdi_rx_dai = {
+ .name = "xlnx_sdi_rx",
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S24_LE,
+ },
+ .ops = &xlnx_sdi_rx_dai_ops,
+};
+
+static const struct snd_soc_dai_ops xlnx_sdi_tx_dai_ops = {
+ .startup = xlnx_sdi_tx_pcm_startup,
+ .hw_params = xlnx_sdi_tx_hw_params,
+ .shutdown = xlnx_sdi_tx_pcm_shutdown,
+};
+
+static struct snd_soc_dai_driver xlnx_sdi_tx_dai = {
+ .name = "xlnx_sdi_tx",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S24_LE,
+ },
+ .ops = &xlnx_sdi_tx_dai_ops,
+};
+
+static int xlnx_sdi_audio_probe(struct platform_device *pdev)
+{
+ u32 val;
+ int ret;
+ struct dev_ctx *ctx;
+ struct resource *res;
+ struct device *video_dev;
+ struct device_node *video_node;
+ struct platform_device *video_pdev;
+ struct snd_soc_dai_driver *snd_dai;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+
+ /* TODO - remove before upstreaming */
+ if (of_device_is_compatible(node, "xlnx,v-uhdsdi-audio-1.0")) {
+ dev_err(&pdev->dev, "driver doesn't support sdi audio v1.0\n");
+ return -ENODEV;
+ }
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(struct dev_ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENODEV;
+
+ ctx->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+ if (IS_ERR(ctx->axi_clk)) {
+ ret = PTR_ERR(ctx->axi_clk);
+ dev_err(&pdev->dev, "failed to get s_axi_aclk(%d)\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(ctx->axi_clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to enable s_axi_aclk(%d)\n", ret);
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "No IO MEM resource found\n");
+ ret = -ENODEV;
+ goto err_axis;
+ }
+
+ ctx->base = devm_ioremap_resource(&pdev->dev, res);
+ if (!ctx->base) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -EADDRNOTAVAIL;
+ goto err_axis;
+ }
+
+ ctx->dev = &pdev->dev;
+
+ val = readl(ctx->base + XSDIAUD_GUI_PARAM_REG_OFFSET);
+ if (val & BIT(6)) {
+ ctx->mode = EXTRACT;
+
+ ctx->axis_clk = devm_clk_get(&pdev->dev, "m_axis_clk");
+ if (IS_ERR(ctx->axis_clk)) {
+ ret = PTR_ERR(ctx->axis_clk);
+ dev_err(&pdev->dev, "failed to get m_axis_clk(%d)\n",
+ ret);
+ goto err_axis;
+ }
+
+ ctx->aud_clk = devm_clk_get(&pdev->dev, "sdi_extract_clk");
+ if (IS_ERR(ctx->aud_clk)) {
+ ret = PTR_ERR(ctx->aud_clk);
+ dev_err(&pdev->dev, "failed to get sdi_extract_clk(%d)\n",
+ ret);
+ goto err_axis;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "No IRQ resource found\n");
+ ret = -ENODEV;
+ goto err_axis;
+ }
+ ret = devm_request_irq(&pdev->dev, res->start,
+ xtract_irq_handler,
+ 0, "XLNX_SDI_AUDIO_XTRACT", ctx);
+ if (ret) {
+ dev_err(&pdev->dev, "extract irq request failed\n");
+ ret = -ENODEV;
+ goto err_axis;
+ }
+
+ init_waitqueue_head(&ctx->srate_q);
+
+ snd_dai = &xlnx_sdi_rx_dai;
+ } else {
+ ctx->mode = EMBED;
+ ctx->axis_clk = devm_clk_get(&pdev->dev, "s_axis_clk");
+ if (IS_ERR(ctx->axis_clk)) {
+ ret = PTR_ERR(ctx->axis_clk);
+ dev_err(&pdev->dev, "failed to get s_axis_clk(%d)\n",
+ ret);
+ goto err_axis;
+ }
+
+ ctx->aud_clk = devm_clk_get(&pdev->dev, "sdi_embed_clk");
+ if (IS_ERR(ctx->aud_clk)) {
+ ret = PTR_ERR(ctx->aud_clk);
+ dev_err(&pdev->dev, "failed to get aud_clk(%d)\n",
+ ret);
+ goto err_axis;
+ }
+
+ video_node = of_graph_get_remote_node(pdev->dev.of_node, 0, 0);
+ if (!video_node) {
+ dev_err(ctx->dev, "video_node not found\n");
+ of_node_put(video_node);
+ ret = -ENODEV;
+ goto err_axis;
+ }
+
+ video_pdev = of_find_device_by_node(video_node);
+ if (!video_pdev) {
+ of_node_put(video_node);
+ ret = -ENODEV;
+ goto err_axis;
+ }
+
+ video_dev = &video_pdev->dev;
+ ctx->video_mode =
+ (struct drm_display_mode *)video_dev->platform_data;
+ /* invalid 'platform_data' implies video driver is not loaded */
+ if (!ctx->video_mode) {
+ of_node_put(video_node);
+ ret = -EPROBE_DEFER;
+ goto err_axis;
+ }
+
+ snd_dai = &xlnx_sdi_tx_dai;
+ of_node_put(video_node);
+ }
+
+ ret = clk_prepare_enable(ctx->axis_clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to enable s_axis_clk(%d)\n", ret);
+ goto err_axis;
+ }
+
+ ret = clk_prepare_enable(ctx->aud_clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to enable sdi_extract_clk(%d)\n", ret);
+ goto err_aud_clk;
+ }
+
+ ret = devm_snd_soc_register_component(&pdev->dev, &xlnx_sdi_component,
+ snd_dai, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "couldn't register codec DAI\n");
+ goto err_clk;
+ }
+
+ dev_set_drvdata(&pdev->dev, ctx);
+
+ audio_reset_core(ctx->base, true);
+ audio_reset_core(ctx->base, false);
+
+ dev_info(&pdev->dev, "xlnx sdi codec dai component registered\n");
+ return 0;
+
+err_clk:
+ clk_disable_unprepare(ctx->aud_clk);
+err_aud_clk:
+ clk_disable_unprepare(ctx->axis_clk);
+err_axis:
+ clk_disable_unprepare(ctx->axi_clk);
+ return ret;
+}
+
+static int xlnx_sdi_audio_remove(struct platform_device *pdev)
+{
+ struct dev_ctx *ctx = dev_get_drvdata(&pdev->dev);
+
+ audio_disable(ctx->base);
+ audio_reset_core(ctx->base, true);
+
+ clk_disable_unprepare(ctx->aud_clk);
+ clk_disable_unprepare(ctx->axis_clk);
+ clk_disable_unprepare(ctx->axi_clk);
+ return 0;
+}
+
+static const struct of_device_id xlnx_sdi_audio_of_match[] = {
+ { .compatible = "xlnx,v-uhdsdi-audio-1.0"},
+ { .compatible = "xlnx,v-uhdsdi-audio-2.0"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, xlnx_sdi_audio_of_match);
+
+static struct platform_driver xlnx_sdi_audio_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = xlnx_sdi_audio_of_match,
+ },
+ .probe = xlnx_sdi_audio_probe,
+ .remove = xlnx_sdi_audio_remove,
+};
+
+module_platform_driver(xlnx_sdi_audio_driver);
+
+MODULE_DESCRIPTION("xilinx sdi audio codec driver");
+MODULE_AUTHOR("Maruthi Srinivas Bayyavarapu");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/xilinx/xlnx_snd_common.h b/sound/soc/xilinx/xlnx_snd_common.h
new file mode 100644
index 000000000000..39461fac0d96
--- /dev/null
+++ b/sound/soc/xilinx/xlnx_snd_common.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx ASoC sound card support
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ */
+
+#ifndef _XLNX_SND_COMMON_H
+#define _XLNX_SND_COMMON_H
+
+enum {
+ XLNX_PLAYBACK,
+ XLNX_CAPTURE,
+ XLNX_MAX_PATHS
+};
+
+struct pl_card_data {
+ u32 mclk_val;
+ u32 mclk_ratio;
+ int xlnx_snd_dev_id;
+ struct clk *mclk;
+};
+#endif /* _XLNX_SND_COMMON_H */
diff --git a/sound/soc/xilinx/xlnx_spdif.c b/sound/soc/xilinx/xlnx_spdif.c
index e2ca087adee6..b72a595650c7 100644
--- a/sound/soc/xilinx/xlnx_spdif.c
+++ b/sound/soc/xilinx/xlnx_spdif.c
@@ -24,32 +24,43 @@
#define XLNX_SPDIF_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
#define XSPDIF_IRQ_STS_REG 0x20
+#define XSPDIF_IRQ_STS_CH_STS_MASK BIT(5)
#define XSPDIF_IRQ_ENABLE_REG 0x28
#define XSPDIF_SOFT_RESET_REG 0x40
+#define XSPDIF_SOFT_RESET_VAL 0xA
#define XSPDIF_CONTROL_REG 0x44
+#define XSPDIF_CONTROL_ENABLE_MASK BIT(0)
+#define XSPDIF_CONTROL_FIFO_FLUSH_MASK BIT(1)
+#define XSPDIF_CONTROL_CLK_CFG_MASK GENMASK(5, 2)
+#define XSPDIF_CONTROL_CLK_CFG_SHIFT 2
#define XSPDIF_CHAN_0_STS_REG 0x4C
-#define XSPDIF_GLOBAL_IRQ_ENABLE_REG 0x1C
+#define XSPDIF_GLOBAL_IRQ_REG 0x1C
+#define XSPDIF_GLOBAL_IRQ_ENABLE_MASK BIT(31)
#define XSPDIF_CH_A_USER_DATA_REG_0 0x64
-#define XSPDIF_CORE_ENABLE_MASK BIT(0)
-#define XSPDIF_FIFO_FLUSH_MASK BIT(1)
-#define XSPDIF_CH_STS_MASK BIT(5)
-#define XSPDIF_GLOBAL_IRQ_ENABLE BIT(31)
-#define XSPDIF_CLOCK_CONFIG_BITS_MASK GENMASK(5, 2)
-#define XSPDIF_CLOCK_CONFIG_BITS_SHIFT 2
-#define XSPDIF_SOFT_RESET_VALUE 0xA
-
-#define MAX_CHANNELS 2
-#define AES_SAMPLE_WIDTH 32
-#define CH_STATUS_UPDATE_TIMEOUT 40
+#define XSPDIF_MAX_CHANNELS 2
+#define XSPDIF_AES_SAMPLE_WIDTH 32
+#define XSPDIF_CH_STS_UPDATE_TIMEOUT 40
+
+enum {
+ CLK_DIV_BY_4,
+ CLK_DIV_BY_8,
+ CLK_DIV_BY_16,
+ CLK_DIV_BY_24,
+ CLK_DIV_BY_32,
+ CLK_DIV_BY_48,
+ CLK_DIV_BY_64,
+};
struct spdif_dev_data {
- u32 mode;
- u32 aclk;
- bool rx_chsts_updated;
+ wait_queue_head_t chsts_q;
void __iomem *base;
struct clk *axi_clk;
- wait_queue_head_t chsts_q;
+ struct clk *axis_clk;
+ struct clk *aud_clk;
+ u32 mode;
+ unsigned long aclk;
+ bool rx_chsts_updated;
};
static irqreturn_t xlnx_spdifrx_irq_handler(int irq, void *arg)
@@ -58,12 +69,12 @@ static irqreturn_t xlnx_spdifrx_irq_handler(int irq, void *arg)
struct spdif_dev_data *ctx = arg;
val = readl(ctx->base + XSPDIF_IRQ_STS_REG);
- if (val & XSPDIF_CH_STS_MASK) {
- writel(val & XSPDIF_CH_STS_MASK,
+ if (val & XSPDIF_IRQ_STS_CH_STS_MASK) {
+ writel(val & XSPDIF_IRQ_STS_CH_STS_MASK,
ctx->base + XSPDIF_IRQ_STS_REG);
val = readl(ctx->base +
XSPDIF_IRQ_ENABLE_REG);
- writel(val & ~XSPDIF_CH_STS_MASK,
+ writel(val & ~XSPDIF_IRQ_STS_CH_STS_MASK,
ctx->base + XSPDIF_IRQ_ENABLE_REG);
ctx->rx_chsts_updated = true;
@@ -81,14 +92,14 @@ static int xlnx_spdif_startup(struct snd_pcm_substream *substream,
struct spdif_dev_data *ctx = dev_get_drvdata(dai->dev);
val = readl(ctx->base + XSPDIF_CONTROL_REG);
- val |= XSPDIF_FIFO_FLUSH_MASK;
+ val |= XSPDIF_CONTROL_FIFO_FLUSH_MASK;
writel(val, ctx->base + XSPDIF_CONTROL_REG);
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
- writel(XSPDIF_CH_STS_MASK,
+ writel(XSPDIF_IRQ_STS_CH_STS_MASK,
ctx->base + XSPDIF_IRQ_ENABLE_REG);
- writel(XSPDIF_GLOBAL_IRQ_ENABLE,
- ctx->base + XSPDIF_GLOBAL_IRQ_ENABLE_REG);
+ writel(XSPDIF_GLOBAL_IRQ_ENABLE_MASK,
+ ctx->base + XSPDIF_GLOBAL_IRQ_REG);
}
return 0;
@@ -99,7 +110,7 @@ static void xlnx_spdif_shutdown(struct snd_pcm_substream *substream,
{
struct spdif_dev_data *ctx = dev_get_drvdata(dai->dev);
- writel(XSPDIF_SOFT_RESET_VALUE, ctx->base + XSPDIF_SOFT_RESET_REG);
+ writel(XSPDIF_SOFT_RESET_VAL, ctx->base + XSPDIF_SOFT_RESET_REG);
}
static int xlnx_spdif_hw_params(struct snd_pcm_substream *substream,
@@ -109,38 +120,40 @@ static int xlnx_spdif_hw_params(struct snd_pcm_substream *substream,
u32 val, clk_div, clk_cfg;
struct spdif_dev_data *ctx = dev_get_drvdata(dai->dev);
- clk_div = DIV_ROUND_CLOSEST(ctx->aclk, MAX_CHANNELS * AES_SAMPLE_WIDTH *
+ ctx->aclk = clk_get_rate(ctx->aud_clk);
+ clk_div = DIV_ROUND_CLOSEST(ctx->aclk, XSPDIF_MAX_CHANNELS *
+ XSPDIF_AES_SAMPLE_WIDTH *
params_rate(params));
switch (clk_div) {
case 4:
- clk_cfg = 0;
+ clk_cfg = CLK_DIV_BY_4;
break;
case 8:
- clk_cfg = 1;
+ clk_cfg = CLK_DIV_BY_8;
break;
case 16:
- clk_cfg = 2;
+ clk_cfg = CLK_DIV_BY_16;
break;
case 24:
- clk_cfg = 3;
+ clk_cfg = CLK_DIV_BY_24;
break;
case 32:
- clk_cfg = 4;
+ clk_cfg = CLK_DIV_BY_32;
break;
case 48:
- clk_cfg = 5;
+ clk_cfg = CLK_DIV_BY_48;
break;
case 64:
- clk_cfg = 6;
+ clk_cfg = CLK_DIV_BY_64;
break;
default:
return -EINVAL;
}
val = readl(ctx->base + XSPDIF_CONTROL_REG);
- val &= ~XSPDIF_CLOCK_CONFIG_BITS_MASK;
- val |= clk_cfg << XSPDIF_CLOCK_CONFIG_BITS_SHIFT;
+ val &= ~XSPDIF_CONTROL_CLK_CFG_MASK;
+ val |= clk_cfg << XSPDIF_CONTROL_CLK_CFG_SHIFT;
writel(val, ctx->base + XSPDIF_CONTROL_REG);
return 0;
@@ -150,7 +163,7 @@ static int rx_stream_detect(struct snd_soc_dai *dai)
{
int err;
struct spdif_dev_data *ctx = dev_get_drvdata(dai->dev);
- unsigned long jiffies = msecs_to_jiffies(CH_STATUS_UPDATE_TIMEOUT);
+ unsigned long jiffies = msecs_to_jiffies(XSPDIF_CH_STS_UPDATE_TIMEOUT);
/* start capture only if stream is detected within 40ms timeout */
err = wait_event_interruptible_timeout(ctx->chsts_q,
@@ -177,7 +190,7 @@ static int xlnx_spdif_trigger(struct snd_pcm_substream *substream, int cmd,
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- val |= XSPDIF_CORE_ENABLE_MASK;
+ val |= XSPDIF_CONTROL_ENABLE_MASK;
writel(val, ctx->base + XSPDIF_CONTROL_REG);
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
ret = rx_stream_detect(dai);
@@ -185,7 +198,7 @@ static int xlnx_spdif_trigger(struct snd_pcm_substream *substream, int cmd,
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- val &= ~XSPDIF_CORE_ENABLE_MASK;
+ val &= ~XSPDIF_CONTROL_ENABLE_MASK;
writel(val, ctx->base + XSPDIF_CONTROL_REG);
break;
default:
@@ -263,21 +276,34 @@ static int xlnx_spdif_probe(struct platform_device *pdev)
ctx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->base)) {
ret = PTR_ERR(ctx->base);
- goto clk_err;
+ goto axi_clk_err;
}
ret = of_property_read_u32(node, "xlnx,spdif-mode", &ctx->mode);
if (ret < 0) {
dev_err(dev, "cannot get SPDIF mode\n");
- goto clk_err;
+ goto axi_clk_err;
}
if (ctx->mode) {
+ ctx->axis_clk = devm_clk_get(dev, "s_axis_aclk");
+ if (IS_ERR(ctx->axis_clk)) {
+ ret = PTR_ERR(ctx->axis_clk);
+ dev_err(dev, "failed to get s_axis_aclk(%d)\n", ret);
+ goto axi_clk_err;
+ }
dai_drv = &xlnx_spdif_tx_dai;
} else {
+ ctx->axis_clk = devm_clk_get(dev, "m_axis_aclk");
+ if (IS_ERR(ctx->axis_clk)) {
+ ret = PTR_ERR(ctx->axis_clk);
+ dev_err(dev, "failed to get m_axis_aclk(%d)\n", ret);
+ goto axi_clk_err;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
dev_err(dev, "No IRQ resource found\n");
ret = -ENODEV;
- goto clk_err;
+ goto axi_clk_err;
}
ret = devm_request_irq(dev, res->start,
xlnx_spdifrx_irq_handler,
@@ -285,17 +311,30 @@ static int xlnx_spdif_probe(struct platform_device *pdev)
if (ret) {
dev_err(dev, "spdif rx irq request failed\n");
ret = -ENODEV;
- goto clk_err;
+ goto axi_clk_err;
}
init_waitqueue_head(&ctx->chsts_q);
dai_drv = &xlnx_spdif_rx_dai;
}
- ret = of_property_read_u32(node, "xlnx,aud_clk_i", &ctx->aclk);
- if (ret < 0) {
- dev_err(dev, "cannot get aud_clk_i value\n");
- goto clk_err;
+ ctx->aud_clk = devm_clk_get(dev, "aud_clk_i");
+ if (IS_ERR(ctx->aud_clk)) {
+ ret = PTR_ERR(ctx->aud_clk);
+ dev_err(dev, "failed to get aud_clk_i(%d)\n", ret);
+ goto axi_clk_err;
+ }
+
+ ret = clk_prepare_enable(ctx->axis_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable axis_aclk(%d)\n", ret);
+ goto axi_clk_err;
+ }
+
+ ret = clk_prepare_enable(ctx->aud_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable aud_clk_i(%d)\n", ret);
+ goto axis_clk_err;
}
dev_set_drvdata(dev, ctx);
@@ -307,11 +346,17 @@ static int xlnx_spdif_probe(struct platform_device *pdev)
goto clk_err;
}
- writel(XSPDIF_SOFT_RESET_VALUE, ctx->base + XSPDIF_SOFT_RESET_REG);
+ writel(XSPDIF_SOFT_RESET_VAL, ctx->base + XSPDIF_SOFT_RESET_REG);
dev_info(dev, "%s DAI registered\n", dai_drv->name);
+ return 0;
clk_err:
+ clk_disable_unprepare(ctx->aud_clk);
+axis_clk_err:
+ clk_disable_unprepare(ctx->axis_clk);
+axi_clk_err:
clk_disable_unprepare(ctx->axi_clk);
+
return ret;
}
@@ -319,6 +364,8 @@ static int xlnx_spdif_remove(struct platform_device *pdev)
{
struct spdif_dev_data *ctx = dev_get_drvdata(&pdev->dev);
+ clk_disable_unprepare(ctx->aud_clk);
+ clk_disable_unprepare(ctx->axis_clk);
clk_disable_unprepare(ctx->axi_clk);
return 0;
}